==> .gitignore <==
build/
==> .travis.yml <==
language: c
compiler:
- gcc
- clang
cache:
directories:
- $HOME/OpenBlasInstall
sudo: false
env:
- TORCH_LUA_VERSION=LUAJIT21
- TORCH_LUA_VERSION=LUA51
- TORCH_LUA_VERSION=LUA52
os:
- linux
addons:
  apt:
    packages:
      - cmake
      - gfortran
      - gcc-multilib
      - gfortran-multilib
      - liblapack-dev
      - build-essential
      - gcc
      - g++
      - curl
      - libreadline-dev
      - git-core
      - libqt4-core
      - libqt4-gui
      - libqt4-dev
      - libjpeg-dev
      - libpng-dev
      - ncurses-dev
      - imagemagick
      - libzmq3-dev
      - unzip
      - gnuplot
      - gnuplot-x11
before_script:
- export ROOT_TRAVIS_DIR=$(pwd)
- export INSTALL_PREFIX=~/torch/install
- ls $HOME/OpenBlasInstall/lib || (cd /tmp/ && git clone https://github.com/xianyi/OpenBLAS.git -b master && cd OpenBLAS && (make NO_AFFINITY=1 -j$(getconf _NPROCESSORS_ONLN) 2>/dev/null >/dev/null) && make PREFIX=$HOME/OpenBlasInstall install)
- git clone https://github.com/torch/distro.git ~/torch --recursive
- cd ~/torch && git submodule update --init --recursive
- mkdir build && cd build
- export CMAKE_LIBRARY_PATH=$HOME/OpenBlasInstall/include:$HOME/OpenBlasInstall/lib:$CMAKE_LIBRARY_PATH
- cmake .. -DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" -DCMAKE_BUILD_TYPE=Release -DWITH_${TORCH_LUA_VERSION}=ON
- make && make install
- cd $ROOT_TRAVIS_DIR
- export LD_LIBRARY_PATH=${INSTALL_PREFIX}/lib:$LD_LIBRARY_PATH
script:
- ${INSTALL_PREFIX}/bin/luarocks make rocks/torch-scm-1.rockspec
- ${INSTALL_PREFIX}/bin/luarocks install luaffi
- export PATH=${INSTALL_PREFIX}/bin:$PATH
- export TESTLUA=$(which luajit lua | head -n 1)
- ${TESTLUA} -ltorch -e "t=torch.test(); if t.errors[1] then os.exit(1) end"
- cd test
- ${TESTLUA} test_writeObject.lua
- ${TESTLUA} test_Tester.lua
==> CMakeLists.txt <==
IF(APPLE)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12 FATAL_ERROR)
CMAKE_POLICY(VERSION 2.8.12)
ELSE()
CMAKE_MINIMUM_REQUIRED(VERSION 2.8 FATAL_ERROR)
CMAKE_POLICY(VERSION 2.8)
ENDIF()
SET(CMAKE_MODULE_PATH
"${CMAKE_CURRENT_SOURCE_DIR}/cmake"
"${CMAKE_MODULE_PATH}")
IF (NOT MSVC)
IF (MINGW)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=format")
ELSE()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=implicit-function-declaration -Werror=format")
ENDIF(MINGW)
ENDIF(NOT MSVC)
# Flags
# When using MSVC
IF(MSVC)
# we want to respect the standard, and we are bored of those **** .
ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE=1)
ENDIF(MSVC)
# OpenMP support?
SET(WITH_OPENMP ON CACHE BOOL "OpenMP support if available?")
IF (APPLE AND CMAKE_COMPILER_IS_GNUCC)
EXEC_PROGRAM (uname ARGS -v OUTPUT_VARIABLE DARWIN_VERSION)
STRING (REGEX MATCH "[0-9]+" DARWIN_VERSION ${DARWIN_VERSION})
MESSAGE (STATUS "MAC OS Darwin Version: ${DARWIN_VERSION}")
IF (DARWIN_VERSION GREATER 9)
SET(APPLE_OPENMP_SUCKS 1)
ENDIF (DARWIN_VERSION GREATER 9)
EXECUTE_PROCESS (COMMAND ${CMAKE_C_COMPILER} -dumpversion
OUTPUT_VARIABLE GCC_VERSION)
IF (APPLE_OPENMP_SUCKS AND GCC_VERSION VERSION_LESS 4.6.2)
MESSAGE(STATUS "Warning: Disabling OpenMP (unstable with this version of GCC)")
MESSAGE(STATUS " Install GCC >= 4.6.2 or change your OS to enable OpenMP")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unknown-pragmas")
SET(WITH_OPENMP OFF CACHE BOOL "OpenMP support if available?" FORCE)
ENDIF ()
ENDIF ()
IF (WITH_OPENMP)
FIND_PACKAGE(OpenMP)
IF(OPENMP_FOUND)
MESSAGE(STATUS "Compiling with OpenMP support")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
ENDIF(OPENMP_FOUND)
ENDIF (WITH_OPENMP)
# Includes
INCLUDE(TorchPaths)
INCLUDE(TorchPathsInit)
INCLUDE(TorchPackage)
INCLUDE(TorchWrap)
INCLUDE(TorchExports)
# Torch libraries
ADD_SUBDIRECTORY(lib)
CONFIGURE_FILE(paths.lua.in "${CMAKE_CURRENT_BINARY_DIR}/paths.lua")
INCLUDE_DIRECTORIES(BEFORE "${LUA_INCDIR}")
INCLUDE_DIRECTORIES(BEFORE "${CMAKE_CURRENT_SOURCE_DIR}/lib/TH")
INCLUDE_DIRECTORIES(BEFORE "${CMAKE_CURRENT_BINARY_DIR}/lib/TH")
INCLUDE_DIRECTORIES(BEFORE "${CMAKE_CURRENT_SOURCE_DIR}/lib/luaT")
LINK_DIRECTORIES("${LUA_LIBDIR}")
SET(src DiskFile.c File.c MemoryFile.c PipeFile.c Storage.c Tensor.c Timer.c utils.c init.c TensorOperator.c TensorMath.c random.c Generator.c)
SET(luasrc init.lua File.lua Tensor.lua CmdLine.lua FFInterface.lua Tester.lua TestSuite.lua ${CMAKE_CURRENT_BINARY_DIR}/paths.lua test/test.lua)
# Necessary to generate the wrappers
ADD_TORCH_WRAP(tensormathwrap TensorMath.lua)
ADD_TORCH_WRAP(randomwrap random.lua)
ADD_TORCH_PACKAGE(torch "${src}" "${luasrc}")
TARGET_LINK_LIBRARIES(torch luaT TH)
IF(LUALIB)
TARGET_LINK_LIBRARIES(torch ${LUALIB})
ENDIF()
INSTALL(FILES "README.md" DESTINATION "${Torch_INSTALL_LUA_PATH_SUBDIR}/torch")
INSTALL(DIRECTORY "doc" DESTINATION "${Torch_INSTALL_LUA_PATH_SUBDIR}/torch")
==> CONTRIBUTING.md <==
# Contributing to Torch7 Core (torch7, nn, cutorch, cunn)
Thanks a lot! There are plenty of ways you can help!
Please take a moment to review this document in order to make the contribution
process easy and effective for everyone involved.
Following these guidelines helps to communicate that you respect the time of
the developers managing and developing this open source project. In return,
they should reciprocate that respect in addressing your issue or assessing
patches and features.
## Using the issue tracker
The [issue tracker](https://github.com/torch/torch7/issues) is
the preferred channel for [bug reports](#bugs), [feature requests](#features)
and [submitting pull requests](#pull-requests), but please respect the following
restrictions:
* Please **do not** use the issue tracker for personal support requests (use
[mailing-list](https://groups.google.com/forum/#!forum/torch7)).
* Please **do not** open issues regarding the code in a torch package
outside the core. For example, don't open issues about the
REPL in the torch7 issue tracker; use the trepl issue tracker instead.
## Bug reports
A bug is a _demonstrable problem_ that is caused by the code in the repository.
Good bug reports are extremely helpful - thank you!
Guidelines for bug reports:
1. **Use the GitHub issue search** — check if the issue has already been
reported.
2. **Check if the issue has been fixed** — try to reproduce it using the
latest `master` or development branch in the repository.
3. **Isolate the problem** — ideally create a reduced test case,
preferably within 100 lines of code, as sketched below.
A good bug report shouldn't leave others needing to chase you up for more
information. Please try to be as detailed as possible in your report. What is
your environment? What steps will reproduce the issue? On what OS do you
experience the problem? What would you expect the outcome to be? All these
details will help people fix any potential bugs.
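For example, a self-contained report script might bundle interpreter and environment information with the smallest snippet that triggers the problem; the failing assertion below is purely hypothetical:

```lua
-- Hypothetical minimal test case for a bug report.
require 'torch'
print(_VERSION, jit and jit.version or 'no LuaJIT')  -- interpreter version
torch.manualSeed(0)                                  -- make the repro deterministic
local x = torch.DoubleTensor(3):fill(1)
local y = x:sum()
assert(y == 3, 'expected 3, got ' .. y)              -- the (hypothetical) failing check
```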
## Feature requests
Feature requests are welcome. Keep in mind that Torch is community-developed and
its maintainers are not exclusively Torch developers. The purpose of a feature
request is to make others who may want to implement a feature aware of the
interest in it.
## Pull requests
Good pull requests - patches, improvements, new features - are a fantastic
help. They should remain focused in scope **and avoid containing unrelated
commits.**
**Please ask first** before embarking on any significant pull request (e.g.
implementing features, refactoring code, porting to a different language),
otherwise you risk spending a lot of time working on something that the
project's developers might not want to merge into the project.
Please adhere to the coding conventions used throughout a project (indentation,
accurate comments, etc.) and any other requirements (such as test coverage).
Adhering to the following process is the best way to get your work
included in the project:
1. [Fork](https://help.github.com/articles/fork-a-repo) the project, clone your
fork, and configure the remotes:
```bash
# Clone your fork of the repo into the current directory
git clone https://github.com/<your-username>/torch7.git
# Navigate to the newly cloned directory
cd torch7
# Assign the original repo to a remote called "upstream"
git remote add upstream https://github.com/torch/torch7.git
```
2. If you cloned a while ago, get the latest changes from upstream:
```bash
git checkout master
git pull upstream master
```
3. Create a new topic branch (off the main project development branch) to
contain your feature, change, or fix:
```bash
git checkout -b <topic-branch-name>
```
4. Commit your changes in logical chunks. Please try to adhere to these [git commit
message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
Use Git's [interactive rebase](https://help.github.com/articles/about-git-rebase)
feature to tidy up your commits before making them public. This helps us keep the
commit history clean and in logical blocks as Torch grows.
For example:
- If you are adding a new function or a module, keep the module + tests + doc
to a single commit unless logically warranted.
- If you are fixing a bug, keep the bugfix to a single commit unless logically warranted.
5. Locally merge (or rebase) the upstream development branch into your topic branch:
```bash
git pull [--rebase] upstream master
```
6. Push your topic branch up to your fork:
```bash
git push origin <topic-branch-name>
```
7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/)
with a clear title and description.
**IMPORTANT**: By submitting a patch, you agree to allow the project owners to
license your work under the terms of the BSD License.
==> COPYRIGHT.txt <==
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the names of Deepmind Technologies, NYU, NEC Laboratories America
and IDIAP Research Institute nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
==> CmdLine.lua <==
local CmdLine = torch.class('torch.CmdLine')
local function strip(str)
return string.match(str, '%-*(.*)')
end
local function pad(str, sz)
return str .. string.rep(' ', sz-#str)
end
function CmdLine:error(msg)
print('')
io.stderr:write(msg)
print('')
self:help()
os.exit(1)
end
function CmdLine:__readArgument__(params, arg, i, nArgument)
local argument = self.arguments[nArgument]
local value = arg[i]
if nArgument > #self.arguments then
self:error('invalid argument: ' .. value)
end
if argument.type and type(value) ~= argument.type then
self:error('invalid argument type for argument ' .. argument.key .. ' (should be ' .. argument.type .. ')')
end
params[strip(argument.key)] = value
return 1
end
function CmdLine:__readOption__(params, arg, i)
local key = arg[i]
local option = self.options[key]
if not option then
self:error('unknown option ' .. key)
end
if option.type and option.type == 'boolean' then
params[strip(key)] = not option.default
return 1
else
local value = arg[i+1]
if not value then
self:error('missing argument for option ' .. key)
end
if not option.type or option.type == 'string' then
elseif option.type == 'number' then
value = tonumber(value)
else
self:error('unknown required option type ' .. option.type)
end
if not value then
self:error('invalid type for option ' .. key .. ' (should be ' .. option.type .. ')')
end
params[strip(key)] = value
return 2
end
end
function CmdLine:__init(argseparator_,keyseparator_)
self.argseparator = argseparator_ or ','
self.keyseparator = keyseparator_ or '='
self.options = {}
self.arguments = {}
self.helplines = {}
self.dateformat = nil
self.silentio = false
end
function CmdLine:silent()
self.silentio = true
end
function CmdLine:addTime(name, format)
format = format or '%Y-%m-%d %H:%M:%S'
if type(format) ~= 'string' then
error('Argument has to be a string')
end
if name ~= nil then
name = '[' .. name .. ']: '
else
name = ''
end
self.dateformat = format .. name
end
function CmdLine:argument(key, help, _type_)
table.insert(self.arguments, {key=key, help=help, type=_type_})
table.insert(self.helplines, self.arguments[#self.arguments])
end
function CmdLine:option(key, default, help, _type_)
if default == nil then
error('option ' .. key .. ' has no default value')
end
_type_ = _type_ or type(default)
if type(default) ~= _type_ then
error('option ' .. key .. ' has wrong default type value')
end
self.options[key] = {key=key, default=default, help=help, type=_type_}
table.insert(self.helplines, self.options[key])
end
function CmdLine:default()
local params = {}
for option,v in pairs(self.options) do
params[strip(option)] = v.default
end
return params
end
function CmdLine:parse(arg)
local i = 1
local params = self:default()
local nArgument = 0
while i <= #arg do
if arg[i] == '-help' or arg[i] == '-h' or arg[i] == '--help' then
self:help(arg)
os.exit(0)
end
if self.options[arg[i]] then
i = i + self:__readOption__(params, arg, i)
else
nArgument = nArgument + 1
i = i + self:__readArgument__(params, arg, i, nArgument)
end
end
if nArgument ~= #self.arguments then
self:error('not enough arguments')
end
return params
end
function CmdLine:string(prefix, params, ignore)
local arguments = {}
local options = {}
prefix = prefix or ''
for k,v in pairs(params) do
if ignore[k] then
print('-- ignore option ' .. k)
elseif self.options['-' .. k] then
if v ~= self.options['-' .. k].default or ignore[k] == false then
if type(v) == 'boolean' then
if v then
v = 't'
else
v = 'f'
end
end
table.insert(options, k .. self.keyseparator .. v)
print(k,v,self.options['-' .. k].default)
end
else
local narg
for i=1,#self.arguments do
if strip(self.arguments[i].key) == k then
narg = i
end
end
if narg then
arguments[narg] = k .. self.keyseparator .. v
else
print('WARNING: unknown option/argument: ' .. k .. ' IGNORING for DIRECTORY NAME')
end
end
end
table.sort(options)
local str = table.concat(arguments, self.argseparator)
if str == '' then
str = table.concat(options, self.argseparator)
else
str = str .. self.argseparator .. table.concat(options, self.argseparator)
end
if str == '' then
return prefix
else
return prefix .. self.argseparator .. str
end
end
local oprint = nil
function CmdLine:log(file, params)
local f = (io.type(file) == 'file' and file) or io.open(file, 'w')
oprint = oprint or print -- get the current print function lazily
function print(...)
local n = select("#", ...)
local arg = {...}
if not self.silentio then
oprint(...)
end
local str = {}
if self.dateformat then
table.insert(str, os.date(self.dateformat))
end
for i=1,n do
table.insert(str,tostring(arg[i]))
end
table.insert(str,'\n')
f:write(table.concat(str,' '))
f:flush()
end
print('[program started on ' .. os.date() .. ']')
print('[command line arguments]')
if params then
for k,v in pairs(params) do
print(k,v)
end
end
print('[----------------------]')
end
function CmdLine:text(txt)
txt = txt or ''
assert(type(txt) == 'string')
table.insert(self.helplines, txt)
end
function CmdLine:help(arg)
io.write('Usage: ')
if arg then io.write(arg[0] .. ' ') end
io.write('[options]')
for i=1,#self.arguments do
io.write(' <' .. strip(self.arguments[i].key) .. '>')
end
io.write('\n')
-- first pass to compute max length
local optsz = 0
for _,option in ipairs(self.helplines) do
if type(option) == 'table' then
if option.default ~= nil then -- it is an option
if #option.key > optsz then
optsz = #option.key
end
else -- it is an argument
if #strip(option.key)+2 > optsz then
optsz = #strip(option.key)+2
end
end
end
end
-- second pass to print
for _,option in ipairs(self.helplines) do
if type(option) == 'table' then
io.write(' ')
if option.default ~= nil then -- it is an option
io.write(pad(option.key, optsz))
if option.help then io.write(' ' .. option.help) end
io.write(' [' .. tostring(option.default) .. ']')
else -- it is an argument
io.write(pad('<' .. strip(option.key) .. '>', optsz))
if option.help then io.write(' ' .. option.help) end
end
else
io.write(option) -- just some additional help
end
io.write('\n')
end
end
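As a quick orientation, here is a minimal sketch of how the torch.CmdLine class defined above is typically used; the option names and defaults are illustrative only:

```lua
-- Illustrative only: parse command lines like 'th script.lua -lr 0.1 data.txt'.
local cmd = torch.CmdLine()
cmd:text('Options:')
cmd:option('-lr', 0.01, 'learning rate', 'number')
cmd:option('-verbose', false, 'print progress')   -- boolean flag, toggled by its presence
cmd:argument('-input', 'path to the input file')
local params = cmd:parse(arg)
print(params.lr, params.verbose, params.input)    -- leading dashes are stripped from keys
```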
==> DiskFile.c <==
#include "general.h"
static int torch_DiskFile_new(lua_State *L)
{
const char *name = luaL_checkstring(L, 1);
const char *mode = luaL_optstring(L, 2, "r");
int isQuiet = luaT_optboolean(L, 3, 0);
THFile *self = THDiskFile_new(name, mode, isQuiet);
luaT_pushudata(L, self, "torch.DiskFile");
return 1;
}
static int torch_DiskFile_free(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.DiskFile");
THFile_free(self);
return 0;
}
static int torch_DiskFile_isLittleEndianCPU(lua_State *L)
{
lua_pushboolean(L, THDiskFile_isLittleEndianCPU());
return 1;
}
static int torch_DiskFile_isBigEndianCPU(lua_State *L)
{
lua_pushboolean(L, !THDiskFile_isLittleEndianCPU());
return 1;
}
static int torch_DiskFile_nativeEndianEncoding(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.DiskFile");
THDiskFile_nativeEndianEncoding(self);
lua_settop(L, 1);
return 1;
}
static int torch_DiskFile_littleEndianEncoding(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.DiskFile");
THDiskFile_littleEndianEncoding(self);
lua_settop(L, 1);
return 1;
}
static int torch_DiskFile_bigEndianEncoding(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.DiskFile");
THDiskFile_bigEndianEncoding(self);
lua_settop(L, 1);
return 1;
}
static int torch_DiskFile_longSize(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.DiskFile");
THDiskFile_longSize(self, lua_tointeger(L, 2));
lua_settop(L, 1);
return 1;
}
static int torch_DiskFile_noBuffer(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.DiskFile");
THDiskFile_noBuffer(self);
lua_settop(L, 1);
return 1;
}
static int torch_DiskFile___tostring__(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.DiskFile");
lua_pushfstring(L, "torch.DiskFile on <%s> [status: %s -- mode %c%c]",
THDiskFile_name(self),
(THFile_isOpened(self) ? "open" : "closed"),
(THFile_isReadable(self) ? 'r' : ' '),
(THFile_isWritable(self) ? 'w' : ' '));
return 1;
}
static const struct luaL_Reg torch_DiskFile__ [] = {
{"isLittleEndianCPU", torch_DiskFile_isLittleEndianCPU},
{"isBigEndianCPU", torch_DiskFile_isBigEndianCPU},
{"nativeEndianEncoding", torch_DiskFile_nativeEndianEncoding},
{"littleEndianEncoding", torch_DiskFile_littleEndianEncoding},
{"bigEndianEncoding", torch_DiskFile_bigEndianEncoding},
{"longSize", torch_DiskFile_longSize},
{"noBuffer", torch_DiskFile_noBuffer},
{"__tostring__", torch_DiskFile___tostring__},
{NULL, NULL}
};
void torch_DiskFile_init(lua_State *L)
{
luaT_newmetatable(L, "torch.DiskFile", "torch.File",
torch_DiskFile_new, torch_DiskFile_free, NULL);
luaT_setfuncs(L, torch_DiskFile__, 0);
lua_pop(L, 1);
}
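A minimal sketch of the torch.DiskFile bindings above (the file name is illustrative): the encoding methods return the file itself, so calls chain, and the typed read functions return either a scalar or a Storage depending on their argument:

```lua
-- Write some ints in binary little-endian form, then read them back.
local f = torch.DiskFile('numbers.bin', 'w'):binary():littleEndianEncoding()
f:writeInt(42)
f:writeInt(torch.IntStorage({1, 2, 3}))   -- a whole storage at once
f:close()

f = torch.DiskFile('numbers.bin', 'r'):binary():littleEndianEncoding()
print(f:readInt())      -- 42 (scalar form)
print(f:readInt(3))     -- an IntStorage of size 3
f:close()
```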
==> FFInterface.lua <==
-- if this causes issues, you may need to:
-- luarocks remove --force ffi
-- and follow instructions to install
-- https://github.com/facebook/luaffifb
local ok, ffi = pcall(require, 'ffi')
local function checkArgument(condition, fn, ud, msg, level)
local level = level or 3
if not condition then
error("bad argument #" .. ud .. " to '" .. fn .. "' (" .. msg .. ")", level)
end
end
local function checkArgumentType(expected, actual, fn, ud, level)
local level = level or 3
if expected ~= actual then
checkArgument(false, fn, ud, expected .. " expected, got " .. actual, level + 1)
end
end
if ok then
local Real2real = {
Byte='unsigned char',
Char='char',
Short='short',
Int='int',
Long='long',
Float='float',
Double='double',
Half='THHalf'
}
-- Allocator
ffi.cdef[[
typedef struct THAllocator {
void* (*malloc)(void*, ptrdiff_t);
void* (*realloc)(void*, void*, ptrdiff_t);
void (*free)(void*, void*);
} THAllocator;
]]
-- Half
ffi.cdef[[
typedef struct {
unsigned short x;
} __THHalf;
typedef __THHalf THHalf;
]]
-- Storage
for Real, real in pairs(Real2real) do
local cdefs = [[
typedef struct THRealStorage
{
real *data;
ptrdiff_t size;
int refcount;
char flag;
THAllocator *allocator;
void *allocatorContext;
} THRealStorage;
]]
cdefs = cdefs:gsub('Real', Real):gsub('real', real)
ffi.cdef(cdefs)
local Storage = torch.getmetatable(string.format('torch.%sStorage', Real))
local Storage_tt = ffi.typeof('TH' .. Real .. 'Storage**')
rawset(Storage,
"cdata",
function(self)
return Storage_tt(self)[0]
end)
rawset(Storage,
"data",
function(self)
return Storage_tt(self)[0].data
end)
end
-- Tensor
for Real, real in pairs(Real2real) do
local cdefs = [[
typedef struct THRealTensor
{
long *size;
long *stride;
int nDimension;
THRealStorage *storage;
ptrdiff_t storageOffset;
int refcount;
char flag;
} THRealTensor;
]]
cdefs = cdefs:gsub('Real', Real):gsub('real', real)
ffi.cdef(cdefs)
local Tensor_type = string.format('torch.%sTensor', Real)
local Tensor = torch.getmetatable(Tensor_type)
local Tensor_tt = ffi.typeof('TH' .. Real .. 'Tensor**')
rawset(Tensor,
"cdata",
function(self)
if not self then return nil; end
return Tensor_tt(self)[0]
end)
rawset(Tensor,
"data",
function(self)
if not self then return nil; end
self = Tensor_tt(self)[0]
return self.storage ~= nil and self.storage.data + self.storageOffset or nil
end)
-- faster apply (contiguous case)
if Tensor_type ~= 'torch.HalfTensor' then
local apply = Tensor.apply
rawset(Tensor,
"apply",
function(self, func)
if self:isContiguous() and self.data then
local self_d = self:data()
for i=0,self:nElement()-1 do
local res = func(tonumber(self_d[i])) -- tonumber() required for long...
if res then
self_d[i] = res
end
end
return self
else
return apply(self, func)
end
end)
-- faster map (contiguous case)
local map = Tensor.map
rawset(Tensor,
"map",
function(self, src, func)
checkArgument(torch.isTensor(src), "map", 1, "tensor expected")
checkArgumentType(self:type(), src:type(), "map", 1)
if self:isContiguous() and src:isContiguous() and self.data and src.data then
local self_d = self:data()
local src_d = src:data()
assert(src:nElement() == self:nElement(), 'size mismatch')
for i=0,self:nElement()-1 do
local res = func(tonumber(self_d[i]), tonumber(src_d[i])) -- tonumber() required for long...
if res then
self_d[i] = res
end
end
return self
else
return map(self, src, func)
end
end)
-- faster map2 (contiguous case)
local map2 = Tensor.map2
rawset(Tensor,
"map2",
function(self, src1, src2, func)
checkArgument(torch.isTensor(src1), "map", 1, "tensor expected")
checkArgument(torch.isTensor(src2), "map", 2, "tensor expected")
checkArgumentType(self:type(), src1:type(), "map", 1)
checkArgumentType(self:type(), src2:type(), "map", 2)
if self:isContiguous() and src1:isContiguous() and src2:isContiguous() and self.data and src1.data and src2.data then
local self_d = self:data()
local src1_d = src1:data()
local src2_d = src2:data()
assert(src1:nElement() == self:nElement(), 'size mismatch')
assert(src2:nElement() == self:nElement(), 'size mismatch')
for i=0,self:nElement()-1 do
local res = func(tonumber(self_d[i]), tonumber(src1_d[i]), tonumber(src2_d[i])) -- tonumber() required for long...
if res then
self_d[i] = res
end
end
return self
else
return map2(self, src1, src2, func)
end
end)
end
end
-- torch.data
-- will fail if :data() is not defined
function torch.data(self, asnumber)
if not self then return nil; end
local data = self:data()
if asnumber then
return ffi.cast('intptr_t', data)
else
return data
end
end
-- torch.cdata
-- will fail if :cdata() is not defined
function torch.cdata(self, asnumber)
if not self then return nil; end
local cdata = self:cdata()
if asnumber then
return ffi.cast('intptr_t', cdata)
else
return cdata
end
end
end
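A minimal sketch of the FFI helpers installed above, valid only when the initial require('ffi') succeeded: :data() exposes a raw, 0-indexed C pointer into a contiguous tensor's storage:

```lua
-- Raw pointer access into a tensor (requires a working ffi package).
local t = torch.FloatTensor(4):zero()
local p = t:data()            -- float* cdata, 0-based indexing
p[0] = 3.5                    -- writes are visible through the tensor
print(t[1])                   -- 3.5
print(torch.data(t, true))    -- the same pointer as an integer address
```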
==> File.c <==
#include "general.h"
#include "THFile.h"
#include "luaT.h"
#define IMPLEMENT_TORCH_FILE_FLAG(NAME) \
static int torch_File_##NAME(lua_State *L) \
{ \
THFile *self = luaT_checkudata(L, 1, "torch.File"); \
lua_pushboolean(L, THFile_##NAME(self)); \
return 1; \
}
IMPLEMENT_TORCH_FILE_FLAG(isQuiet)
IMPLEMENT_TORCH_FILE_FLAG(isReadable)
IMPLEMENT_TORCH_FILE_FLAG(isWritable)
IMPLEMENT_TORCH_FILE_FLAG(isBinary)
IMPLEMENT_TORCH_FILE_FLAG(isAutoSpacing)
IMPLEMENT_TORCH_FILE_FLAG(hasError)
#define IMPLEMENT_TORCH_FILE_FUNC(NAME) \
static int torch_File_##NAME(lua_State *L) \
{ \
THFile *self = luaT_checkudata(L, 1, "torch.File"); \
THFile_##NAME(self); \
lua_settop(L, 1); \
return 1; \
}
IMPLEMENT_TORCH_FILE_FUNC(binary)
IMPLEMENT_TORCH_FILE_FUNC(ascii)
IMPLEMENT_TORCH_FILE_FUNC(autoSpacing)
IMPLEMENT_TORCH_FILE_FUNC(noAutoSpacing)
IMPLEMENT_TORCH_FILE_FUNC(quiet)
IMPLEMENT_TORCH_FILE_FUNC(pedantic)
IMPLEMENT_TORCH_FILE_FUNC(clearError)
IMPLEMENT_TORCH_FILE_FUNC(synchronize)
static int torch_File_seek(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.File");
ptrdiff_t position = luaL_checkinteger(L, 2)-1;
// >= 0 because it has 1 already subtracted
THArgCheck(position >= 0, 2, "position has to be greater than 0!");
THFile_seek(self, (size_t)position);
lua_settop(L, 1);
return 1;
}
IMPLEMENT_TORCH_FILE_FUNC(seekEnd)
static int torch_File_position(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.File");
lua_pushnumber(L, THFile_position(self)+1);
return 1;
}
IMPLEMENT_TORCH_FILE_FUNC(close)
#define IMPLEMENT_TORCH_FILE_RW(TYPEC, TYPE) \
static int torch_File_read##TYPEC(lua_State *L) \
{ \
THFile *self = luaT_checkudata(L, 1, "torch.File"); \
int narg = lua_gettop(L); \
\
if(narg == 1) \
{ \
lua_pushnumber(L, THFile_read##TYPEC##Scalar(self)); \
return 1; \
} \
else if(narg == 2) \
{ \
if(lua_isnumber(L, 2)) \
{ \
ptrdiff_t size = lua_tonumber(L, 2); \
ptrdiff_t nread; \
\
TH##TYPEC##Storage *storage = TH##TYPEC##Storage_newWithSize(size); \
luaT_pushudata(L, storage, "torch." #TYPEC "Storage"); \
nread = THFile_read##TYPEC(self, storage); \
if(nread != size) \
TH##TYPEC##Storage_resize(storage, nread); \
return 1; \
} \
else if(luaT_toudata(L, 2, "torch." #TYPEC "Storage")) \
{ \
TH##TYPEC##Storage *storage = luaT_toudata(L, 2, "torch." #TYPEC "Storage"); \
lua_pushnumber(L, THFile_read##TYPEC(self, storage)); \
return 1; \
} \
} \
\
luaL_error(L, "nothing, number, or " #TYPEC "Storage expected"); \
return 0; \
} \
\
static int torch_File_write##TYPEC(lua_State *L) \
{ \
THFile *self = luaT_checkudata(L, 1, "torch.File"); \
int narg = lua_gettop(L); \
\
if(narg == 2) \
{ \
if(lua_isnumber(L, 2)) \
{ \
TYPE value = lua_tonumber(L, 2); \
THFile_write##TYPEC##Scalar(self, (TYPE)value); \
return 0; \
} \
else if(luaT_toudata(L, 2, "torch." #TYPEC "Storage")) \
{ \
TH##TYPEC##Storage *storage = luaT_toudata(L, 2, "torch." #TYPEC "Storage"); \
lua_pushnumber(L, THFile_write##TYPEC(self, storage)); \
return 1; \
} \
} \
\
luaL_error(L, "number, or " #TYPEC "Storage expected"); \
return 0; \
}
IMPLEMENT_TORCH_FILE_RW(Byte, unsigned char)
IMPLEMENT_TORCH_FILE_RW(Char, char)
IMPLEMENT_TORCH_FILE_RW(Short, short)
IMPLEMENT_TORCH_FILE_RW(Int, int)
IMPLEMENT_TORCH_FILE_RW(Long, long)
IMPLEMENT_TORCH_FILE_RW(Float, float)
IMPLEMENT_TORCH_FILE_RW(Double, double)
static int torch_File_readString(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.File");
const char *format = luaL_checkstring(L, 2);
char *str;
ptrdiff_t size;
size = THFile_readStringRaw(self, format, &str);
lua_pushlstring(L, str, size);
THFree(str);
return 1;
}
static int torch_File_writeString(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.File");
const char *str = NULL;
size_t size;
luaL_checktype(L, 2, LUA_TSTRING);
str = lua_tolstring(L, 2, &size);
lua_pushnumber(L, THFile_writeStringRaw(self, str, size));
return 1;
}
static const struct luaL_Reg torch_File__ [] = {
{"isQuiet", torch_File_isQuiet},
{"isReadable", torch_File_isReadable},
{"isWritable", torch_File_isWritable},
{"isBinary", torch_File_isBinary},
{"isAutoSpacing", torch_File_isAutoSpacing},
{"hasError", torch_File_hasError},
{"binary", torch_File_binary},
{"ascii", torch_File_ascii},
{"autoSpacing", torch_File_autoSpacing},
{"noAutoSpacing", torch_File_noAutoSpacing},
{"quiet", torch_File_quiet},
{"pedantic", torch_File_pedantic},
{"clearError", torch_File_clearError},
/* DEBUG: CHECK DISK FREE & READ/WRITE STRING*/
{"readByte", torch_File_readByte},
{"readChar", torch_File_readChar},
{"readShort", torch_File_readShort},
{"readInt", torch_File_readInt},
{"readLong", torch_File_readLong},
{"readFloat", torch_File_readFloat},
{"readDouble", torch_File_readDouble},
{"readString", torch_File_readString},
{"writeByte", torch_File_writeByte},
{"writeChar", torch_File_writeChar},
{"writeShort", torch_File_writeShort},
{"writeInt", torch_File_writeInt},
{"writeLong", torch_File_writeLong},
{"writeFloat", torch_File_writeFloat},
{"writeDouble", torch_File_writeDouble},
{"writeString", torch_File_writeString},
{"synchronize", torch_File_synchronize},
{"seek", torch_File_seek},
{"seekEnd", torch_File_seekEnd},
{"position", torch_File_position},
{"close", torch_File_close},
{NULL, NULL}
};
void torch_File_init(lua_State *L)
{
luaT_newmetatable(L, "torch.File", NULL, NULL, NULL, NULL);
luaT_setfuncs(L, torch_File__, 0);
lua_pop(L, 1);
}
==> File.lua <==
local File = torch.getmetatable('torch.File')
function File:writeBool(value)
if value then
self:writeInt(1)
else
self:writeInt(0)
end
end
function File:readBool()
return (self:readInt() == 1)
end
local TYPE_NIL = 0
local TYPE_NUMBER = 1
local TYPE_STRING = 2
local TYPE_TABLE = 3
local TYPE_TORCH = 4
local TYPE_BOOLEAN = 5
local TYPE_FUNCTION = 6
local TYPE_RECUR_FUNCTION = 8
local LEGACY_TYPE_RECUR_FUNCTION = 7
-- Lua 5.2 compatibility
local loadstring = loadstring or load
function File:isWritableObject(object)
local typename = type(object)
local typeidx
if type(object) ~= 'boolean' and not object then
typeidx = TYPE_NIL
elseif torch.typename(object) and torch.factory(torch.typename(object)) then
typeidx = TYPE_TORCH
elseif typename == 'table' then
typeidx = TYPE_TABLE
elseif typename == 'number' then
typeidx = TYPE_NUMBER
elseif typename == 'string' then
typeidx = TYPE_STRING
elseif typename == 'boolean' then
typeidx = TYPE_BOOLEAN
elseif typename == 'function' and pcall(string.dump, object) then
typeidx = TYPE_RECUR_FUNCTION
end
return typeidx
end
function File:referenced(ref)
-- we use an environment to keep a record of written objects
if not torch.getenv(self).writeObjects then
torch.setenv(self, {
writeObjects={}, writeObjectsRef={},
readObjects={},
objectNameStack={},
upvalueRefToId={}, upvalueIdToClosure={},
})
end
local env = torch.getenv(self)
env.force = not ref
torch.setenv(self,env)
return self
end
function File:isReferenced()
-- if no environment, then no forcing setup yet
if not torch.getenv(self).writeObjects then
return true
end
local env = torch.getenv(self)
return not env.force
end
local function getmetamethod(obj, name)
local func
local status
-- check getmetatable(obj).__name or
-- check getmetatable(obj).name
status, func = pcall(
function()
-- note that sometimes the metatable is hidden
-- we get it for sure through the torch type system
local mt = torch.getmetatable(torch.typename(obj))
if mt then
return mt['__' .. name] or mt[name]
end
end
)
if status and type(func) == 'function' then
return func
end
end
local UPVALUES_TOKEN = {} -- unique object
local function formatStack(objectNameStack)
-- Format object name stack skipping UPVALUES_TOKEN and upvalue index
local parts = {}
for i, v in ipairs(objectNameStack) do
if v ~= UPVALUES_TOKEN and objectNameStack[i-1] ~= UPVALUES_TOKEN then
table.insert(parts, v)
end
end
return table.concat(parts, '.')
end
function File:writeObject(object, debugname, hook)
-- define a default hook function if not provided
hook = hook or function(object) return object end
-- we use an environment to keep a record of written objects
if not torch.getenv(self).writeObjects then
torch.setenv(self, {
writeObjects={}, writeObjectsRef={},
readObjects={},
objectNameStack={},
upvalueRefToId={}, upvalueIdToClosure={},
})
end
-- That guy is used for references' book-keeping
local sobject = object
-- That guy is the object that is actually persisted
-- hook(object) can be used to modify the object before writing it to the file.
-- Useful for serializing objects under a config
-- that we want to deserialize safely under another config.
-- (e.g. Cuda to Float tensors, cudnn to nn, ...)
object = hook(object)
local force = torch.getenv(self).force
-- if nil object, only write the type and return
if type(object) ~= 'boolean' and not object then
self:writeInt(TYPE_NIL)
return
end
local objectNameStack = torch.getenv(self).objectNameStack
table.insert(objectNameStack, debugname or '>')
-- check the type we are dealing with
local typeidx = self:isWritableObject(object)
if not typeidx then
error(string.format('Unwritable object <%s> at %s', type(object), formatStack(objectNameStack)))
end
self:writeInt(typeidx)
if typeidx == TYPE_NUMBER then
self:writeDouble(object)
elseif typeidx == TYPE_BOOLEAN then
self:writeBool(object)
elseif typeidx == TYPE_STRING then
local stringStorage = torch.CharStorage():string(object)
self:writeInt(#stringStorage)
self:writeChar(stringStorage)
elseif typeidx == TYPE_TORCH or typeidx == TYPE_TABLE or typeidx == TYPE_RECUR_FUNCTION then
-- check it exists already (we look at the pointer!)
local objects = torch.getenv(self).writeObjects
local objectsRef = torch.getenv(self).writeObjectsRef
local index = objects[torch.pointer(sobject)]
if index and (not force) then
-- if already exists, write only its index
self:writeInt(index)
else
-- else write the object itself
index = objects.nWriteObject or 0
index = index + 1
if not force then
objects[torch.pointer(sobject)] = index
objectsRef[object] = index -- we make sure the object is not going to disappear
end
self:writeInt(index)
objects.nWriteObject = index
if typeidx == TYPE_RECUR_FUNCTION then
local upvalueRefToId = torch.getenv(self).upvalueRefToId
-- Unique ID for each ref since lightuserdata are not serializable
local nextId = 1
for _ in pairs(upvalueRefToId) do nextId=nextId+1 end
local upvalues = {}
local counter = 0
while true do
counter = counter + 1
local name,value = debug.getupvalue(object, counter)
if not name then break end
if name == '_ENV' then value = nil end
local id=nil
-- debug.upvalueid exists only for lua>=5.2 and luajit
if debug.upvalueid then
local upvalueRef = debug.upvalueid(object, counter)
if not upvalueRefToId[upvalueRef] then
upvalueRefToId[upvalueRef] = nextId
nextId = nextId + 1
end
id = upvalueRefToId[upvalueRef]
end
table.insert(upvalues, {name=name, id=id, value=value})
end
local dumped = string.dump(object)
local stringStorage = torch.CharStorage():string(dumped)
self:writeInt(#stringStorage)
self:writeChar(stringStorage)
self:writeObject(upvalues, UPVALUES_TOKEN, hook)
elseif typeidx == TYPE_TORCH then
local version = torch.CharStorage():string('V ' .. torch.version(object))
local className = torch.CharStorage():string(torch.typename(object))
self:writeInt(#version)
self:writeChar(version)
self:writeInt(#className)
self:writeChar(className)
local write = getmetamethod(object, 'write')
if write then
write(object, self)
elseif type(object) == 'table' then
local var = {}
for k,v in pairs(object) do
if self:isWritableObject(v) then
var[k] = v
else
print(string.format('$ Warning: cannot write object field <%s> of <%s> %s', k, torch.typename(object), formatStack(objectNameStack)))
end
end
self:writeObject(var, torch.typename(object), hook)
else
error(string.format('<%s> is a non-serializable Torch object %s', torch.typename(object), formatStack(objectNameStack)))
end
else -- it is a table
local size = 0; for k,v in pairs(object) do size = size + 1 end
self:writeInt(size)
for k,v in pairs(object) do
self:writeObject(k, nil, hook)
local name = (type(k) == 'string' or type(k) == 'number') and tostring(k) or nil
-- special case name for upvalues
if objectNameStack[#objectNameStack-1] == UPVALUES_TOKEN and
name == 'value' and type(object.name) == 'string' then
name = object.name
end
self:writeObject(v, name, hook)
end
end
end
else
error('Unwritable object')
end
table.remove(objectNameStack)
end
function File:readObject()
-- we use an environment to keep a record of read objects
if not torch.getenv(self).writeObjects then
torch.setenv(self, {
writeObjects={}, writeObjectsRef={},
readObjects={},
objectNameStack={},
upvalueRefToId={}, upvalueIdToClosure={},
})
end
local force = torch.getenv(self).force
-- read the typeidx
local typeidx = self:readInt()
-- is it nil?
if typeidx == TYPE_NIL then
return nil
end
if typeidx == TYPE_NUMBER then
return self:readDouble()
elseif typeidx == TYPE_BOOLEAN then
return self:readBool()
elseif typeidx == TYPE_STRING then
local size = self:readInt()
return self:readChar(size):string()
elseif typeidx == TYPE_FUNCTION then
local size = self:readInt()
local dumped = self:readChar(size):string()
local func, err = loadstring(dumped)
if not func then
io.stderr:write(string.format('Warning: Failed to load function from bytecode: %s', err))
end
local upvalues = self:readObject()
for index,upvalue in ipairs(upvalues) do
debug.setupvalue(func, index, upvalue)
end
return func
elseif typeidx == TYPE_TABLE or typeidx == TYPE_TORCH or typeidx == TYPE_RECUR_FUNCTION or typeidx == LEGACY_TYPE_RECUR_FUNCTION then
-- read the index
local index = self:readInt()
-- check it is loaded already
local objects = torch.getenv(self).readObjects
if objects[index] and not force then
return objects[index]
end
-- otherwise read it
if typeidx == TYPE_RECUR_FUNCTION or typeidx == LEGACY_TYPE_RECUR_FUNCTION then
local size = self:readInt()
local dumped = self:readChar(size):string()
local func, err = loadstring(dumped)
if not func then
io.stderr:write(string.format('Warning: Failed to load function from bytecode: %s', err))
end
if not force then
objects[index] = func
end
local upvalueIdToClosure = torch.getenv(self).upvalueIdToClosure
local upvalues = self:readObject()
for index,upvalue in ipairs(upvalues) do
if typeidx == LEGACY_TYPE_RECUR_FUNCTION then
debug.setupvalue(func, index, upvalue)
elseif upvalue.name == '_ENV' then
debug.setupvalue(func, index, _ENV)
else
debug.setupvalue(func, index, upvalue.value)
-- debug.upvaluejoin exists only for lua>=5.2 and luajit
if debug.upvaluejoin and upvalue.id then
if upvalueIdToClosure[upvalue.id] then
-- This upvalue is linked to another one
local otherClosure = upvalueIdToClosure[upvalue.id]
debug.upvaluejoin(func, index, otherClosure.func, otherClosure.index)
else
-- Save this closure for next time
upvalueIdToClosure[upvalue.id] = {
func = func,
index = index,
}
end
end
end
end
return func
elseif typeidx == TYPE_TORCH then
local version, className, versionNumber
version = self:readChar(self:readInt()):string()
versionNumber = tonumber(string.match(version, '^V (.*)$'))
if not versionNumber then
className = version
versionNumber = 0 -- file created before existence of versioning system
else
className = self:readChar(self:readInt()):string()
end
if not torch.factory(className) then
error(string.format('unknown Torch class <%s>', tostring(className)))
end
local object = torch.factory(className)(self)
if not force then
objects[index] = object
end
local read = getmetamethod(object, 'read')
if read then
read(object, self, versionNumber)
elseif type(object) == 'table' then
local var = self:readObject()
for k,v in pairs(var) do
object[k] = v
end
else
error(string.format('Cannot load object class <%s>', tostring(className)))
end
return object
else -- it is a table
local size = self:readInt()
local object = {}
if not force then
objects[index] = object
end
for i = 1,size do
local k = self:readObject()
local v = self:readObject()
object[k] = v
end
return object
end
else
error('unknown object')
end
end
-- simple helpers to save/load arbitrary objects/tables
function torch.save(filename, object, mode, referenced)
assert(mode == nil or mode == 'binary' or mode == 'ascii', '"binary" or "ascii" (or nil) expected for mode')
assert(referenced == nil or referenced == true or referenced == false, 'true or false (or nil) expected for referenced')
mode = mode or 'binary'
referenced = referenced == nil and true or referenced
local file = torch.DiskFile(filename, 'w')
file[mode](file)
file:referenced(referenced)
file:writeObject(object)
file:close()
end
function torch.load(filename, mode, referenced)
assert(mode == 'binary' or mode == 'b32' or mode == 'b64' or
mode == nil or mode == 'ascii',
'"binary", "b32", "b64" or "ascii" (or nil) expected for mode')
assert(referenced == nil or referenced == true or referenced == false,
'true or false (or nil) expected for referenced')
local longSize
if mode == 'b32' or mode == 'b64' then
longSize = tonumber(mode:match('%d+')) / 8
mode = 'binary'
end
mode = mode or 'binary'
referenced = referenced == nil and true or referenced
local file = torch.DiskFile(filename, 'r')
file[mode](file)
file:referenced(referenced)
if longSize then file:longSize(longSize) end
local object = file:readObject()
file:close()
return object
end
-- simple helpers to serialize/deserialize arbitrary objects/tables
function torch.serialize(object, mode)
local storage = torch.serializeToStorage(object, mode)
return storage:string()
end
-- Serialize to a CharStorage, not a lua string. This avoids an extra copy of
-- the serialized data into an intermediate Lua string, which matters for large objects.
function torch.serializeToStorage(object, mode)
mode = mode or 'binary'
local f = torch.MemoryFile()
f = f[mode](f)
f:writeObject(object)
local storage = f:storage()
-- the storage includes an extra NULL character: get rid of it
storage:resize(storage:size()-1)
f:close()
return storage
end
function torch.deserializeFromStorage(storage, mode)
mode = mode or 'binary'
local tx = torch.CharTensor(storage)
local xp = torch.CharStorage(tx:size(1)+1)
local txp = torch.CharTensor(xp)
txp:narrow(1,1,tx:size(1)):copy(tx)
txp[tx:size(1)+1] = 0
local f = torch.MemoryFile(xp)
f = f[mode](f)
local object = f:readObject()
f:close()
return object
end
function torch.deserialize(str, mode)
local storage = torch.CharStorage():string(str)
return torch.deserializeFromStorage(storage, mode)
end
-- public API (saveobj/loadobj are safe for global import)
torch.saveobj = torch.save
torch.loadobj = torch.load
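A minimal sketch of the helpers above (the file name is illustrative): torch.save/torch.load round-trip an object through disk, while torch.serialize/torch.deserialize do the same in memory:

```lua
-- Round-trip an arbitrary table through disk and through a string.
local obj = { name = 'example', weights = torch.randn(2, 2) }
torch.save('obj.t7', obj)                 -- binary mode by default
local back = torch.load('obj.t7')
print(back.name, back.weights:size(1))

local str = torch.serialize(obj)          -- in-memory equivalent
print(torch.deserialize(str).name)
```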
==> Generator.c <==
#include <general.h>
int torch_Generator_new(lua_State *L)
{
THGenerator *gen = THGenerator_new();
luaT_pushudata(L, gen, torch_Generator);
return 1;
}
int torch_Generator_free(lua_State *L)
{
THGenerator *gen= luaT_checkudata(L, 1, torch_Generator);
THGenerator_free(gen);
return 0;
}
static int torch_Generator_write(lua_State *L)
{
THGenerator *gen = luaT_checkudata(L, 1, torch_Generator);
THFile *file = luaT_checkudata(L, 2, "torch.File");
THFile_writeByteRaw(file, (unsigned char *)gen, sizeof(THGenerator));
return 0;
}
static int torch_Generator_read(lua_State *L)
{
THGenerator *gen = luaT_checkudata(L, 1, torch_Generator);
THFile *file = luaT_checkudata(L, 2, "torch.File");
THFile_readByteRaw(file, (unsigned char *)gen, sizeof(THGenerator));
return 0;
}
static const struct luaL_Reg torch_Generator_table_ [] = {
{"write", torch_Generator_write},
{"read", torch_Generator_read},
{NULL, NULL}
};
#define torch_Generator_factory torch_Generator_new
void torch_Generator_init(lua_State *L)
{
luaT_newmetatable(L, torch_Generator, NULL,
torch_Generator_new, torch_Generator_free, torch_Generator_factory);
luaT_setfuncs(L, torch_Generator_table_, 0);
lua_pop(L, 1);
}
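A minimal sketch of how this binding is used from Lua (the optional-generator calling convention is wired up by the wrappers generated from random.lua): a torch.Generator holds independent RNG state that the random functions accept as an optional first argument:

```lua
-- Independent, reproducible RNG state.
local gen = torch.Generator()
torch.manualSeed(gen, 1234)
print(torch.random(gen))          -- draw from this generator only
print(torch.uniform(gen, 0, 1))   -- the global RNG state is untouched
```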
==> MemoryFile.c <==
#include "general.h"
static int torch_MemoryFile_new(lua_State *L)
{
const char *mode;
THCharStorage *storage = luaT_toudata(L, 1, "torch.CharStorage");
THFile *self;
if(storage)
{
mode = luaL_optstring(L, 2, "rw");
self = THMemoryFile_newWithStorage(storage, mode);
}
else
{
mode = luaL_optstring(L, 1, "rw");
self = THMemoryFile_new(mode);
}
luaT_pushudata(L, self, "torch.MemoryFile");
return 1;
}
static int torch_MemoryFile_storage(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile");
THCharStorage_retain(THMemoryFile_storage(self));
luaT_pushudata(L, THMemoryFile_storage(self), "torch.CharStorage");
return 1;
}
static int torch_longSize(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile");
THMemoryFile_longSize(self, lua_tointeger(L, 2));
lua_settop(L, 1);
return 1;
}
static int torch_MemoryFile_free(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile");
THFile_free(self);
return 0;
}
static int torch_MemoryFile___tostring__(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile");
lua_pushfstring(L, "torch.MemoryFile [status: %s -- mode: %c%c]",
(THFile_isOpened(self) ? "open" : "closed"),
(THFile_isReadable(self) ? 'r' : ' '),
(THFile_isWritable(self) ? 'w' : ' '));
return 1;
}
static const struct luaL_Reg torch_MemoryFile__ [] = {
{"storage", torch_MemoryFile_storage},
{"longSize", torch_longSize},
{"__tostring__", torch_MemoryFile___tostring__},
{NULL, NULL}
};
void torch_MemoryFile_init(lua_State *L)
{
luaT_newmetatable(L, "torch.MemoryFile", "torch.File",
torch_MemoryFile_new, torch_MemoryFile_free, NULL);
luaT_setfuncs(L, torch_MemoryFile__, 0);
lua_pop(L, 1);
}
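A minimal sketch of torch.MemoryFile usage: an in-RAM file whose bytes can be recovered as a CharStorage, which is exactly what torch.serializeToStorage in File.lua relies on:

```lua
-- Serialize an object into memory and inspect the resulting bytes.
local f = torch.MemoryFile():binary()
f:writeObject({ 1, 2, 3 })
local storage = f:storage()       -- CharStorage holding the written bytes
print(storage:size())             -- includes a trailing NUL character
f:close()
```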
==> PipeFile.c <==
#include "general.h"
static int torch_PipeFile_new(lua_State *L)
{
const char *name = luaL_checkstring(L, 1);
const char *mode = luaL_optstring(L, 2, "r");
int isQuiet = luaT_optboolean(L, 3, 0);
THFile *self = THPipeFile_new(name, mode, isQuiet);
luaT_pushudata(L, self, "torch.PipeFile");
return 1;
}
static int torch_PipeFile_free(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.PipeFile");
THFile_free(self);
return 0;
}
static int torch_PipeFile___tostring__(lua_State *L)
{
THFile *self = luaT_checkudata(L, 1, "torch.PipeFile");
lua_pushfstring(L, "torch.PipeFile on <%s> [status: %s -- mode: %c%c]",
THDiskFile_name(self),
(THFile_isOpened(self) ? "open" : "closed"),
(THFile_isReadable(self) ? 'r' : ' '),
(THFile_isWritable(self) ? 'w' : ' '));
return 1;
}
static const struct luaL_Reg torch_PipeFile__ [] = {
{"__tostring__", torch_PipeFile___tostring__},
{NULL, NULL}
};
void torch_PipeFile_init(lua_State *L)
{
luaT_newmetatable(L, "torch.PipeFile", "torch.DiskFile",
torch_PipeFile_new, torch_PipeFile_free, NULL);
luaT_setfuncs(L, torch_PipeFile__, 0);
lua_pop(L, 1);
}
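A minimal sketch of torch.PipeFile, which inherits the DiskFile read methods but runs a shell command instead of opening a file (the command shown is illustrative):

```lua
-- Read the first line of output from a shell command.
local f = torch.PipeFile('echo hello')
print(f:readString('*l'))   -- 'hello'
f:close()
```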
==> README.md <==
[![Join the chat at https://gitter.im/torch/torch7](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/torch/torch7?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/torch/torch7.svg)](https://travis-ci.org/torch/torch7)
## Need help? ##
* Questions, Support, Install issues: [Google groups](https://groups.google.com/forum/#!forum/torch7)
* Reporting bugs: [torch7](https://github.com/torch/torch7/issues) [nn](https://github.com/torch/nn/issues) [cutorch](https://github.com/torch/cutorch/issues) [cunn](https://github.com/torch/cunn/issues) [optim](https://github.com/torch/optim/issues) [threads](https://github.com/torch/threads/issues)
* Hanging out with other developers and users (strictly no install issues, no large blobs of text): [Gitter Chat](https://gitter.im/torch/torch7)
# Torch Package Reference Manual #
__Torch__ is the main package in [Torch7](http://torch.ch) where data
structures for multi-dimensional tensors and mathematical operations
over these are defined. Additionally, it provides many utilities for
accessing files, serializing objects of arbitrary types and other
useful utilities.
## Torch Packages ##
* Tensor Library
* [Tensor](doc/tensor.md) defines the _all powerful_ tensor object that provides multi-dimensional numerical arrays with type templating.
* [Mathematical operations](doc/maths.md) that are defined for the tensor object types.
* [Storage](doc/storage.md) defines a simple storage interface that controls the underlying storage for any tensor object.
* File I/O Interface Library
* [File](doc/file.md) is an abstract interface for common file operations.
* [Disk File](doc/diskfile.md) defines operations on files stored on disk.
  * [Memory File](doc/memoryfile.md) defines operations on files stored in RAM.
* [Pipe File](doc/pipefile.md) defines operations for using piped commands.
* [High-Level File operations](doc/serialization.md) defines higher-level serialization functions.
* Useful Utilities
* [Timer](doc/timer.md) provides functionality for _measuring time_.
* [Tester](doc/tester.md) is a generic tester framework.
* [CmdLine](doc/cmdline.md) is a command line argument parsing utility.
* [Random](doc/random.md) defines a random number generator package with various distributions.
* Finally useful [utility](doc/utility.md) functions are provided for easy handling of torch tensor types and class inheritance.
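To give a concrete flavor of the packages listed above, a short session using the tensor, math, timer, and serialization utilities might look like this (the file name is illustrative):

```lua
-- Tensors, math, timing and serialization in a few lines.
local timer = torch.Timer()
local a = torch.randn(100, 100)
local b = torch.mm(a, a:t())     -- matrix multiply
torch.save('b.t7', b)            -- serialize to disk
print(string.format('done in %.3fs', timer:time().real))
```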
## Useful Links ##
* [Community packages](https://github.com/torch/torch7/wiki/Cheatsheet)
* [Torch Blog](http://torch.ch/blog/)
* [Torch Slides](https://github.com/soumith/cvpr2015/blob/master/cvpr-torch.pdf)
==> ROADMAP.md <==
# Torch Roadmap (August 2015 - March 2016)
This roadmap document is intended to serve as a loose plan of our vision for Torch in the short term.
It is open to community feedback and contribution and only intends to serve as an initial draft.
After community feedback, we shall freeze it and work on it.
The roadmap focuses on five separate things:
- Core development: improving the core technically. Design changes, code refactors, performance, they go here.
- Documentation and Accessibility: Outlining the changes in documentation, and improving general user and developer documentation in various ways.
- Versioning and Packaging: Planned and much needed changes to the packaging of Torch are discussed here.
- Continuous Build Infrastructure: Making our continuous builds more robust, introducing CUDA and OpenCL contbuilds etc.
- Other improvements
## Torch Core Project Development
- New class system:
- **[definite]** with no global side-effects (i.e. the class constructor should be scoped into its parent package).
Get rid of every statement/system that has a global effect on the environment (torch.setdefaulttensortype => dangerous and not clean)
- **[needs discussion]** fully serializable (i.e. when deserializing/reloading a model, there shouldn't be a need to load the libraries that originally defined the class, like nn; the class definition should be serialized as well). This would remove a lot of backward-compatibility hacks that we currently have to add to class definitions.
- **koray**: I like this, but wouldn't it break backward compatibility?
Currently, whatever we serialize, it is just the data and implementation is defined
at load time, so if a bug is fixed (or introduced) you use that.
And it starts being ambiguous, what if I load a layer from file and
create a new one and their implementation is inconsistent...)
- **[definite]** Get rid of non-tensor-related stuff (like serialization) in TH, and move it to lua side
- **[needs discussion]** OpenMP: Should it stay or go? Is Threads sufficient?
- **Ronan**: I really wonder about this guy, especially now that I have been using threads intensively. I am not sure that fine-grained threading is necessary.
- **koray**: I guess you mean with threading, there is no need for OpenMP, but I disagree.
Our convolution layer will use multiple threads, and then if we run a ReLU over a huge state space, it would become embarrassingly slow.
We shouldn't expect everyone to run their experiments in a threading framework. It is more work than necessary sometimes.
- **[needs discussion]** Templated C++ in TH Core?
- **Ronan**: Should I clean up TH core? In the end, I am scared to move to C++, but some iterator-based functions taking a closure could be nice (I have some of those that I could add easily).
I could move to C++ if it were only templates + keeping pointers (and not C++11/14/17, because that would limit the number of users it can reach, given the latest compilers needed, etc.).
- **[definite]** Migrate to a single, better/modern testing support
- **koray**: like some aspects of Totem, but should be in core Tester
- **[definite]** Benchmarking support in Tester
- **[definite]** Consistent testing scripts across all core projects
- **[definite]** 'nn' container unified interface between containers and graph
- **[mostly definite]** Switch to a batch-only assumption in 'nn'. Right now, the code is unnecessarily complicated by the stochastic/batch confusion, which requires extra functions like nInputDims and such.
- **[needs discussion]** Support named arguments in the constructor for all 'nn' layers.
- **[definite]** 'rnn' package.
- **Soumith**: Nicholas Leonard's seems to be a good one.
- **[mostly definite]** argcheck for all core functions in torch. Get rid of cwrap's ugliness.
- **[definite]** improve paths to support more file system operations
- **Clement**: could lfs and penlight be made more standard? penlight is a heavy package but provides so much utility
- **Soumith**: I think penlight is lightweight and provides strong utility, definitely consider dependence.
- **[definite]** JIT/Lua/FFI/GC:
- **koray**: I think Torch should be agnostic to whatever is the backend;
- **clement**: yes!
- at this point, we need to have all core packages use the regular Lua api (almost the case)
- **Ronan**: agreed.
- **[definite]** plan to have standalone FFI?
- Facebook releases their PUC Lua-based FFI package, mostly improved by Sam Gross
- [needs discussion] **Ronan** improves it a bit more to use Leon's C99 parser
- **Koray**: I am not opposed to Leon's C99 parser, but we should not have the QT like situation where
it relies mostly on Leon to maintain it.
And, still we need to have FFI since there are people and packages that rely on it now.
- **[definite]** Lua 5.2 migration (I think it's already finished ;) ).
- **[mostly definite]** Lua 5.3 migration
- **[mostly definite]** Optionally replace GC with ref-counting (an existing version is in luajit-rocks, but it is completely broken and will need to be fixed)
- **[needs discussion]** Make OpenCL support more visible under torch/opencl (**Soumith**: Hugh Perkins will maintain it of course ;) ).
- **[definite]** Split nn into THNN and nn. THNN would be NN package using TH as backend and nn would be the lua layer. THNN can be used as a standalone C library. Same for cunn
- **[Definite]** CUDA typed tensor support - CudaHalfTensor CudaDoubleTensor etc.
- **[Definite]** better plotting support
- **[needs discussion]** UI package that doesn't suck?
- **Ronan**: something based on cairo?
- **clement**: not sure if this would have much adoption
- **Ronan**: yes, it is a worry. I started to do some fancy stuff there; it is not that hard.
However, I would need quite some time to polish it.
I think having something fully customizable from Lua really
makes a difference (rather than something like Qt, for example).
- something based on a web client?
- **clement**: I like the idea of iTorch but could never easily build it; the build process is too big.
- **Ronan**: I cannot use something which forces me to use global variables.
- **koray**: I think at the end of the day, we need to have both a GUI client and a web-based client.
My main problem with web-based clients is that I can't easily create
custom displays to play an animation or such.
It is an offline process where I need to generate a movie and then load it in.
This and similar things make it hard for me to use.
Also, I agree, I actually could not install iTorch on my laptop
before the CVPR tutorial somehow; it did not want to work :).
- **soumith**: I think we should propose a common display API that any interface can implement,
so that users don't need to change scripts across different UI backends.
Also, szym/display is a good candidate for the web UI; iTorch is indeed a bit of a pain to install.
- Should we endorse iTorch for everyone to use?
- **Ronan**: I know **Soumith** likes it, but I am not a big fan.
- It is heavy and encourages the use of global variables. Excellent for tutorials, though.
- This ties into the first question in the **Other Questions** section.
- Can we/the community do pull requests on iTorch? (**Soumith**: Yes)
- The first step would be to leanify the dependencies and/or the install procedure (**Soumith**: agreed)
- **[needs discussion]** How about Penlight? It has many crucial things that people use.
Should we endorse it, use some things from it, or replicate some of penlight's functionality in torch?
- **clement**: upvoting this! we use it extensively.
- **Ronan**: I live better with fewer abstractions, but I can be convinced there.
However, I find penlight quite big.
There are also things I do not like, such as the classes (because of the way they chose to implement classes).
- **[needs discussion]** How about Moses? A new, lean functional package that's pretty useful
- **[definite]** A style guide
- Guidelines are super important:
- for Lua: at least impose strict camelCase + 3-space indentation (no tabs); see the sketch just below
- for C: camelCase + underscores to represent namespace scoping + 2-space indentation
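A minimal sketch of what the proposed Lua conventions could look like; the function and variable names here are made up purely for illustration:

```lua
-- camelCase naming, 3-space indentation, no tabs
local function computeMeanValue(inputTensor)
   local totalSum = inputTensor:sum()
   return totalSum / inputTensor:nElement()
end
```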
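And for the argcheck item above, a minimal sketch of what declarative argument checking could look like, assuming the torch/argcheck API (the function and argument names are illustrative, not an existing core function):

```lua
local argcheck = require 'argcheck'

-- declare the expected arguments once...
local check = argcheck{
   {name='x', type='number'},
   {name='msg', type='string', default='hello'}
}

-- ...and reuse the generated checker in the function body
local function addFive(...)
   local x, msg = check(...)
   print(string.format('%g + 5 = %g [msg = %s]', x, x + 5, msg))
end

-- ordered and named calls would both be accepted:
-- addFive(1)              -- msg defaults to 'hello'
-- addFive{x=1, msg='hi'}  -- named-argument form
```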
## Documentation + Accessibility
- Tutorials: provide guidelines and a basic framework/standard to write and publish tutorials?
- Universal dataset API
- Dataset classes for several popular datasets
- high performance, thread support etc.
- support CPU and GPU
- Model zoo + training scripts; with training scripts we can highlight Torch's strengths
- How do we build a super-friendly model zoo? A git repo of pre-trained models?
- Better documentation support, have a doc server
- Documentation for TH/THC interface and design
- Inline documentation parser
- doc/shell integration (maybe this is still working but needs redoing?)
## Versioning + Packaging
- Package owners need to start releasing frequent versions (e.g. torch v7.0.1, 7.0.2, ...)
- scm packages should become deprecated
- Packages need to avoid global side effects, and return themselves as simple tables (Lua 5.2 started enforcing this on the C side)
- Provide standard AMI instances that people can launch (already loosely done by the community). We can load them with many standard and optional packages and/or provide a one-line option to update to the latest.
## Build Infrastructure Requirements
- Prepare core distro release
- Professional continuous build for the distro and individual core projects
- Continuous build for GPU
- The continuous build should include testing
- The distro should be built and tested at every pull request into any of the member projects
- CI for Linux and OSX
## Other Questions?
- If there is a project that seems good from outside the consortium, how do we endorse/improve/modify it?
- do we set some technical criteria for that?
- being able to do pull requests?
- licensing?
- or maybe maintain a list of suggested packages?
- when does the existence of a package stop us from developing the same thing in core torch?
- **Soumith**: I think this should largely be community driven and by popularity. Top starred or watched repos in the ecosystem would be a good start.
Storage.c 0000664 0000000 0000000 00000000624 13162462543 0012636 0 ustar 00root root 0000000 0000000 #include "general.h"
#define torch_Storage_(NAME) TH_CONCAT_4(torch_,Real,Storage_,NAME)
#define THFile_readRealRaw TH_CONCAT_3(THFile_read, Real, Raw)
#define THFile_writeRealRaw TH_CONCAT_3(THFile_write, Real, Raw)
#define torch_Storage TH_CONCAT_STRING_3(torch.,Real,Storage)
#include "generic/Storage.c"
#include "THGenerateAllTypes.h"
#include "generic/Storage.c"
#include "THGenerateHalfType.h"
Tensor.c 0000664 0000000 0000000 00000000620 13162462543 0012500 0 ustar 00root root 0000000 0000000 #include "general.h"
#define torch_Storage_(NAME) TH_CONCAT_4(torch_,Real,Storage_,NAME)
#define torch_Storage TH_CONCAT_STRING_3(torch.,Real,Storage)
#define torch_Tensor_(NAME) TH_CONCAT_4(torch_,Real,Tensor_,NAME)
#define torch_Tensor TH_CONCAT_STRING_3(torch.,Real,Tensor)
#include "generic/Tensor.c"
#include "THGenerateAllTypes.h"
#include "generic/Tensor.c"
#include "THGenerateHalfType.h"
Tensor.lua 0000664 0000000 0000000 00000040500 13162462543 0013040 0 ustar 00root root 0000000 0000000 -- additional methods for Storage
local Storage = {}
-- additional methods for Tensor
local Tensor = {}
-- types
local types = {'Byte', 'Char', 'Short', 'Int', 'Long', 'Float', 'Half', 'Double'}
-- Lua 5.2 compatibility
local log10 = math.log10 or function(x) return math.log(x, 10) end
-- tostring() functions for Tensor and Storage
local function Storage__printformat(self)
if self:size() == 0 then
return "", nil, 0
end
local intMode = true
local type = torch.typename(self)
-- if type == 'torch.FloatStorage' or type == 'torch.DoubleStorage' then
for i=1,self:size() do
if self[i] ~= math.ceil(self[i]) then
intMode = false
break
end
end
-- end
local tensor = torch.DoubleTensor(torch.DoubleStorage(self:size()):copy(self), 1, self:size()):abs()
local expMin = tensor:min()
if expMin ~= 0 then
expMin = math.floor(log10(expMin)) + 1
else
expMin = 1
end
local expMax = tensor:max()
if expMax ~= 0 then
expMax = math.floor(log10(expMax)) + 1
else
expMax = 1
end
local format
local scale
local sz
if intMode then
if expMax > 9 then
format = "%11.4e"
sz = 11
else
format = "%SZd"
sz = expMax + 1
end
else
if expMax-expMin > 4 then
format = "%SZ.4e"
sz = 11
if math.abs(expMax) > 99 or math.abs(expMin) > 99 then
sz = sz + 1
end
else
if expMax > 5 or expMax < 0 then
format = "%SZ.4f"
sz = 7
scale = math.pow(10, expMax-1)
else
format = "%SZ.4f"
if expMax == 0 then
sz = 7
else
sz = expMax+6
end
end
end
end
format = string.gsub(format, 'SZ', sz)
if scale == 1 then
scale = nil
end
return format, scale, sz
end
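-- Illustrative example of the format selection above: for an integer-valued
-- storage whose largest magnitude is 123, expMax is floor(log10(123)) + 1 = 3,
-- so the chosen format is "%4d" (sz = expMax + 1).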
function Storage.__tostring__(self)
local strt = {}
local format,scale = Storage__printformat(self)
if format:sub(2,4) == 'nan' then format = '%f' end
if scale then
table.insert(strt, string.format('%g', scale) .. ' *\n')
for i = 1,self:size() do
table.insert(strt, string.format(format, self[i]/scale) .. '\n')
end
else
for i = 1,self:size() do
table.insert(strt, string.format(format, self[i]) .. '\n')
end
end
table.insert(strt, '[' .. torch.typename(self) .. ' of size ' .. self:size() .. ']\n')
local str = table.concat(strt)
return str
end
for _,type in ipairs(types) do
local metatable = torch.getmetatable('torch.' .. type .. 'Storage')
for funcname, func in pairs(Storage) do
rawset(metatable, funcname, func)
end
end
local function Tensor__printMatrix(self, indent)
local format,scale,sz = Storage__printformat(self:storage())
if format:sub(2,4) == 'nan' then format = '%f' end
-- print('format = ' .. format)
scale = scale or 1
indent = indent or ''
local strt = {indent}
local nColumnPerLine = math.floor((80-#indent)/(sz+1))
-- print('sz = ' .. sz .. ' and nColumnPerLine = ' .. nColumnPerLine)
local firstColumn = 1
local lastColumn = -1
while firstColumn <= self:size(2) do
if firstColumn + nColumnPerLine - 1 <= self:size(2) then
lastColumn = firstColumn + nColumnPerLine - 1
else
lastColumn = self:size(2)
end
if nColumnPerLine < self:size(2) then
if firstColumn ~= 1 then
table.insert(strt, '\n')
end
table.insert(strt, 'Columns ' .. firstColumn .. ' to ' .. lastColumn .. '\n' .. indent)
end
if scale ~= 1 then
table.insert(strt, string.format('%g', scale) .. ' *\n ' .. indent)
end
for l=1,self:size(1) do
local row = self:select(1, l)
for c=firstColumn,lastColumn do
table.insert(strt, string.format(format, row[c]/scale))
if c == lastColumn then
table.insert(strt, '\n')
if l~=self:size(1) then
if scale ~= 1 then
table.insert(strt, indent .. ' ')
else
table.insert(strt, indent)
end
end
else
table.insert(strt, ' ')
end
end
end
firstColumn = lastColumn + 1
end
local str = table.concat(strt)
return str
end
local function Tensor__printTensor(self)
local counter = torch.LongStorage(self:nDimension()-2)
local strt = {''}
local finished
counter:fill(1)
counter[1] = 0
while true do
for i=1,self:nDimension()-2 do
counter[i] = counter[i] + 1
if counter[i] > self:size(i) then
if i == self:nDimension()-2 then
finished = true
break
end
counter[i] = 1
else
break
end
end
if finished then
break
end
-- print(counter)
if #strt > 1 then
table.insert(strt, '\n')
end
table.insert(strt, '(')
local tensor = self
for i=1,self:nDimension()-2 do
tensor = tensor:select(1, counter[i])
table.insert(strt, counter[i] .. ',')
end
table.insert(strt, '.,.) = \n')
table.insert(strt, Tensor__printMatrix(tensor, ' '))
end
return table.concat(strt)
end
function Tensor.__tostring__(self)
local strt = {''}
if self:nDimension() == 0 then
table.insert(strt, '[' .. torch.typename(self) .. ' with no dimension]\n')
else
local tensor = torch.DoubleTensor():resize(self:size()):copy(self)
if tensor:nDimension() == 1 then
local format,scale,sz = Storage__printformat(tensor:storage())
if format:sub(2,4) == 'nan' then format = '%f' end
if scale then
table.insert(strt, string.format('%g', scale) .. ' *\n')
for i = 1,tensor:size(1) do
table.insert(strt, string.format(format, tensor[i]/scale) .. '\n')
end
else
for i = 1,tensor:size(1) do
table.insert(strt, string.format(format, tensor[i]) .. '\n')
end
end
table.insert(strt, '[' .. torch.typename(self) .. ' of size ' .. tensor:size(1) .. ']\n')
elseif tensor:nDimension() == 2 then
table.insert(strt, Tensor__printMatrix(tensor))
table.insert(strt, '[' .. torch.typename(self) .. ' of size ' .. tensor:size(1) .. 'x' .. tensor:size(2) .. ']\n')
else
table.insert(strt, Tensor__printTensor(tensor))
table.insert(strt, '[' .. torch.typename(self) .. ' of size ')
for i=1,tensor:nDimension() do
table.insert(strt, tensor:size(i))
if i ~= tensor:nDimension() then
table.insert(strt, 'x')
end
end
table.insert(strt, ']\n')
end
end
return table.concat(strt)
end
function Tensor.type(self,type)
local current = torch.typename(self)
if not type then return current end
if type ~= current then
local new = torch.getmetatable(type).new()
if self:nElement() > 0 then
new:resize(self:size()):copy(self)
end
return new
else
return self
end
end
function Tensor.typeAs(self,tensor)
return self:type(tensor:type())
end
function Tensor.byte(self)
return self:type('torch.ByteTensor')
end
function Tensor.char(self)
return self:type('torch.CharTensor')
end
function Tensor.short(self)
return self:type('torch.ShortTensor')
end
function Tensor.int(self)
return self:type('torch.IntTensor')
end
function Tensor.long(self)
return self:type('torch.LongTensor')
end
function Tensor.float(self)
return self:type('torch.FloatTensor')
end
function Tensor.double(self)
return self:type('torch.DoubleTensor')
end
function Tensor.half(self)
return self:type('torch.HalfTensor')
end
function Tensor.real(self)
return self:type(torch.getdefaulttensortype())
end
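-- Illustrative usage of the conversion helpers above (a sketch):
--    local x = torch.DoubleTensor(3):fill(1.5)
--    x:int()      -- a new torch.IntTensor (values truncated to 1)
--    x:double()   -- returns x itself: no copy when the type already matches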
function Tensor.expand(result,tensor,...)
-- get sizes
local sizes = {...}
local t = torch.type(tensor)
if (t == 'number' or t == 'torch.LongStorage') then
table.insert(sizes,1,tensor)
tensor = result
result = tensor.new()
end
-- check type
local size
if torch.type(sizes[1])=='torch.LongStorage' then
size = sizes[1]
else
size = torch.LongStorage(#sizes)
for i,s in ipairs(sizes) do
size[i] = s
end
end
-- get dimensions
local tensor_dim = tensor:dim()
local tensor_stride = tensor:stride()
local tensor_size = tensor:size()
-- check nb of dimensions
if #size ~= tensor:dim() then
error('the number of dimensions provided must equal tensor:dim()')
end
-- create a new geometry for tensor:
for i = 1,tensor_dim do
if tensor_size[i] == 1 then
tensor_size[i] = size[i]
tensor_stride[i] = 0
elseif tensor_size[i] ~= size[i] then
error('incorrect size: only supporting singleton expansion (size=1)')
end
end
-- create new view, with singleton expansion:
result:set(tensor:storage(), tensor:storageOffset(),
tensor_size, tensor_stride)
return result
end
torch.expand = Tensor.expand
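-- Illustrative usage: expansion creates a view with stride 0 along the
-- singleton dimension, so no data is copied and the rows alias each other:
--    local x = torch.Tensor{{1, 2, 3}}   -- size 1x3
--    local y = torch.expand(x, 4, 3)     -- size 4x3, shares x's storage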
function Tensor.expandAs(result,tensor,template)
if template then
return result:expand(tensor,template:size())
end
return result:expand(tensor:size())
end
torch.expandAs = Tensor.expandAs
function Tensor.repeatTensor(result,tensor,...)
-- get sizes
local sizes = {...}
local t = torch.type(tensor)
if (t == 'number' or t == 'torch.LongStorage') then
table.insert(sizes,1,tensor)
tensor = result
result = tensor.new()
end
-- if not contiguous, then force the tensor to be contiguous
if not tensor:isContiguous() then tensor = tensor:clone() end
-- check type
local size
if torch.type(sizes[1])=='torch.LongStorage' then
size = sizes[1]
else
size = torch.LongStorage(#sizes)
for i,s in ipairs(sizes) do
size[i] = s
end
end
if size:size() < tensor:dim() then
error('Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor')
end
local xtensor = tensor.new():set(tensor)
local xsize = xtensor:size():totable()
for i=1,size:size()-tensor:dim() do
table.insert(xsize,1,1)
end
size = torch.DoubleTensor(xsize):cmul(torch.DoubleTensor(size:totable())):long():storage()
xtensor:resize(torch.LongStorage(xsize))
result:resize(size)
local urtensor = result.new(result)
for i=1,xtensor:dim() do
urtensor = urtensor:unfold(i,xtensor:size(i),xtensor:size(i))
end
for i=1,urtensor:dim()-xtensor:dim() do
table.insert(xsize,1,1)
end
xtensor:resize(torch.LongStorage(xsize))
local xxtensor = xtensor:expandAs(urtensor)
urtensor:copy(xxtensor)
return result
end
torch.repeatTensor = Tensor.repeatTensor
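-- Illustrative usage: unlike expand, repeatTensor actually copies the data:
--    local x = torch.Tensor{1, 2}            -- size 2
--    local y = torch.repeatTensor(x, 3, 2)   -- size 3x4, each row is {1, 2, 1, 2}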
--- One of the size elements can be -1,
--- a new LongStorage is then returned.
--- The length of the unspecified dimension
--- is inferred from the number of remaining elements.
local function specifyFully(size, nElements)
local nCoveredElements = 1
local remainingDim = nil
local sizes = size:totable()
for i = 1, #sizes do
local wantedDimSize = sizes[i]
if wantedDimSize == -1 then
if remainingDim then
error("Only one of torch.view dimensions can be -1.")
end
remainingDim = i
else
nCoveredElements = nCoveredElements * wantedDimSize
end
end
if not remainingDim then
return size
end
assert(nElements % nCoveredElements == 0, "The total number of elements is not a multiple of the number of covered elements.")
local copy = torch.LongStorage(sizes)
copy[remainingDim] = nElements / nCoveredElements
return copy
end
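-- Illustrative example: specifyFully(torch.LongStorage{2, -1}, 6) infers the
-- unspecified dimension and returns a LongStorage holding {2, 3}.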
-- TODO: This should be implemented in TH and wrapped.
function Tensor.view(result, src, ...)
local size = ...
local view, tensor
local function istensor(tensor)
return torch.typename(tensor) and torch.typename(tensor):find('torch.*Tensor')
end
local function isstorage(storage)
return torch.typename(storage) and torch.typename(storage) == 'torch.LongStorage'
end
if istensor(result) and istensor(src) and type(size) == 'number' then
size = torch.LongStorage{...}
view = result
tensor = src
elseif istensor(result) and istensor(src) and isstorage(size) then
size = size
view = result
tensor = src
elseif istensor(result) and isstorage(src) and size == nil then
size = src
tensor = result
view = tensor.new()
elseif istensor(result) and type(src) == 'number' then
size = {...}
table.insert(size,1,src)
size = torch.LongStorage(size)
tensor = result
view = tensor.new()
else
local t1 = 'torch.Tensor, torch.Tensor, number [, number ]*'
local t2 = 'torch.Tensor, torch.Tensor, torch.LongStorage'
local t3 = 'torch.Tensor, torch.LongStorage'
local t4 = 'torch.Tensor, number [, number ]*'
error(string.format('torch.view, expected (%s) or\n (%s) or\n (%s)\n or (%s)', t1, t2, t3, t4))
end
local origNElement = tensor:nElement()
size = specifyFully(size, origNElement)
assert(tensor:isContiguous(), "expecting a contiguous tensor")
view:set(tensor:storage(), tensor:storageOffset(), size)
if view:nElement() ~= origNElement then
local inputSize = table.concat(tensor:size():totable(), "x")
local outputSize = table.concat(size:totable(), "x")
error(string.format("Wrong size for view. Input size: %s. Output size: %s",
inputSize, outputSize))
end
return view
end
torch.view = Tensor.view
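-- Illustrative usage: a view shares storage and requires a contiguous tensor:
--    local x = torch.range(1, 6)   -- size 6
--    local y = x:view(2, 3)        -- size 2x3, same storage as x
--    local z = x:view(3, -1)       -- the -1 is inferred as 2 (see specifyFully)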
function Tensor.viewAs(result, src, template)
if template and torch.typename(template) then
return result:view(src, template:size())
elseif template == nil then
template = src
src = result
result = src.new()
return result:view(src, template:size())
else
local t1 = 'torch.Tensor, torch.Tensor, torch.LongStorage'
local t2 = 'torch.Tensor, torch.LongStorage'
error(string.format('expecting (%s) or (%s)', t1, t2))
end
end
torch.viewAs = Tensor.viewAs
function Tensor.split(result, tensor, splitSize, dim)
if torch.type(result) ~= 'table' then
dim = splitSize
splitSize = tensor
tensor = result
result = {}
else
-- empty existing result table before using it
for k,v in pairs(result) do
result[k] = nil
end
end
dim = dim or 1
local start = 1
while start <= tensor:size(dim) do
local size = math.min(splitSize, tensor:size(dim) - start + 1)
local split = tensor:narrow(dim, start, size)
table.insert(result, split)
start = start + size
end
return result
end
torch.split = Tensor.split
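-- Illustrative usage: the last piece may be smaller than splitSize:
--    torch.range(1, 5):split(2)   -- {size-2 tensor, size-2 tensor, size-1 tensor}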
function Tensor.chunk(result, tensor, nChunk, dim)
if torch.type(result) ~= 'table' then
dim = nChunk
nChunk = tensor
tensor = result
result = {}
end
dim = dim or 1
local splitSize = math.ceil(tensor:size(dim)/nChunk)
return torch.split(result, tensor, splitSize, dim)
end
torch.chunk = Tensor.chunk
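-- Illustrative usage: chunk derives splitSize = ceil(size/nChunk):
--    torch.range(1, 5):chunk(2)   -- splitSize = 3, so pieces of sizes 3 and 2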
function Tensor.totable(tensor)
local result = {}
local dim = tensor:dim()
if dim == 1 then
tensor:apply(function(i) table.insert(result, i) end)
elseif dim > 0 then
for i = 1, tensor:size(1) do
table.insert(result, tensor[i]:totable())
end
end
return result
end
torch.totable = Tensor.totable
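-- Illustrative usage:
--    torch.Tensor{{1, 2}, {3, 4}}:totable()   -- returns {{1, 2}, {3, 4}}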
function Tensor.permute(tensor, ...)
local perm = {...}
local nDims = tensor:dim()
assert(#perm == nDims, 'Invalid permutation')
local j
for i, p in ipairs(perm) do
if p ~= i and p ~= 0 then
j = i
repeat
assert(0 < perm[j] and perm[j] <= nDims, 'Invalid permutation')
tensor = tensor:transpose(j, perm[j])
j, perm[j] = perm[j], 0
until perm[j] == i
perm[j] = j
end
end
return tensor
end
torch.permute = Tensor.permute
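-- Illustrative usage: permute reorders all dimensions at once via a chain of
-- transposes, returning a view that shares storage:
--    local x = torch.rand(2, 3, 4)
--    local y = x:permute(3, 1, 2)   -- y has size 4x2x3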
for _,type in ipairs(types) do
local metatable = torch.getmetatable('torch.' .. type .. 'Tensor')
for funcname, func in pairs(Tensor) do
if funcname ~= 'totable' or type ~='Half' then
rawset(metatable, funcname, func)
else
local function Tensor__totable(self)
-- half tensors must be converted to float before their values can be read
local host_tensor = self:float()
return host_tensor:totable()
end
rawset(torch.getmetatable('torch.HalfTensor'), 'totable', Tensor__totable)
end
end
end
TensorMath.lua 0000664 0000000 0000000 00000151271 13162462543 0013662 0 ustar 00root root 0000000 0000000 local wrap = require 'cwrap'
require 'torchcwrap'
local interface = wrap.CInterface.new()
local method = wrap.CInterface.new()
local argtypes = wrap.CInterface.argtypes
argtypes['ptrdiff_t'] = wrap.types.ptrdiff_t
interface:print([[
#include "TH.h"
#include "THMath.h"
#include "luaT.h"
#include "utils.h"
]])
-- specific to torch: we generate a 'dispatch' function
-- first we create a helper function
-- note that it leaves the "torch" table on the stack
interface:print([[
static const void* torch_istensortype(lua_State *L, const char *tname)
{
if(!tname)
return NULL;
if(!luaT_pushmetatable(L, tname))
return NULL;
lua_pushstring(L, "torch");
lua_rawget(L, -2);
if(lua_istable(L, -1))
return tname;
else
{
lua_pop(L, 2);
return NULL;
}
return NULL;
}
]])
interface:print([[
static int torch_isnonemptytable(lua_State *L, int idx)
{
int empty;
if (!lua_istable(L, idx)) return 0;
lua_rawgeti(L, idx, 1);
empty = lua_isnil(L, -1);
lua_pop(L, 1);
return !empty;
}
]])
interface:print([[
static const void* torch_istensorarray(lua_State *L, int idx)
{
const char* tname;
int tensor_idx;
if (!torch_isnonemptytable(L, idx)) return 0;
lua_checkstack(L, 3);
lua_rawgeti(L, idx, 1);
tensor_idx = lua_gettop(L);
tname = (torch_istensortype(L, luaT_typename(L, -1)));
lua_remove(L, tensor_idx);
return tname;
}
]])
interface.dispatchregistry = {}
function interface:wrap(name, ...)
-- usual stuff
wrap.CInterface.wrap(self, name, ...)
-- dispatch function
if not interface.dispatchregistry[name] then
interface.dispatchregistry[name] = true
table.insert(interface.dispatchregistry, {name=name, wrapname=string.format("torch_%s", name)})
interface:print(string.gsub([[
static int torch_NAME(lua_State *L)
{
int narg = lua_gettop(L);
const void *tname;
if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
{
}
else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
{
}
else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
{
}
else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
&& (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
{
lua_remove(L, -2);
}
else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
lua_pushstring(L, "NAME");
lua_rawget(L, -2);
if(lua_isfunction(L, -1))
{
lua_insert(L, 1);
lua_pop(L, 2); /* the two tables we put on the stack above */
lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
}
else
return luaL_error(L, "%s does not implement the torch.NAME() function", tname);
return lua_gettop(L);
}
]], 'NAME', name))
end
end
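-- Illustrative effect of the dispatcher generated above: a call such as
-- torch.add(x, y) looks for a tensor in the first or second argument (or a
-- tensor array, or a trailing tensor-type string like 'torch.FloatTensor'),
-- then forwards to that type's own "add"; if nothing matches, it falls back
-- to the default tensor type.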
function interface:dispatchregister(name)
local txt = self.txt
table.insert(txt, string.format('static const struct luaL_Reg %s [] = {', name))
for _,reg in ipairs(self.dispatchregistry) do
table.insert(txt, string.format('{"%s", %s},', reg.name, reg.wrapname))
end
table.insert(txt, '{NULL, NULL}')
table.insert(txt, '};')
table.insert(txt, '')
self.dispatchregistry = {}
end
interface:print('/* WARNING: autogenerated file */')
interface:print('')
local function wrap(...)
local args = {...}
-- interface
interface:wrap(...)
-- method: we possibly override things via the 'method' table field
for _,x in ipairs(args) do
if type(x) == 'table' then -- ok, now we have a list of args
for _, arg in ipairs(x) do
if arg.method then
for k,v in pairs(arg.method) do
if v == 'nil' then -- special case, we erase the field
arg[k] = nil
else
arg[k] = v
end
end
end
end
end
end
local unpack = unpack or table.unpack
method:wrap(unpack(args))
end
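-- Illustrative note on the overriding above (an assumption based on cwrap
-- conventions): method={default=1} makes that argument default to argument 1
-- (the tensor the method is called on) in the method variant, while the
-- special string 'nil' erases the default entirely for the method variant.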
local reals = {ByteTensor='unsigned char',
CharTensor='char',
ShortTensor='short',
IntTensor='int',
LongTensor='long',
FloatTensor='float',
HalfTensor='half',
DoubleTensor='double'}
local accreals = {ByteTensor='long',
CharTensor='long',
ShortTensor='long',
IntTensor='long',
LongTensor='long',
FloatTensor='double',
HalfTensor='float',
DoubleTensor='double'}
for _,Tensor in ipairs({"ByteTensor", "CharTensor",
"ShortTensor", "IntTensor", "LongTensor",
"FloatTensor", "HalfTensor", "DoubleTensor"}) do
local real = reals[Tensor]
local accreal = accreals[Tensor]
function interface.luaname2wrapname(self, name)
return string.format('torch_%s_%s', Tensor, name)
end
function method.luaname2wrapname(self, name)
return string.format('m_torch_%s_%s', Tensor, name)
end
local function cname(name)
return string.format('TH%s_%s', Tensor, name)
end
local function lastdim(argn)
return function(arg)
return string.format("TH%s_nDimension(%s)", Tensor, arg.args[argn]:carg())
end
end
local function lastdimarray(argn)
return function(arg)
return string.format("TH%s_nDimension(arg%d_data[0])", Tensor, arg.args[argn].i)
end
end
if Tensor ~= 'HalfTensor' then
wrap("zero",
cname("zero"),
{{name=Tensor, returned=true}})
wrap("fill",
cname("fill"),
{{name=Tensor, returned=true},
{name=real}})
wrap("zeros",
cname("zeros"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name="LongArg"}})
wrap("ones",
cname("ones"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name="LongArg"}})
wrap("reshape",
cname("reshape"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="LongArg"}})
wrap("gather",
cname("gather"),
{{name=Tensor, default=true, returned=true,
init=function(arg)
return table.concat(
{
arg.__metatable.init(arg),
string.format("THLongStorage* %s_size = THLongTensor_newSizeOf(%s);", arg:carg(), arg.args[4]:carg()),
string.format("TH%s_resize(%s, %s_size, NULL);", Tensor, arg:carg(), arg:carg()),
string.format("THLongStorage_free(%s_size);", arg:carg())
}, '\n')
end
},
{name=Tensor},
{name="index"},
{name="IndexTensor", noreadadd=true}})
wrap("scatter",
cname("scatter"),
{{name=Tensor, returned=true},
{name="index"},
{name="IndexTensor", noreadadd=true},
{name=Tensor}},
cname("scatterFill"),
{{name=Tensor, returned=true},
{name="index"},
{name="IndexTensor", noreadadd=true},
{name=real}})
wrap("dot",
cname("dot"),
{{name=Tensor},
{name=Tensor},
{name=accreal, creturned=true}})
wrap("equal",
cname("equal"),
{{name=Tensor},
{name=Tensor},
{name="boolean", creturned=true}})
wrap("add",
cname("add"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}},
cname("cadd"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real, default=1},
{name=Tensor}})
wrap("csub",
cname("sub"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}},
cname("csub"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real, default=1},
{name=Tensor}})
wrap("mul",
cname("mul"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("div",
cname("div"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("lshift",
cname("lshift"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("rshift",
cname("rshift"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("fmod",
cname("fmod"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("remainder",
cname("remainder"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("bitand",
cname("bitand"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("bitor",
cname("bitor"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("bitxor",
cname("bitxor"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
-- mod alias
wrap("mod",
cname("fmod"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}})
wrap("clamp",
cname("clamp"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real},
{name=real}})
wrap("match",
cname("match"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor},
{name=Tensor},
{name=real, default=1}
})
wrap("cmul",
cname("cmul"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("cpow",
cname("cpow"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("cdiv",
cname("cdiv"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("clshift",
cname("clshift"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("crshift",
cname("crshift"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("cfmod",
cname("cfmod"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("cremainder",
cname("cremainder"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("cbitand",
cname("cbitand"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("cbitor",
cname("cbitor"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("cbitxor",
cname("cbitxor"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
-- cmod alias
wrap("cmod",
cname("cfmod"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}})
wrap("addcmul",
cname("addcmul"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real, default=1},
{name=Tensor},
{name=Tensor}})
wrap("addcdiv",
cname("addcdiv"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real, default=1},
{name=Tensor},
{name=Tensor}})
wrap("mv",
cname("addmv"),
{{name=Tensor, default=true, returned=true, method={default='nil'},
init=function(arg)
return table.concat(
{
arg.__metatable.init(arg),
string.format("TH%s_resize1d(%s, %s->size[0]);", Tensor, arg:carg(), arg.args[5]:carg())
}, '\n')
end,
precall=function(arg)
return table.concat(
{
string.format("TH%s_zero(%s);", Tensor, arg:carg()),
arg.__metatable.precall(arg)
}, '\n')
end,
},
{name=real, default=0, invisible=true},
{name=Tensor, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=2},
{name=Tensor, dim=1}}
)
wrap("mm",
cname("addmm"),
{{name=Tensor, default=true, returned=true, method={default='nil'},
init=function(arg)
return table.concat(
{
arg.__metatable.init(arg),
string.format("TH%s_resize2d(%s, %s->size[0], %s->size[1]);", Tensor, arg:carg(), arg.args[5]:carg(), arg.args[6]:carg())
}, '\n')
end,
precall=function(arg)
return table.concat(
{
string.format("TH%s_zero(%s);", Tensor, arg:carg()),
arg.__metatable.precall(arg)
}, '\n')
end,
},
{name=real, default=0, invisible=true},
{name=Tensor, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=2},
{name=Tensor, dim=2}}
)
wrap("bmm",
cname("baddbmm"),
{{name=Tensor, default=true, returned=true, method={default='nil'},
init=function(arg)
return table.concat(
{
arg.__metatable.init(arg),
string.format("TH%s_resize3d(%s, %s->size[0], %s->size[1], %s->size[2]);",
Tensor, arg:carg(), arg.args[5]:carg(), arg.args[5]:carg(), arg.args[6]:carg())
}, '\n')
end,
precall=function(arg)
return table.concat(
{
string.format("TH%s_zero(%s);", Tensor, arg:carg()),
arg.__metatable.precall(arg)
}, '\n')
end,
},
{name=real, default=0, invisible=true},
{name=Tensor, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=3},
{name=Tensor, dim=3}}
)
wrap("ger",
cname("addr"),
{{name=Tensor, default=true, returned=true, method={default='nil'},
init=function(arg)
return table.concat(
{
arg.__metatable.init(arg),
string.format("TH%s_resize2d(%s, %s->size[0], %s->size[0]);", Tensor, arg:carg(), arg.args[5]:carg(), arg.args[6]:carg())
}, '\n')
end,
precall=function(arg)
return table.concat(
{
string.format("TH%s_zero(%s);", Tensor, arg:carg()),
arg.__metatable.precall(arg)
}, '\n')
end
},
{name=real, default=1, invisible=true},
{name=Tensor, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=1},
{name=Tensor, dim=1}}
)
for _,f in ipairs({
{name="addmv", dim1=1, dim2=2, dim3=1},
{name="addmm", dim1=2, dim2=2, dim3=2},
{name="addr", dim1=2, dim2=1, dim3=1},
{name="addbmm", dim1=2, dim2=3, dim3=3},
{name="baddbmm", dim1=3, dim2=3, dim3=3},
}
) do
interface:wrap(f.name,
cname(f.name),
{{name=Tensor, default=true, returned=true},
{name=real, default=1},
{name=Tensor, dim=f.dim1},
{name=real, default=1},
{name=Tensor, dim=f.dim2},
{name=Tensor, dim=f.dim3}})
-- there is an ambiguity here, hence the more complicated setup
method:wrap(f.name,
cname(f.name),
{{name=Tensor, returned=true, dim=f.dim1},
{name=real, default=1, invisible=true},
{name=Tensor, default=1, dim=f.dim1},
{name=real, default=1},
{name=Tensor, dim=f.dim2},
{name=Tensor, dim=f.dim3}},
cname(f.name),
{{name=Tensor, returned=true, dim=f.dim1},
{name=real},
{name=Tensor, default=1, dim=f.dim1},
{name=real},
{name=Tensor, dim=f.dim2},
{name=Tensor, dim=f.dim3}})
end
wrap("numel",
cname("numel"),
{{name=Tensor},
{name="ptrdiff_t", creturned=true}})
for _,name in ipairs({"cumsum", "cumprod"}) do
wrap(name,
cname(name),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="index", default=1}})
end
wrap("sum",
cname("sumall"),
{{name=Tensor},
{name=accreal, creturned=true}},
cname("sum"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="index"},
{name="boolean", default=true, invisible=true}})
wrap("prod",
cname("prodall"),
{{name=Tensor},
{name=accreal, creturned=true}},
cname("prod"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="index"},
{name="boolean", default=true, invisible=true}})
for _,name in ipairs({"min", "max"}) do
wrap(name,
cname(name .. "all"),
{{name=Tensor},
{name=real, creturned=true}},
cname(name),
{{name=Tensor, default=true, returned=true},
{name="IndexTensor", default=true, returned=true, noreadadd=true},
{name=Tensor},
{name="index"},
{name="boolean", default=true, invisible=true}})
end
for _,name in ipairs({"cmin", "cmax"}) do
wrap(name,
cname(name),
{{name=Tensor, default=true, returned=true},
{name=Tensor, method={default=1}},
{name=Tensor}},
cname(name .. "Value"),
{{name=Tensor, default=true, returned=true},
{name=Tensor, method={default=1}},
{name=real}})
end
wrap("trace",
cname("trace"),
{{name=Tensor},
{name=accreal, creturned=true}})
wrap("cross",
cname("cross"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name=Tensor},
{name="index", default=0}})
wrap("diag",
cname("diag"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="long", default=0}})
wrap("eye",
cname("eye"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name="long"},
{name="long", default=0}})
wrap("range",
cname("range"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=accreal},
{name=accreal},
{name=accreal, default=1}})
wrap("randperm",
cname("randperm"),
{{name=Tensor, default=true, returned=true, method={default='nil'},
postcall=function(arg)
return table.concat(
{
arg.__metatable.postcall(arg),
string.format("TH%s_add(%s, %s, 1);", Tensor, arg:carg(), arg:carg())
}, '\n')
end},
{name="Generator", default=true},
{name="long"}})
wrap("sort",
cname("sort"),
{{name=Tensor, default=true, returned=true},
{name="IndexTensor", default=true, returned=true, noreadadd=true},
{name=Tensor},
{name="index", default=lastdim(3)},
{name="boolean", default=0}})
wrap("topk",
cname("topk"),
{{name=Tensor, default=true, returned=true},
{name="IndexTensor", default=true, returned=true, noreadadd=true},
{name=Tensor},
{name="long", default=1},
{name="index", default=lastdim(3)},
{name="boolean", default=0},
{name="boolean", default=0}})
wrap("kthvalue",
cname("kthvalue"),
{{name=Tensor, default=true, returned=true},
{name="IndexTensor", default=true, returned=true, noreadadd=true},
{name=Tensor},
{name="long"},
{name="index", default=lastdim(3)},
{name="boolean", default=true, invisible=true}})
wrap("mode",
cname("mode"),
{{name=Tensor, default=true, returned=true},
{name="IndexTensor", default=true, returned=true, noreadadd=true},
{name=Tensor},
{name="index", default=lastdim(3)},
{name="boolean", default=true, invisible=true}})
wrap("median",
cname("median"),
{{name=Tensor, default=true, returned=true},
{name="IndexTensor", default=true, returned=true, noreadadd=true},
{name=Tensor},
{name="index", default=lastdim(3)},
{name="boolean", default=true, invisible=true}})
wrap("tril",
cname("tril"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="int", default=0}})
wrap("triu",
cname("triu"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="int", default=0}})
wrap("cat",
cname("cat"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name=Tensor},
{name="index", default=-1}},
cname("catArray"),
{{name=Tensor, default=true, returned=true},
{name=Tensor .. "Array"},
{name="index", default=-1}})
if Tensor == 'ByteTensor' then -- we declare this only once
interface:print(
[[
static long THRandom_random2__(THGenerator *gen, long a, long b)
{
THArgCheck(b >= a, 2, "upper bound must be larger than lower bound");
return((THRandom_random(gen) % (b+1-a)) + a);
}
static long THRandom_random1__(THGenerator *gen, long b)
{
THArgCheck(b > 0, 1, "upper bound must be strictly positive");
return(THRandom_random(gen) % b + 1);
}
]])
end
interface:print(string.gsub(
[[
static void THTensor_random2__(THTensor *self, THGenerator *gen, long a, long b)
{
THArgCheck(b >= a, 2, "upper bound must be larger than lower bound");
TH_TENSOR_APPLY(real, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);)
}
static void THTensor_random1__(THTensor *self, THGenerator *gen, long b)
{
THArgCheck(b > 0, 1, "upper bound must be strictly positive");
TH_TENSOR_APPLY(real, self, *self_data = (THRandom_random(gen) % b + 1);)
}
]], 'Tensor', Tensor):gsub('real', real))
wrap('random',
'THRandom_random2__',
{{name='Generator', default=true},
{name='long'},
{name='long'},
{name='long', creturned=true}},
'THRandom_random1__',
{{name='Generator', default=true},
{name='long'},
{name='long', creturned=true}},
'THRandom_random',
{{name='Generator', default=true},
{name='long', creturned=true}},
cname("random2__"),
{{name=Tensor, returned=true},
{name='Generator', default=true},
{name='long'},
{name='long'}},
cname("random1__"),
{{name=Tensor, returned=true},
{name='Generator', default=true},
{name='long'}},
cname("random"),
{{name=Tensor, returned=true},
{name='Generator', default=true}})
wrap("geometric",
"THRandom_geometric",
{{name="Generator", default=true},
{name="double"},
{name="double", creturned=true}},
cname("geometric"),
{{name=Tensor, returned=true},
{name="Generator", default=true},
{name="double"}})
wrap("bernoulli",
"THRandom_bernoulli",
{{name="Generator", default=true},
{name="double", default=0.5},
{name="double", creturned=true}},
cname("bernoulli"),
{{name=Tensor, returned=true},
{name="Generator", default=true},
{name="double", default=0.5}},
cname("bernoulli_FloatTensor"),
{{name=Tensor, returned=true},
{name="Generator", default=true},
{name="FloatTensor"}},
cname("bernoulli_DoubleTensor"),
{{name=Tensor, returned=true},
{name="Generator", default=true},
{name="DoubleTensor"}})
wrap("squeeze",
cname("squeeze"),
{{name=Tensor, default=true, returned=true, postcall=function(arg)
local txt = {}
if arg.returned then
table.insert(txt, string.format('if(arg%d->nDimension == 1 && arg%d->size[0] == 1)', arg.i, arg.i)) -- number
table.insert(txt, string.format('lua_pushnumber(L, (lua_Number)(*TH%s_data(arg%d)));', Tensor, arg.i))
end
return table.concat(txt, '\n')
end},
{name=Tensor}},
cname("squeeze1d"),
{{name=Tensor, default=true, returned=true,
postcall=
function(arg)
local txt = {}
if arg.returned then
table.insert(txt, string.format('if(!hasdims && arg%d->nDimension == 1 && arg%d->size[0] == 1)', arg.i, arg.i)) -- number
table.insert(txt, string.format('lua_pushnumber(L, (lua_Number)(*TH%s_data(arg%d)));}', Tensor, arg.i))
end
return table.concat(txt, '\n')
end},
{name=Tensor,
precall=
function(arg)
return string.format('{int hasdims = arg%d->nDimension > 1;', arg.i)
end},
{name="index"}})
wrap("sign",
cname("sign"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}})
wrap("conv2",
cname("conv2Dmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=2},
{name=Tensor, dim=2},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="C", invisible=true}},
cname("conv2Dcmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=3},
{name=Tensor, dim=3},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="C", invisible=true}},
cname("conv2Dmv"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=3},
{name=Tensor, dim=4},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="C", invisible=true}}
)
wrap("xcorr2",
cname("conv2Dmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=2},
{name=Tensor, dim=2},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="X", invisible=true}},
cname("conv2Dcmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=3},
{name=Tensor, dim=3},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="X", invisible=true}},
cname("conv2Dmv"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=3},
{name=Tensor, dim=4},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="X", invisible=true}}
)
wrap("conv3",
cname("conv3Dmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=3},
{name=Tensor, dim=3},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="C", invisible=true}},
cname("conv3Dcmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=4},
{name=Tensor, dim=4},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="C", invisible=true}},
cname("conv3Dmv"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=4},
{name=Tensor, dim=5},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="C", invisible=true}}
)
wrap("xcorr3",
cname("conv3Dmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=3},
{name=Tensor, dim=3},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="X", invisible=true}},
cname("conv3Dcmul"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=4},
{name=Tensor, dim=4},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="X", invisible=true}},
cname("conv3Dmv"),
{{name=Tensor, default=true, returned=true},
{name=real, default=0, invisible=true},
{name=real, default=1, invisible=true},
{name=Tensor, dim=4},
{name=Tensor, dim=5},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name=real, default=1, invisible=true},
{name='charoption', values={'V', 'F'}, default='V'},
{name='charoption', default="X", invisible=true}}
)
for _,name in pairs({'lt','gt','le','ge','eq','ne'}) do
wrap(name,
cname(name .. 'Value'),
{{name='ByteTensor',default=true, returned=true},
{name=Tensor},
{name=real}},
cname(name .. 'ValueT'),
{{name=Tensor, returned=true},
{name=Tensor},
{name=real}},
cname(name .. 'Tensor'),
{{name='ByteTensor',default=true, returned=true},
{name=Tensor},
{name=Tensor}},
cname(name .. 'TensorT'),
{{name=Tensor, returned=true},
{name=Tensor},
{name=Tensor}})
end
wrap("nonzero",
cname("nonzero"),
{{name="IndexTensor", default=true, returned=true},
{name=Tensor}})
end -- ~= HalfTensor
if Tensor == 'ByteTensor' then
-- Logical accumulators only apply to ByteTensor
for _,name in ipairs({'all', 'any'}) do
wrap(name,
cname('logical' .. name),
{{name=Tensor},
{name="boolean", creturned=true}})
end
end
if Tensor == 'IntTensor' then
wrap("abs",
cname("abs"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}},
"abs",
{{name=real},
{name=real, creturned=true}})
elseif Tensor == 'LongTensor' then
wrap("abs",
cname("abs"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}},
"labs",
{{name=real},
{name=real, creturned=true}})
end
if Tensor == 'FloatTensor' or Tensor == 'DoubleTensor' then
wrap("mean",
cname("meanall"),
{{name=Tensor},
{name=accreal, creturned=true}},
cname("mean"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="index"},
{name="boolean", default=true, invisible=true}})
for _,name in ipairs({"var", "std"}) do
wrap(name,
cname(name .. "all"),
{{name=Tensor},
{name="boolean", default=false},
{name=accreal, creturned=true}
},
cname(name),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="index"},
{name="boolean", default=false},
{name="boolean", default=true, invisible=true}})
end
wrap("histc",
cname("histc"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="long",default=100},
{name="double",default=0},
{name="double",default=0}})
wrap("bhistc",
cname("bhistc"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name="long",default=100},
{name="double",default=0},
{name="double",default=0}})
wrap("norm",
cname("normall"),
{{name=Tensor},
{name=real, default=2},
{name=accreal, creturned=true}},
cname("norm"),
{{name=Tensor, default=true, returned=true},
{name=Tensor},
{name=real},
{name="index"},
{name="boolean", default=true, invisible=true}})
wrap("renorm",
cname("renorm"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real},
{name="index"},
{name=real}})
wrap("dist",
cname("dist"),
{{name=Tensor},
{name=Tensor},
{name=real, default=2},
{name=accreal, creturned=true}})
wrap("linspace",
cname("linspace"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=real},
{name=real},
{name="long", default=100}})
wrap("logspace",
cname("logspace"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=real},
{name=real},
{name="long", default=100}})
for _,name in ipairs({"log", "log1p", "exp",
"cos", "acos", "cosh",
"sin", "asin", "sinh",
"tan", "atan", "tanh",
"sqrt", "round", "ceil",
"floor", "trunc", }) do
wrap(name,
cname(name),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}},
name,
{{name=real},
{name=real, creturned=true}})
end
wrap("abs",
cname("abs"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}},
"fabs",
{{name=real},
{name=real, creturned=true}})
wrap("frac",
cname("frac"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}},
"TH_frac",
{{name=real},
{name=real, creturned=true}})
wrap("rsqrt",
cname("rsqrt"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}},
"TH_rsqrt",
{{name=real},
{name=real, creturned=true}})
wrap("sigmoid",
cname("sigmoid"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}},
"TH_sigmoid",
{{name=real},
{name=real, creturned=true}})
wrap("neg",
cname("neg"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}})
wrap("cinv",
cname("cinv"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}}})
wrap("lerp",
cname("lerp"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor},
{name=real}},
"TH_lerp",
{{name=real},
{name=real},
{name=real},
{name=real, creturned=true}})
wrap("atan2",
cname("atan2"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=Tensor}},
"atan2",
{{name=real},
{name=real},
{name=real, creturned=true}})
wrap("pow",
cname("pow"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=Tensor, method={default=1}},
{name=real}},
cname("tpow"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name=real},
{name=Tensor, method={default=1}}},
"pow",
{{name=real},
{name=real},
{name=real, creturned=true}})
wrap("rand",
cname("rand"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name='Generator', default=true},
{name="LongArg"}})
wrap("randn",
cname("randn"),
{{name=Tensor, default=true, returned=true, method={default='nil'}},
{name='Generator', default=true},
{name="LongArg"}})
wrap("multinomial",
cname("multinomial"),
{{name="IndexTensor", default=true, returned=true, method={default='nil'}},
{name='Generator', default=true},
{name=Tensor},
{name="int"},
{name="boolean", default=false}})
wrap("multinomialAliasSetup_",
cname("multinomialAliasSetup"),
{{name=Tensor},
{name="IndexTensor", default=true, returned=true, method={default='nil'}},
{name=Tensor, default=true, returned=true, method={default='nil'}}})
wrap("multinomialAlias_",
cname("multinomialAliasDraw"),
{{name="IndexTensor", default=true, returned=true, method={default='nil'}},
{name='Generator', default=true},
{name="IndexTensor"},
{name=Tensor}
})
for _,f in ipairs({{name='uniform', a=0, b=1},
{name='normal', a=0, b=1},
{name='cauchy', a=0, b=1},
{name='logNormal', a=1, b=2}}) do
wrap(f.name,
string.format("THRandom_%s", f.name),
{{name='Generator', default=true},
{name="double", default=f.a},
{name="double", default=f.b},
{name="double", creturned=true}},
cname(f.name),
{{name=Tensor, returned=true},
{name='Generator', default=true},
{name=real, default=f.a},
{name=real, default=f.b}})
end
for _,f in ipairs({{name='exponential'}}) do
wrap(f.name,
string.format("THRandom_%s", f.name),
{{name='Generator', default=true},
{name="double", default=f.a},
{name="double", creturned=true}},
cname(f.name),
{{name=Tensor, returned=true},
{name='Generator', default=true},
{name=real, default=f.a}})
end
for _,name in ipairs({"gesv","gels"}) do
interface:wrap(name,
cname(name),
{{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor},
{name=Tensor}},
cname(name),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name=Tensor}}
)
end
interface:wrap("trtrs",
cname("trtrs"),
{{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}, -- uplo
{name='charoption', values={'N', 'T'}, default='N'}, -- trans
{name='charoption', values={'N', 'U'}, default='N'}}, -- diag
cname("trtrs"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}, -- uplo
{name='charoption', values={'N', 'T'}, default='N'}, -- trans
{name='charoption', values={'N', 'U'}, default='N'}} -- diag
)
interface:wrap("symeig",
cname("syev"),
{{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor},
{name='charoption', values={'N', 'V'}, default='N'},
{name='charoption', values={'U', 'L'}, default='U'}},
cname("syev"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name='charoption', values={'N', 'V'}, default='N'},
{name='charoption', values={'U', 'L'}, default='U'}}
)
interface:wrap("eig",
cname("geev"),
{{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor},
{name='charoption', values={'N', 'V'}, default='N'}},
cname("geev"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name='charoption', values={'N', 'V'}, default='N'}}
)
interface:wrap("svd",
cname("gesvd"),
{{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor},
{name='charoption', values={'A', 'S'}, default='S'}},
cname("gesvd"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name='charoption', values={'A', 'S'}, default='S'}}
)
interface:wrap("inverse",
cname("getri"),
{{name=Tensor, returned=true},
{name=Tensor}},
cname("getri"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor}}
)
interface:wrap("potrf",
cname("potrf"),
{{name=Tensor, returned=true},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}}, -- uplo
cname("potrf"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}}
)
interface:wrap("potrs",
cname("potrs"),
{{name=Tensor, returned=true},
{name=Tensor},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}}, -- uplo
cname("potrs"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}}
)
interface:wrap("potri",
cname("potri"),
{{name=Tensor, returned=true},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}}, -- uplo
cname("potri"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}} -- uplo
)
interface:wrap("pstrf",
cname("pstrf"),
{{name=Tensor, returned=true},
{name='IntTensor', returned=true},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}, -- uplo
{name=real, default=-1}},
cname("pstrf"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name='IntTensor', default=true, returned=true, invisible=true},
{name=Tensor},
{name='charoption', values={'U', 'L'}, default='U'}, -- uplo
{name=real, default=-1}}
)
interface:wrap("qr",
cname("qr"),
{{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor}},
cname("qr"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor}}
)
interface:wrap("geqrf",
cname("geqrf"),
{{name=Tensor, returned=true},
{name=Tensor, returned=true},
{name=Tensor}},
cname("geqrf"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor}}
)
interface:wrap("orgqr",
cname("orgqr"),
{{name=Tensor, returned=true},
{name=Tensor},
{name=Tensor}},
cname("orgqr"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name=Tensor}}
)
interface:wrap("ormqr",
cname("ormqr"),
{{name=Tensor, returned=true},
{name=Tensor},
{name=Tensor},
{name=Tensor},
{name='charoption', values={'L', 'R'}, default='L'},
{name='charoption', values={'N', 'T'}, default='N'}},
cname("ormqr"),
{{name=Tensor, default=true, returned=true, invisible=true},
{name=Tensor},
{name=Tensor},
{name=Tensor},
{name='charoption', values={'L', 'R'}, default='L'},
{name='charoption', values={'N', 'T'}, default='N'}}
)
end
method:register(string.format("m_torch_%sMath__", Tensor))
interface:print(method:tostring())
method:clearhistory()
interface:register(string.format("torch_%sMath__", Tensor))
interface:print(string.gsub([[
static void torch_TensorMath_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.Tensor");
/* register methods */
luaT_setfuncs(L, m_torch_TensorMath__, 0);
/* register functions into the "torch" field of the tensor metaclass */
lua_pushstring(L, "torch");
lua_newtable(L);
luaT_setfuncs(L, torch_TensorMath__, 0);
lua_rawset(L, -3);
lua_pop(L, 1);
}
]], 'Tensor', Tensor))
end
interface:dispatchregister("torch_TensorMath__")
interface:print([[
void torch_TensorMath_init(lua_State *L)
{
torch_ByteTensorMath_init(L);
torch_CharTensorMath_init(L);
torch_ShortTensorMath_init(L);
torch_IntTensorMath_init(L);
torch_LongTensorMath_init(L);
torch_FloatTensorMath_init(L);
torch_DoubleTensorMath_init(L);
luaT_setfuncs(L, torch_TensorMath__, 0);
}
]])
if arg[1] then
interface:tofile(arg[1])
else
print(interface:tostring())
end
TensorOperator.c 0000664 0000000 0000000 00000000444 13162462543 0014220 0 ustar 00root root 0000000 0000000 #include "general.h"
#define torch_TensorOperator_(NAME) TH_CONCAT_4(torch_,Real,TensorOperator_,NAME)
#define torch_Tensor_id TH_CONCAT_3(torch_,Real,Tensor_id)
#define torch_Tensor TH_CONCAT_STRING_3(torch.,Real,Tensor)
#include "generic/TensorOperator.c"
#include "THGenerateAllTypes.h"
TestSuite.lua 0000664 0000000 0000000 00000001277 13162462543 0013527 0 ustar 00root root 0000000 0000000 function torch.TestSuite()
local obj = {
__tests = {},
__isTestSuite = true
}
local metatable = {}
function metatable:__index(key)
return self.__tests[key]
end
function metatable:__newindex(key, value)
if self.__tests[key] ~= nil then
error("Test " .. tostring(key) .. " is already defined.")
end
if type(value) ~= "function" then
if type(value) == "table" then
error("Nested tables of tests are not supported")
else
error("Only functions are supported as members of a TestSuite")
end
end
self.__tests[key] = value
end
setmetatable(obj, metatable)
return obj
end
Tester.lua 0000664 0000000 0000000 00000067303 13162462543 0013046 0 ustar 00root root 0000000 0000000
-- Lua 5.2 compatibility
local unpack = unpack or table.unpack
local check = {} -- helper functions, defined at the bottom of the file
local Tester = torch.class('torch.Tester')
function Tester:__init()
self.errors = {}
self.tests = {}
self.warnings = {}
self._warningCount = {}
self.disabledTests = {}
self._currentTestName = ''
-- To maintain backwards compatibility (at least for a short while),
-- disable exact dimension checking of tensors when :assertTensorEq is
-- called. Thus {{1}} == {1} when this flag is true.
--
-- Note that other methods that support tensor checking (such as
-- :assertGeneralEq) ignore this flag, since previously they didn't
-- exist or support tensor equality checks at all, so there is no
-- old code that uses these functions and relies on the behaviour.
--
-- Note also that if the dimension check fails while this flag is true,
-- a warning will be shown.
self._assertTensorEqIgnoresDims = true
end
function Tester:setEarlyAbort(earlyAbort)
self.earlyAbort = earlyAbort
end
function Tester:setRethrowErrors(rethrow)
self.rethrow = rethrow
end
function Tester:setSummaryOnly(summaryOnly)
self.summaryOnly = summaryOnly
end
-- Add a success to the test.
function Tester:_success()
local name = self._currentTestName
self.assertionPass[name] = self.assertionPass[name] + 1
return true
end
function Tester:_addDebugInfo(message)
local ss = debug.traceback('tester', 3) or ''
ss = ss:match('.-\n([^\n]+\n[^\n]+)\n[^\n]+xpcall') or ''
local name = self._currentTestName
return (name ~= '' and name .. '\n' or '') .. message .. '\n' .. ss
end
-- Add a failure to the test.
function Tester:_failure(message)
if self.rethrow then error(message, 2) end
local name = self._currentTestName
self.assertionFail[name] = self.assertionFail[name] + 1
self.errors[#self.errors + 1] = self:_addDebugInfo(message)
return false
end
-- Add a warning to the test
function Tester:_warning(message)
local name = self._currentTestName
self._warningCount[name] = (self._warningCount[name] or 0) + 1
self.warnings[#self.warnings + 1] = self:_addDebugInfo(message)
end
-- Call this during a test run with `condition = true` to log a success, or with
-- `condition = false` to log a failure (using `message`).
function Tester:_assert_sub(condition, message)
if condition then
return self:_success()
else
return self:_failure(message)
end
end
local function getMessage(message, ...)
assert(next{...} == nil, "Unexpected arguments passed to test function")
if message then
assert(type(message) == 'string', 'message parameter must be a string')
if message ~= '' then
return message .. '\n'
end
end
return ''
end
--[[ Historically, some test functions have accepted both a message and a
tolerance, and some just a message (e.g., assertTableEq). Now assertTableEq
accepts both a tolerance and a message, so allow the two arguments to be passed
in either order to maintain backwards compatibility (and more generally,
for convenience). (We still document the ordering as "tolerance, message" for
clarity.) This function also sanitizes them (ensures they are non-nil, etc).
]]
local function getToleranceAndMessage(defaultTolerance, ...)
local args = {...}
local message = nil
local tolerance = nil
for _, a in ipairs(args) do
if type(a) == 'string' then
if message then
error("Unexpected string argument; already have message", a)
end
message = a .. '\n'
elseif type(a) == 'number' then
if tolerance then
error("Unexpected number argument; already have tolerance", a)
end
tolerance = a
assert(tolerance >= 0, "tolerance cannot be negative")
else
error("Unrecognized argument; should be a tolerance or message", a)
end
end
message = message or ''
tolerance = tolerance or defaultTolerance
return tolerance, message
end
function Tester:assert(condition, ...)
local message = getMessage(...)
if type(condition) ~= 'boolean' then
self:_warning(" :assert should only be used for boolean conditions. "
.. "To check for non-nil variables, do this explicitly: "
.. "Tester:assert(var ~= nil).")
end
return self:_assert_sub(condition,
string.format('%sBOOL violation condition=%s',
message, tostring(condition)))
end
function Tester:assertGeneralEq(got, expected, ...)
return self:_eqOrNeq(got, expected, false, ...)
end
function Tester:eq(got, expected, ...)
return self:assertGeneralEq(got, expected, ...)
end
function Tester:assertGeneralNe(got, unexpected, ...)
return self:_eqOrNeq(got, unexpected, true, ...)
end
function Tester:ne(got, unexpected, ...)
return self:assertGeneralNe(got, unexpected, ...)
end
function Tester:_eqOrNeq(got, expected, negate, ...)
local tolerance, message = getToleranceAndMessage(0, ...)
local success, subMessage = check.areEq(got, expected, tolerance, negate)
subMessage = subMessage or ''
return self:_assert_sub(success, message .. subMessage)
end
function Tester:assertlt(a, b, ...)
local message = getMessage(...)
return self:_assert_sub(a < b,
string.format('%sLT failed: %s >= %s',
message, tostring(a), tostring(b)))
end
function Tester:assertgt(a, b, ...)
local message = getMessage(...)
return self:_assert_sub(a > b,
string.format('%sGT failed: %s <= %s',
message, tostring(a), tostring(b)))
end
function Tester:assertle(a, b, ...)
local message = getMessage(...)
return self:_assert_sub(a <= b,
string.format('%sLE failed: %s > %s',
message, tostring(a), tostring(b)))
end
function Tester:assertge(a, b, ...)
local message = getMessage(...)
return self:_assert_sub(a >= b,
string.format('%sGE failed: %s < %s',
message, tostring(a), tostring(b)))
end
function Tester:assertalmosteq(a, b, ...)
local tolerance, message = getToleranceAndMessage(1e-16, ...)
local diff = math.abs(a - b)
return self:_assert_sub(
diff <= tolerance,
string.format(
'%sALMOST_EQ failed: %s ~= %s with tolerance=%s',
message, tostring(a), tostring(b), tostring(tolerance)))
end
function Tester:asserteq(a, b, ...)
local message = getMessage(...)
return self:_assert_sub(a == b,
string.format('%sEQ failed: %s ~= %s',
message, tostring(a), tostring(b)))
end
function Tester:assertne(a, b, ...)
local message = getMessage(...)
if type(a) == type(b) and (type(a) == 'table' or type(a) == 'userdata') then
self:_warning(" :assertne should only be used to compare basic lua "
.. "objects (numbers, booleans, etc). Consider using "
.. "either :assertGeneralNe or :assert(a ~= b).")
end
return self:_assert_sub(a ~= b,
string.format('%sNE failed: %s == %s',
message, tostring(a), tostring(b)))
end
function Tester:assertTensorEq(ta, tb, ...)
return self:_assertTensorEqOrNeq(ta, tb, false, ...)
end
function Tester:assertTensorNe(ta, tb, ...)
return self:_assertTensorEqOrNeq(ta, tb, true, ...)
end
function Tester:_assertTensorEqOrNeq(ta, tb, negate, ...)
assert(torch.isTensor(ta), "First argument should be a Tensor")
assert(torch.isTensor(tb), "Second argument should be a Tensor")
local tolerance, message = getToleranceAndMessage(0, ...)
local success, subMessage =
check.areTensorsEq(ta, tb, tolerance, negate,
self._assertTensorEqIgnoresDims)
subMessage = subMessage or ''
if self._assertTensorEqIgnoresDims and (not negate) and success
and not ta:isSameSizeAs(tb) then
self:_warning("Tensors have the same content but different dimensions. "
.. "For backwards compatibility, they are considered equal, "
.. "but this may change in the future. Consider using :eq "
.. "to check for equality instead.")
end
return self:_assert_sub(success, message .. subMessage)
end
function Tester:assertTableEq(ta, tb, ...)
return self:_assertTableEqOrNeq(ta, tb, false, ...)
end
function Tester:assertTableNe(ta, tb, ...)
return self:_assertTableEqOrNeq(ta, tb, true, ...)
end
function Tester:_assertTableEqOrNeq(ta, tb, negate, ...)
assert(type(ta) == 'table', "First argument should be a Table")
assert(type(tb) == 'table', "Second argument should be a Table")
return self:_eqOrNeq(ta, tb, negate, ...)
end
function Tester:assertError(f, ...)
return self:assertErrorObj(f, function() return true end, ...)
end
function Tester:assertNoError(f, ...)
local message = getMessage(...)
local status, err = pcall(f)
return self:_assert_sub(status,
string.format('%sERROR violation: err=%s', message,
tostring(err)))
end
function Tester:assertErrorMsg(f, errmsg, ...)
return self:assertErrorObj(f, function(err) return err == errmsg end, ...)
end
function Tester:assertErrorPattern(f, errPattern, ...)
local function errcomp(err)
return string.find(err, errPattern) ~= nil
end
return self:assertErrorObj(f, errcomp, ...)
end
function Tester:assertErrorObj(f, errcomp, ...)
local message = getMessage(...)
local status, err = pcall(f)
return self:_assert_sub((not status) and errcomp(err),
string.format('%sERROR violation: err=%s', message,
tostring(err)))
end
function Tester:add(f, name)
if type(f) == "table" then
assert(name == nil, "Name parameter is forbidden for a table of tests, "
.. "since its use is ambiguous")
if f.__isTestSuite then
f = f.__tests
else
self:_warning("Should use TestSuite rather than plain lua table")
end
for i, v in pairs(f) do
-- We forbid nested tests because the expected behaviour of running a
-- named test, when that named test is in fact a table of tests, is not
-- well defined. There is a similar issue with _setUp and _tearDown
-- functions inside nested tests.
assert(type(v) ~= 'table', "Nested sets of tests are not supported")
self:add(v, i)
end
return self
end
assert(type(f) == 'function',
"Only tables of functions and functions supported")
if name == '_setUp' then
assert(not self._setUp, "Only one set-up function allowed")
self._setUp = f
elseif name == '_tearDown' then
assert(not self._tearDown, "Only one tear-down function allowed")
self._tearDown = f
else
name = name or 'unknown'
if self.tests[name] ~= nil then
error('Test with name ' .. name .. ' already exists!')
end
self.tests[name] = f
end
return self
end
function Tester:disable(testNames)
if type(testNames) == 'string' then
testNames = {testNames}
end
assert(type(testNames) == 'table', "Expecting name or list for disable")
for _, name in ipairs(testNames) do
assert(self.tests[name], "Unrecognized test '" .. name .. "'")
self.disabledTests[name] = true
end
return self
end
function Tester:run(testNames)
local tests = self:_getTests(testNames)
self.assertionPass = {}
self.assertionFail = {}
self.haveWarning = {}
self.testError = {}
for name in pairs(tests) do
self.assertionPass[name] = 0
self.assertionFail[name] = 0
self.testError[name] = 0
self._warningCount[name] = 0
end
self:_run(tests)
self:_report(tests)
-- Throws an error on test failure/error, so that test script returns
-- with nonzero return value.
for name in pairs(tests) do
assert(self.assertionFail[name] == 0,
'An error was found while running tests!')
assert(self.testError[name] == 0,
'An error was found while running tests!')
end
return 0
end
local function pluralize(num, str)
local stem = num .. ' ' .. str
if num == 1 then
return stem
else
return stem .. 's'
end
end
local NCOLS = 80
local coloured
local enable_colors, c = pcall(require, 'sys.colors')
if arg and enable_colors then -- have we been invoked from the commandline?
coloured = function(str, colour)
return colour .. str .. c.none
end
else
c = {}
coloured = function(str)
return str
end
end
function Tester:_run(tests)
local ntests = 0
for _ in pairs(tests) do
ntests = ntests + 1
end
local ntestsAsString = string.format('%u', ntests)
local cfmt = string.format('%%%uu/%u ', ntestsAsString:len(), ntestsAsString)
local cfmtlen = ntestsAsString:len() * 2 + 2
local function bracket(str)
return '[' .. str .. ']'
end
io.write('Running ' .. pluralize(ntests, 'test') .. '\n')
local i = 1
for name, fn in pairs(tests) do
self._currentTestName = name
-- TODO: compute max length of name and cut it down to size if needed
local strinit = coloured(string.format(cfmt, i), c.cyan)
.. self._currentTestName .. ' '
.. string.rep('.',
NCOLS - 6 - 2 -
cfmtlen - self._currentTestName:len())
.. ' '
io.write(strinit .. bracket(coloured('WAIT', c.cyan)))
io.flush()
local status, message, pass, skip
if self.disabledTests[name] then
skip = true
else
skip = false
if self._setUp then
self._setUp(name)
end
if self.rethrow then
status = true
local nerr = #self.errors
message = fn()
pass = nerr == #self.errors
else
status, message, pass = self:_pcall(fn)
end
if self._tearDown then
self._tearDown(name)
end
end
io.write('\r')
io.write(strinit)
if skip then
io.write(bracket(coloured('SKIP', c.yellow)))
elseif not status then
self.testError[name] = 1
io.write(bracket(coloured('ERROR', c.magenta)))
elseif not pass then
io.write(bracket(coloured('FAIL', c.red)))
else
io.write(bracket(coloured('PASS', c.green)))
if self._warningCount[name] > 0 then
io.write('\n' .. string.rep(' ', NCOLS - 10))
io.write(bracket(coloured('+warning', c.yellow)))
end
end
io.write('\n')
io.flush()
if self.earlyAbort and (i < ntests) and (not status or not pass)
and (not skip) then
io.write('Aborting on first error, not all tests have been executed\n')
break
end
i = i + 1
collectgarbage()
end
end
function Tester:_pcall(f)
local nerr = #self.errors
local stat, result = xpcall(f, debug.traceback)
if not stat then
self.errors[#self.errors + 1] =
self._currentTestName .. '\n Function call failed\n' .. result .. '\n'
end
return stat, result, stat and (nerr == #self.errors)
end
function Tester:_getTests(testNames)
if testNames == nil then
return self.tests
end
if type(testNames) == 'string' then
testNames = {testNames}
end
assert(type(testNames) == 'table',
"Only accept a name or table of test names (or nil for all tests)")
local function getMatchingNames(pattern)
local matchingNames = {}
for name in pairs(self.tests) do
if string.match(name, pattern) then
table.insert(matchingNames, name)
end
end
return matchingNames
end
local tests = {}
for _, pattern in ipairs(testNames) do
local matchingNames = getMatchingNames(pattern)
assert(#matchingNames > 0, "Couldn't find test '" .. pattern .. "'")
for _, name in ipairs(matchingNames) do
tests[name] = self.tests[name]
end
end
return tests
end
function Tester:_report(tests)
local ntests = 0
local nfailures = 0
local nerrors = 0
local nskipped = 0
local nwarnings = 0
self.countasserts = 0
for name in pairs(tests) do
ntests = ntests + 1
self.countasserts = self.countasserts + self.assertionFail[name]
+ self.assertionPass[name]
if self.assertionFail[name] > 0 then
nfailures = nfailures + 1
end
if self.testError[name] > 0 then
nerrors = nerrors + 1
end
if self._warningCount[name] > 0 then
nwarnings = nwarnings + 1
end
if self.disabledTests[name] then
nskipped = nskipped + 1
end
end
if self._warningCount[''] then
nwarnings = nwarnings + self._warningCount['']
end
io.write('Completed ' .. pluralize(self.countasserts, 'assert'))
io.write(' in ' .. pluralize(ntests, 'test') .. ' with ')
io.write(coloured(pluralize(nfailures, 'failure'),
nfailures == 0 and c.green or c.red))
io.write(' and ')
io.write(coloured(pluralize(nerrors, 'error'),
nerrors == 0 and c.green or c.magenta))
if nwarnings > 0 then
io.write(' and ')
io.write(coloured(pluralize(nwarnings, 'warning'), c.yellow))
end
if nskipped > 0 then
io.write(' and ')
io.write(coloured(nskipped .. ' disabled', c.yellow))
end
io.write('\n')
-- Prints off a message separated by -----
local haveSection = false
local function addSection(text)
local function printDashes()
io.write(string.rep('-', NCOLS) .. '\n')
end
if not haveSection then
printDashes()
haveSection = true
end
io.write(text .. '\n')
printDashes()
end
if not self.summaryOnly then
for _, v in ipairs(self.errors) do
addSection(v)
end
for _, v in ipairs(self.warnings) do
addSection(v)
end
end
end
--[[ Tests for tensor equality between two tensors of matching sizes and types.
Tests whether the maximum element-wise difference between `ta` and `tb` is less
than or equal to `tolerance`.
Arguments:
* `ta` (tensor)
* `tb` (tensor)
* `tolerance` (number) maximum elementwise difference between `ta` and `tb`.
* `negate` (boolean) if true, we invert success and failure.
* `storage` (boolean) if true, we print an error message referring to Storages
rather than Tensors.
Returns:
1. success, boolean that indicates success
2. failure_message, string or nil
]]
function check.areSameFormatTensorsEq(ta, tb, tolerance, negate, storage)
local function ensureHasAbs(t)
-- Byte, Char and Short Tensors don't have abs
return t.abs and t or t:double()
end
ta = ensureHasAbs(ta)
tb = ensureHasAbs(tb)
local diff = ta:clone():add(-1, tb):abs()
local err = diff:max()
local success = err <= tolerance
if negate then
success = not success
end
local errMessage
if not success then
local prefix = storage and 'Storage' or 'Tensor'
local violation = negate and 'NE(==)' or 'EQ(==)'
errMessage = string.format('%s%s violation: max diff=%s, tolerance=%s',
prefix,
violation,
tostring(err),
tostring(tolerance))
end
return success, errMessage
end
--[[ Tests for tensor equality.
Tests whether the maximum element-wise difference between `ta` and `tb` is less
than or equal to `tolerance`.
Arguments:
* `ta` (tensor)
* `tb` (tensor)
* `tolerance` (number) maximum elementwise difference between `ta` and `tb`.
* `negate` (boolean) if negate is true, we invert success and failure.
* `ignoreTensorDims` (boolean, default false) if true, then tensors of the same
size but different dimensions can still be considered equal, e.g.,
{{1}} == {1}. For backwards compatibility.
Returns:
1. success, boolean that indicates success
2. failure_message, string or nil
]]
function check.areTensorsEq(ta, tb, tolerance, negate, ignoreTensorDims)
ignoreTensorDims = ignoreTensorDims or false
if not ignoreTensorDims and ta:dim() ~= tb:dim() then
return negate, 'The tensors have different dimensions'
end
if ta:type() ~= tb:type() then
return negate, 'The tensors have different types'
end
-- If we are comparing two empty tensors, return true.
-- This is needed because some functions below cannot be applied to tensors
-- of dimension 0.
if ta:dim() == 0 and tb:dim() == 0 then
return not negate, 'Both tensors are empty'
end
local sameSize
if ignoreTensorDims then
sameSize = ta:nElement() == tb:nElement()
else
sameSize = ta:isSameSizeAs(tb)
end
if not sameSize then
return negate, 'The tensors have different sizes'
end
return check.areSameFormatTensorsEq(ta, tb, tolerance, negate, false)
end
local typesMatching = {
['torch.ByteStorage'] = torch.ByteTensor,
['torch.CharStorage'] = torch.CharTensor,
['torch.ShortStorage'] = torch.ShortTensor,
['torch.IntStorage'] = torch.IntTensor,
['torch.LongStorage'] = torch.LongTensor,
['torch.FloatStorage'] = torch.FloatTensor,
['torch.DoubleStorage'] = torch.DoubleTensor,
['torch.HalfStorage'] = torch.HalfTensor,
}
--[[ Tests for storage equality.
Tests whether the maximum element-wise difference between `sa` and `sb` is less
than or equal to `tolerance`.
Arguments:
* `sa` (storage)
* `sb` (storage)
* `tolerance` (number) maximum elementwise difference between `a` and `b`.
* `negate` (boolean) if negate is true, we invert success and failure.
Returns:
1. success, boolean that indicates success
2. failure_message, string or nil
]]
function check.areStoragesEq(sa, sb, tolerance, negate)
if sa:size() ~= sb:size() then
return negate, 'The storages have different sizes'
end
local typeOfsa = torch.type(sa)
local typeOfsb = torch.type(sb)
if typeOfsa ~= typeOfsb then
return negate, 'The storages have different types'
end
local ta = typesMatching[typeOfsa](sa)
local tb = typesMatching[typeOfsb](sb)
return check.areSameFormatTensorsEq(ta, tb, tolerance, negate, true)
end
--[[ Tests for general (deep) equality.
The types of `got` and `expected` must match.
Tables are compared recursively. Keys and types of the associated values must
match, recursively. Numbers are compared with the given tolerance.
Torch tensors and storages are compared with the given tolerance on their
elementwise difference. Other types are compared for strict equality with the
regular Lua == operator.
Arguments:
* `got`
* `expected`
* `tolerance` (number) maximum elementwise difference between `a` and `b`.
* `negate` (boolean) if negate is true, we invert success and failure.
Returns:
1. success, boolean that indicates success
2. failure_message, string or nil
]]
function check.areEq(got, expected, tolerance, negate)
local errMessage
if type(got) ~= type(expected) then
if not negate then
errMessage = 'EQ failed: values have different types (first: '
.. type(got) .. ', second: ' .. type(expected) .. ')'
end
return negate, errMessage
elseif type(got) == 'number' then
local diff = math.abs(got - expected)
local ok = (diff <= tolerance)
if negate then
ok = not ok
end
if not ok then
if negate then
errMessage = string.format("NE failed: %s == %s",
tostring(got), tostring(expected))
else
errMessage = string.format("EQ failed: %s ~= %s",
tostring(got), tostring(expected))
end
if tolerance > 0 then
errMessage = errMessage .. " with tolerance=" .. tostring(tolerance)
end
end
return ok, errMessage
elseif type(expected) == "table" then
return check.areTablesEq(got, expected, tolerance, negate)
elseif torch.isTensor(got) then
return check.areTensorsEq(got, expected, tolerance, negate)
elseif torch.isStorage(got) then
return check.areStoragesEq(got, expected, tolerance, negate)
else
-- Below: we have the same type which is either userdata or a lua type
-- which is not a number.
local ok = (got == expected)
if negate then
ok = not ok
end
if not ok then
if negate then
errMessage = string.format("NE failed: %s (%s) == %s (%s)",
tostring(got), type(got),
tostring(expected), type(expected))
else
errMessage = string.format("EQ failed: %s (%s) ~= %s (%s)",
tostring(got), type(got),
tostring(expected), type(expected))
end
end
return ok, errMessage
end
end
--[[ Tests for (deep) table equality.
Tables are compared recursively. Keys and types of the associated values must
match, recursively. Numbers are compared with the given tolerance.
Torch tensors and storages are compared with the given tolerance on their
elementwise difference. Other types are compared for strict equality with the
regular Lua == operator.
Arguments:
* `t1` (table)
* `t2` (table)
* `tolerance` (number) maximum elementwise difference between `a` and `b`.
* `negate` (boolean) if negate is true, we invert success and failure.
Returns:
1. success, boolean that indicates success
2. failure_message, string or nil
]]
function check.areTablesEq(t1, t2, tolerance, negate)
-- Implementation detail: Instead of doing a depth-first table comparison
-- check (for example, using recursion), let's do a breadth-first search
-- using a queue. Why? Because if we have two tables that are quite deep
-- (e.g., a gModule from nngraph) and they differ, it's more useful to
-- the user to show how they differ at as shallow a depth as possible.
local queue = {}
queue._head = 1
queue._tail = 1
function queue.isEmpty()
return queue._tail == queue._head
end
function queue.pop()
queue._head = queue._head + 1
return queue[queue._head - 1]
end
function queue.push(value)
queue[queue._tail] = value
queue._tail = queue._tail + 1
end
queue.push({t1, t2})
while not queue.isEmpty() do
local location
t1, t2, location = unpack(queue.pop())
local function toSublocation(key)
local keyAsString = tostring(key)
return (location and location .. "." .. keyAsString) or keyAsString
end
for key, value1 in pairs(t1) do
local sublocation = toSublocation(key)
if t2[key] == nil then
return negate, string.format(
"Entry %s missing in second table (is %s in first)",
sublocation, tostring(value1))
end
local value2 = t2[key]
if type(value1) == 'table' and type(value2) == 'table' then
queue.push({value1, value2, sublocation})
else
local ok, message = check.areEq(value1, value2, tolerance, false)
if not ok then
message = 'At table location ' .. sublocation .. ': ' .. message
return negate, message
end
end
end
for key, value2 in pairs(t2) do
local sublocation = toSublocation(key)
if t1[key] == nil then
return negate, string.format(
"Entry %s missing in first table (is %s in second)",
sublocation, tostring(value2))
end
end
end
return not negate, 'The tables are equal'
end
Timer.c 0000664 0000000 0000000 00000011174 13162462543 0012314 0 ustar 00root root 0000000 0000000 #include "general.h"
#ifdef _WIN32
#include <windows.h>
#include <assert.h>
#define TimeType __int64
static __declspec( thread ) TimeType ticksPerSecond = 0;
/*
* There is an example of getrusage for Windows at the following link:
* https://github.com/openvswitch/ovs/blob/master/lib/getrusage-windows.c
*/
#else
#include <sys/time.h>
#include <sys/resource.h>
#define TimeType double
#endif
typedef struct _Timer
{
int isRunning;
TimeType totalrealtime;
TimeType totalusertime;
TimeType totalsystime;
TimeType startrealtime;
TimeType startusertime;
TimeType startsystime;
} Timer;
static TimeType torch_Timer_realtime()
{
#ifdef _WIN32
TimeType current;
QueryPerformanceCounter(&current);
return current;
#else
struct timeval current;
gettimeofday(&current, NULL);
return (current.tv_sec + current.tv_usec/1000000.0);
#endif
}
static TimeType torch_Timer_usertime()
{
#ifdef _WIN32
return torch_Timer_realtime();
#else
struct rusage current;
getrusage(RUSAGE_SELF, &current);
return (current.ru_utime.tv_sec + current.ru_utime.tv_usec/1000000.0);
#endif
}
static TimeType torch_Timer_systime()
{
#ifdef _WIN32
return 0;
#else
struct rusage current;
getrusage(RUSAGE_SELF, &current);
return (current.ru_stime.tv_sec + current.ru_stime.tv_usec/1000000.0);
#endif
}
static int torch_Timer_new(lua_State *L)
{
#ifdef _WIN32
if (ticksPerSecond == 0)
{
assert(sizeof(LARGE_INTEGER) == sizeof(__int64));
QueryPerformanceFrequency(&ticksPerSecond);
}
#endif
Timer *timer = luaT_alloc(L, sizeof(Timer));
timer->isRunning = 1;
timer->totalrealtime = 0;
timer->totalusertime = 0;
timer->totalsystime = 0;
timer->startrealtime = torch_Timer_realtime();
timer->startusertime = torch_Timer_usertime();
timer->startsystime = torch_Timer_systime();
luaT_pushudata(L, timer, "torch.Timer");
return 1;
}
static int torch_Timer_reset(lua_State *L)
{
Timer *timer = luaT_checkudata(L, 1, "torch.Timer");
timer->totalrealtime = 0;
timer->totalusertime = 0;
timer->totalsystime = 0;
timer->startrealtime = torch_Timer_realtime();
timer->startusertime = torch_Timer_usertime();
timer->startsystime = torch_Timer_systime();
lua_settop(L, 1);
return 1;
}
static int torch_Timer_free(lua_State *L)
{
Timer *timer = luaT_checkudata(L, 1, "torch.Timer");
luaT_free(L, timer);
return 0;
}
static int torch_Timer_stop(lua_State *L)
{
Timer *timer = luaT_checkudata(L, 1, "torch.Timer");
if(timer->isRunning)
{
TimeType realtime = torch_Timer_realtime() - timer->startrealtime;
TimeType usertime = torch_Timer_usertime() - timer->startusertime;
TimeType systime = torch_Timer_systime() - timer->startsystime;
timer->totalrealtime += realtime;
timer->totalusertime += usertime;
timer->totalsystime += systime;
timer->isRunning = 0;
}
lua_settop(L, 1);
return 1;
}
static int torch_Timer_resume(lua_State *L)
{
Timer *timer = luaT_checkudata(L, 1, "torch.Timer");
if(!timer->isRunning)
{
timer->isRunning = 1;
timer->startrealtime = torch_Timer_realtime();
timer->startusertime = torch_Timer_usertime();
timer->startsystime = torch_Timer_systime();
}
lua_settop(L, 1);
return 1;
}
static int torch_Timer_time(lua_State *L)
{
Timer *timer = luaT_checkudata(L, 1, "torch.Timer");
double realtime = (timer->isRunning ? (timer->totalrealtime + torch_Timer_realtime() - timer->startrealtime) : timer->totalrealtime);
double usertime = (timer->isRunning ? (timer->totalusertime + torch_Timer_usertime() - timer->startusertime) : timer->totalusertime);
double systime = (timer->isRunning ? (timer->totalsystime + torch_Timer_systime() - timer->startsystime) : timer->totalsystime);
#ifdef _WIN32
realtime /= ticksPerSecond;
usertime /= ticksPerSecond;
systime /= ticksPerSecond;
#endif
lua_createtable(L, 0, 3);
lua_pushnumber(L, realtime);
lua_setfield(L, -2, "real");
lua_pushnumber(L, usertime);
lua_setfield(L, -2, "user");
lua_pushnumber(L, systime);
lua_setfield(L, -2, "sys");
return 1;
}
static int torch_Timer___tostring__(lua_State *L)
{
Timer *timer = luaT_checkudata(L, 1, "torch.Timer");
lua_pushfstring(L, "torch.Timer [status: %s]", (timer->isRunning ? "running" : "stopped"));
return 1;
}
static const struct luaL_Reg torch_Timer__ [] = {
{"reset", torch_Timer_reset},
{"stop", torch_Timer_stop},
{"resume", torch_Timer_resume},
{"time", torch_Timer_time},
{"__tostring__", torch_Timer___tostring__},
{NULL, NULL}
};
void torch_Timer_init(lua_State *L)
{
luaT_newmetatable(L, "torch.Timer", NULL, torch_Timer_new, torch_Timer_free, NULL);
luaT_setfuncs(L, torch_Timer__, 0);
lua_pop(L, 1);
}
cmake/ 0000775 0000000 0000000 00000000000 13162462543 0012144 5 ustar 00root root 0000000 0000000 cmake/TorchConfig.cmake.in 0000664 0000000 0000000 00000002652 13162462543 0015765 0 ustar 00root root 0000000 0000000 # This (ugly) setup assumes:
# CMAKE_PREFIX_PATH = LUA_BINDIR
# CMAKE_INSTALL_PREFIX = PREFIX
# Define Torch basic subpaths
SET(Torch_INSTALL_PREFIX "@Torch_INSTALL_PREFIX@")
SET(Torch_INSTALL_BIN_SUBDIR "@Torch_INSTALL_BIN_SUBDIR@")
SET(Torch_INSTALL_MAN_SUBDIR "@Torch_INSTALL_MAN_SUBDIR@")
SET(Torch_INSTALL_LIB_SUBDIR "@Torch_INSTALL_LIB_SUBDIR@")
SET(Torch_INSTALL_SHARE_SUBDIR "@Torch_INSTALL_SHARE_SUBDIR@")
SET(Torch_INSTALL_INCLUDE_SUBDIR "@Torch_INSTALL_INCLUDE_SUBDIR@")
SET(Torch_INSTALL_CMAKE_SUBDIR "@Torch_INSTALL_CMAKE_SUBDIR@")
SET(Torch_INSTALL_LUA_PATH_SUBDIR "@Torch_INSTALL_LUA_PATH_SUBDIR@")
SET(Torch_INSTALL_LUA_CPATH_SUBDIR "@Torch_INSTALL_LUA_CPATH_SUBDIR@")
SET(Torch_INSTALL_CMAKE_RIDBUS "@Torch_INSTALL_CMAKE_RIDBUS@")
FILE(RELATIVE_PATH Torch_INSTALL_LUA_PATH_SUBDIR "${Torch_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/lua")
FILE(RELATIVE_PATH Torch_INSTALL_LUA_CPATH_SUBDIR "${Torch_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/lib")
SET(CMAKE_MODULE_PATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_CMAKE_SUBDIR}" "${CMAKE_MODULE_PATH}")
SET(CMAKE_INSTALL_PREFIX "${Torch_INSTALL_PREFIX}") # override
INCLUDE(TorchPathsInit)
INCLUDE(TorchPackage)
INCLUDE(TorchWrap)
# Define Torch basic targets
INCLUDE(TorchExports)
INCLUDE_DIRECTORIES("${Torch_INSTALL_INCLUDE}")
INCLUDE_DIRECTORIES("${Torch_INSTALL_INCLUDE}/TH")
LINK_DIRECTORIES("${Torch_INSTALL_LIB}")
MESSAGE(STATUS "Found Torch7 in ${Torch_INSTALL_PREFIX}")
cmake/TorchExports.cmake 0000664 0000000 0000000 00000001077 13162462543 0015617 0 ustar 00root root 0000000 0000000 INSTALL(EXPORT TH-exports
DESTINATION "${Torch_INSTALL_CMAKE_SUBDIR}"
FILE "TorchExports.cmake")
CONFIGURE_FILE("cmake/TorchConfig.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake-exports/TorchConfig.cmake" @ONLY)
CONFIGURE_FILE("cmake/TorchWrap.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake-exports/TorchWrap.cmake" @ONLY)
INSTALL(
FILES
"${CMAKE_CURRENT_BINARY_DIR}/cmake-exports/TorchConfig.cmake"
"${CMAKE_CURRENT_BINARY_DIR}/cmake-exports/TorchWrap.cmake"
"cmake/TorchPathsInit.cmake"
"cmake/TorchPackage.cmake"
DESTINATION "${Torch_INSTALL_CMAKE_SUBDIR}")
cmake/TorchPackage.cmake 0000664 0000000 0000000 00000003551 13162462543 0015505 0 ustar 00root root 0000000 0000000 # -*- cmake -*-
MACRO(ADD_TORCH_LIBRARY package type src)
IF ("${type}" STREQUAL "STATIC")
if ("${src}" MATCHES "cu$" OR "${src}" MATCHES "cu;")
CUDA_ADD_LIBRARY(${package} STATIC ${src})
else()
ADD_LIBRARY(${package} STATIC ${src})
endif()
ELSE()
if ("${src}" MATCHES "cu$" OR "${src}" MATCHES "cu;")
CUDA_ADD_LIBRARY(${package} ${type} ${src})
else()
ADD_LIBRARY(${package} ${type} ${src})
endif()
ENDIF()
ENDMACRO()
MACRO(ADD_TORCH_PACKAGE package src luasrc)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
INCLUDE_DIRECTORIES(${Torch_LUA_INCLUDE_DIR})
### C/C++ sources
# As per the CMake docs, macro arguments are not variables, so the simple test syntax does not work
IF(NOT "${src}" STREQUAL "")
ADD_TORCH_LIBRARY(${package} MODULE "${src}")
### Torch packages suppose the library prefix is "lib"
SET_TARGET_PROPERTIES(${package} PROPERTIES
PREFIX "lib"
IMPORT_PREFIX "lib"
INSTALL_NAME_DIR "@executable_path/${Torch_INSTALL_BIN2CPATH}")
IF(APPLE)
SET_TARGET_PROPERTIES(${package} PROPERTIES
LINK_FLAGS "-undefined dynamic_lookup")
ENDIF()
IF (BUILD_STATIC OR "$ENV{STATIC_TH}" STREQUAL "YES")
ADD_TORCH_LIBRARY(${package}_static STATIC "${src}")
SET_TARGET_PROPERTIES(${package}_static PROPERTIES
COMPILE_FLAGS "-fPIC")
SET_TARGET_PROPERTIES(${package}_static PROPERTIES
PREFIX "lib" IMPORT_PREFIX "lib" OUTPUT_NAME "${package}")
ENDIF()
INSTALL(TARGETS ${package}
RUNTIME DESTINATION ${Torch_INSTALL_LUA_CPATH_SUBDIR}
LIBRARY DESTINATION ${Torch_INSTALL_LUA_CPATH_SUBDIR})
ENDIF(NOT "${src}" STREQUAL "")
### lua sources
IF(NOT "${luasrc}" STREQUAL "")
INSTALL(FILES ${luasrc}
DESTINATION ${Torch_INSTALL_LUA_PATH_SUBDIR}/${package})
ENDIF(NOT "${luasrc}" STREQUAL "")
ENDMACRO(ADD_TORCH_PACKAGE)
cmake/TorchPaths.cmake 0000664 0000000 0000000 00000002507 13162462543 0015231 0 ustar 00root root 0000000 0000000 # workaround another annoying cmake bug
# http://public.kitware.com/Bug/view.php?id=14462
# https://awesome.naquadah.org/bugs/index.php?do=details&task_id=869
MACRO(NORMALIZE_PATH _path_)
get_filename_component(${_path_}_abs "${${_path_}}" ABSOLUTE)
SET(${_path_} "${${_path_}_abs}")
ENDMACRO()
NORMALIZE_PATH(LUA_BINDIR)
NORMALIZE_PATH(LUA_LIBDIR)
NORMALIZE_PATH(LUA_INCDIR)
NORMALIZE_PATH(LUADIR)
NORMALIZE_PATH(LIBDIR)
GET_FILENAME_COMPONENT(CMAKE_INSTALL_PREFIX "${LUA_BINDIR}" PATH)
SET(Torch_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
FILE(RELATIVE_PATH Torch_INSTALL_BIN_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LUA_BINDIR}")
FILE(RELATIVE_PATH Torch_INSTALL_LIB_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LUA_LIBDIR}")
FILE(RELATIVE_PATH Torch_INSTALL_INCLUDE_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LUA_INCDIR}")
SET(Torch_INSTALL_MAN_SUBDIR "share/man" CACHE PATH
"Install dir for man pages (relative to Torch_INSTALL_PREFIX)")
SET(Torch_INSTALL_SHARE_SUBDIR "share" CACHE PATH
"Install dir for data (relative to Torch_INSTALL_PREFIX)")
SET(Torch_INSTALL_CMAKE_SUBDIR "share/cmake/torch" CACHE PATH
"Install dir for .cmake files (relative to Torch_INSTALL_PREFIX)")
FILE(RELATIVE_PATH Torch_INSTALL_LUA_PATH_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LUADIR}")
FILE(RELATIVE_PATH Torch_INSTALL_LUA_CPATH_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LIBDIR}")
cmake/TorchPathsInit.cmake 0000664 0000000 0000000 00000004160 13162462543 0016052 0 ustar 00root root 0000000 0000000 SET(Torch_INSTALL_BIN "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_BIN_SUBDIR}")
SET(Torch_INSTALL_MAN "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_MAN_SUBDIR}")
SET(Torch_INSTALL_LIB "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LIB_SUBDIR}")
SET(Torch_INSTALL_SHARE "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_SHARE_SUBDIR}")
SET(Torch_INSTALL_INCLUDE "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_INCLUDE_SUBDIR}")
#SET(Torch_INSTALL_DOK "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_DOK_SUBDIR}")
#SET(Torch_INSTALL_HTML "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_HTML_SUBDIR}")
SET(Torch_INSTALL_CMAKE "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_CMAKE_SUBDIR}")
SET(Torch_INSTALL_LUA_PATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUA_PATH_SUBDIR}")
#SET(Torch_INSTALL_LUA_PKG_PATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUA_PKG_PATH_SUBDIR}")
SET(Torch_INSTALL_LUA_CPATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUA_CPATH_SUBDIR}")
#SET(Torch_INSTALL_LUAROCKS_SYSCONF "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUAROCKS_SYSCONF_SUBDIR}")
# reverse relative path to prefix (ridbus is subdir spelled backwards)
FILE(RELATIVE_PATH Torch_INSTALL_BIN_RIDBUS "${Torch_INSTALL_BIN}" "${Torch_INSTALL_PREFIX}/.")
FILE(RELATIVE_PATH Torch_INSTALL_CMAKE_RIDBUS "${Torch_INSTALL_CMAKE}" "${Torch_INSTALL_PREFIX}/.")
GET_FILENAME_COMPONENT(Torch_INSTALL_BIN_RIDBUS "${Torch_INSTALL_BIN_RIDBUS}" PATH)
GET_FILENAME_COMPONENT(Torch_INSTALL_CMAKE_RIDBUS "${Torch_INSTALL_CMAKE_RIDBUS}" PATH)
IF(UNIX)
OPTION(WITH_RPATH "Build libraries with executable rpaths" ON)
IF(WITH_RPATH)
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
FILE(RELATIVE_PATH Torch_INSTALL_BIN2LIB
"${Torch_INSTALL_BIN}" "${Torch_INSTALL_LIB}")
IF(APPLE)
SET(CMAKE_MACOSX_RPATH TRUE) # @rpath in libs
SET(CMAKE_INSTALL_RPATH "@executable_path/${Torch_INSTALL_BIN2LIB}") # exec
ELSE()
SET(CMAKE_INSTALL_RPATH "\$ORIGIN/${Torch_INSTALL_BIN2LIB}")
ENDIF()
ELSE()
SET(CMAKE_MACOSX_RPATH FALSE) # no @rpath in libs
ENDIF()
ENDIF(UNIX)
IF (WIN32)
SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
ENDIF (WIN32)
cmake/TorchWrap.cmake 0000664 0000000 0000000 00000001470 13162462543 0015061 0 ustar 00root root 0000000 0000000 MACRO(ADD_TORCH_WRAP target luafile)
INCLUDE_DIRECTORIES("${CMAKE_CURRENT_BINARY_DIR}")
GET_FILENAME_COMPONENT(_file_ "${luafile}" NAME_WE)
SET(cfile "${_file_}.c")
IF (DEFINED CWRAP_CUSTOM_LUA)
ADD_CUSTOM_COMMAND(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
COMMAND ${CWRAP_CUSTOM_LUA} ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
DEPENDS "${luafile}")
ELSE (DEFINED CWRAP_CUSTOM_LUA)
ADD_CUSTOM_COMMAND(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
COMMAND ${LUA} ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
DEPENDS "${luafile}")
ENDIF (DEFINED CWRAP_CUSTOM_LUA)
ENDMACRO(ADD_TORCH_WRAP)
cmake/TorchWrap.cmake.in 0000664 0000000 0000000 00000001605 13162462543 0015466 0 ustar 00root root 0000000 0000000 MACRO(ADD_TORCH_WRAP target luafile)
INCLUDE_DIRECTORIES("${CMAKE_CURRENT_BINARY_DIR}")
GET_FILENAME_COMPONENT(_file_ "${luafile}" NAME_WE)
SET(cfile "${_file_}.c")
IF (DEFINED CWRAP_CUSTOM_LUA)
ADD_CUSTOM_COMMAND(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
COMMAND ${CWRAP_CUSTOM_LUA} ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
DEPENDS "${luafile}")
ELSE (DEFINED CWRAP_CUSTOM_LUA)
ADD_CUSTOM_COMMAND(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
COMMAND @LUA@ ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
DEPENDS "${luafile}")
ENDIF (DEFINED CWRAP_CUSTOM_LUA)
ADD_CUSTOM_TARGET(${target} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/${cfile}")
ENDMACRO(ADD_TORCH_WRAP)
doc/ 0000775 0000000 0000000 00000000000 13162462543 0011631 5 ustar 00root root 0000000 0000000 doc/cmdline.md 0000664 0000000 0000000 00000007502 13162462543 0013572 0 ustar 00root root 0000000 0000000
# CmdLine #
This class provides a parameter parsing framework which is very
useful when one needs to run several experiments that rely on
different parameter settings that are passed on the command line.
This class also overrides the default print function so that all
output is directed both to a log file and to the screen at the same time.
A sample `lua` file is given below that makes use of `CmdLine`
class.
```lua
cmd = torch.CmdLine()
cmd:text()
cmd:text()
cmd:text('Training a simple network')
cmd:text()
cmd:text('Options')
cmd:option('-seed',123,'initial random seed')
cmd:option('-booloption',false,'boolean option')
cmd:option('-stroption','mystring','string option')
cmd:text()
-- parse input params
params = cmd:parse(arg)
params.rundir = cmd:string('experiment', params, {dir=true})
paths.mkdir(params.rundir)
-- create log file
cmd:log(params.rundir .. '/log', params)
```
When this file is run from the `th` command line as follows
```shell
# th myscript.lua
```
It will produce the following output:
```
[program started on Tue Jan 10 15:33:49 2012]
[command line arguments]
booloption false
seed 123
rundir experiment
stroption mystring
[----------------------]
booloption false
seed 123
rundir experiment
stroption mystring
```
The same output will also be written to the file
`experiment/log`. Whenever one of the options is passed on the
command line and differs from its default value, the `rundir`
name is produced to reflect the parameter setting.
```shell
# th myscript.lua -seed 456 -stroption mycustomstring
```
This will produce the following output:
```
[program started on Tue Jan 10 15:36:55 2012]
[command line arguments]
booloption false
seed 456
rundir experiment,seed=456,stroption=mycustomstring
stroption mycustomstring
[----------------------]
booloption false
seed 456
rundir experiment,seed=456,stroption=mycustomstring
stroption mycustomstring
```
and the output will be logged in
`experiment,seed=456,stroption=mycustomstring/log`
### addTime([name] [,format]) ###
Adds a prefix to every line in the log file containing the date/time in the
given format, together with an optional name. The date/time format is
the same as in `os.date()`. Note that the prefix is only added to the
log file, not to the screen output. The default value for name is empty
and the default format is '%F %T'.
For example, the following commands produce this output:
```lua
> cmd:addTime('your project name','%F %T')
> print('Your log message')
```
```
2012-02-07 08:21:56[your project name]: Your log message
```
### log(filename, parameter_table) ###
It sets the log filename to `filename` and prints the values of
parameters in the `parameter_table`. If filename is an open file
descriptor, it will write to the file instead of creating a new one.
### option(name, default, help) ###
Stores an option argument. The name should always start with '-'.
### [table] parse(arg) ###
Parses a given table. `arg` is by default the argument table that
is created by `lua` using the command line arguments passed to the
executable. Returns a table of option values.
### silent() ###
Silences the output to standard output. The only output is written to
the log file.
### [string] string(prefix, params, ignore) ###
Returns a string representation of the options by concatenating the
non-default options. `ignore` is a table such as `{dir=true}`, which will
ensure that the option named `dir` is ignored while creating the
string representation.
This function is useful for creating unique experiment directories that
depend on the parameter settings.
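For example, a minimal sketch (reusing `params` from `cmd:parse(arg)` in the script above, run with `-seed 456`):
```lua
params.rundir = cmd:string('experiment', params, {dir=true})
-- params.rundir == 'experiment,seed=456'
-- Only options differing from their defaults are concatenated; any
-- option listed in the ignore table (here a hypothetical 'dir') is skipped.
```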
### text(string) ###
Logs a custom text message.
doc/diskfile.md 0000664 0000000 0000000 00000005310 13162462543 0013744 0 ustar 00root root 0000000 0000000
# DiskFile #
Parent classes: [File](file.md)
A `DiskFile` is a particular `File` which is able to perform basic read/write operations
on a file stored on disk. It implements all methods described in [File](file.md), and
some additional methods related to _endian_ encoding.
By default, a `DiskFile` is in [ASCII](file.md#torch.File.ascii) mode. If changed to
the [binary](file.md#torch.File.binary) mode, the default endian encoding is the native
computer one.
The file may be opened in read, write, or read-write mode, depending on the parameter
`mode` (which can take the value `"r"`, `"w"` or `"rw"` respectively)
given to the [torch.DiskFile(fileName, mode)](#torch.DiskFile).
### torch.DiskFile(fileName, [mode], [quiet]) ###
_Constructor_ which opens `fileName` on disk, using the given `mode`. Valid values for `mode` are
`"r"` (read), `"w"` (write) or `"rw"` (read-write). Default is read mode.
In read-write mode, the file _will be created_ if it does not exist. If it
exists, the position will be set to the beginning of the file after opening.
If (and only if) `quiet` is `true`, no error will be raised in case of
problem opening the file: instead `nil` will be returned.
The file is opened in [ASCII](file.md#torch.File.ascii) mode by default.
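For instance, a minimal sketch (the file name `foo.bin` is purely illustrative):
```lua
-- Open (or create) a file in read-write mode; positions are 1-based.
local f = torch.DiskFile('foo.bin', 'rw')
f:binary()          -- switch from the default ASCII mode
f:writeInt(42)
f:seek(1)           -- go back to the beginning of the file
print(f:readInt())  -- 42
f:close()
```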
### bigEndianEncoding() ###
In [binary](file.md#torch.File.binary) mode, force encoding in _big endian_.
(_big end first_: decreasing numeric significance with increasing memory
addresses)
### [boolean] isBigEndianCPU() ###
Returns `true` if, and only if, the computer CPU operates in _big endian_.
_Big end first_: decreasing numeric significance with increasing
memory addresses.
### [boolean] isLittleEndianCPU() ###
Returns `true` if, and only if, the computer CPU operates in _little endian_.
_Little end first_: increasing numeric significance with increasing
memory addresses.
### littleEndianEncoding() ###
In [binary](file.md#torch.File.binary) mode, force encoding in _little endian_.
(_little end first_: increasing numeric significance with increasing memory
addresses)
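For example, a small sketch that writes with a fixed byte order so the file can be read back on any machine (`data.bin` is an illustrative name):
```lua
local f = torch.DiskFile('data.bin', 'w')
f:binary()
f:littleEndianEncoding() -- fixed byte order, regardless of the host CPU
f:writeInt(1)
f:close()
```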
### nativeEndianEncoding() ###
In [binary](file.md#torch.File.binary) mode, force encoding in _native endian_.
### longSize([size]) ###
Longs will be written and read from the file as `size` bytes long, which
can be 0, 4 or 8. 0 means system default.
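A short sketch (assuming the file is meant to be read back by a reader that also uses 8-byte longs):
```lua
local f = torch.DiskFile('longs.bin', 'w')
f:binary()
f:longSize(8) -- always write longs as 8 bytes, whatever the system default
f:writeLong(123456789)
f:close()
```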
### noBuffer() ###
Disables read and write buffering on the `DiskFile`.
doc/file.md 0000664 0000000 0000000 00000033443 13162462543 0013101 0 ustar 00root root 0000000 0000000
# File #
This is an _abstract_ class. It defines most methods implemented by its
child classes, like [DiskFile](diskfile.md),
[MemoryFile](memoryfile.md) and [PipeFile](pipefile.md).
Methods defined here are intended for basic read/write functionalities.
Read/write methods might write in [ASCII](#torch.File.ascii) mode or
[binary](#torch.File.binary) mode.
In [ASCII](#torch.File.ascii) mode, numbers are converted into a human-readable
format (characters). Booleans are converted into `0` (false) or `1` (true).
In [binary](#torch.File.binary) mode, numbers and booleans are directly encoded
as they are represented in a register of the computer. While not human
readable and less portable, binary mode is considerably faster.
In [ASCII](#torch.File.ascii) mode, if the default option
[autoSpacing()](#torch.File.autoSpacing) is chosen, a space will be generated
after each written number or boolean. A carriage return will also be added
after each call to a write method. With this option, the spaces are
expected to exist when reading. This option can be deactivated with
[noAutoSpacing()](#torch.File.noAutoSpacing).
A `Lua` error might or might not be generated in case of read/write error
or problem in the file. This depends on the choice made between
[quiet()](#torch.File.quiet) and [pedantic()](#torch.File.pedantic) options. It
is possible to query if an error occurred in the last operation by calling
[hasError()](#torch.File.hasError).
## Read methods ##
There are three types of read methods:
- `[number] readTYPE()`
- `[TYPEStorage] readTYPE(n)`
- `[number] readTYPE(TYPEStorage)`
where `TYPE` can be either `Byte`, `Char`, `Short`, `Int`, `Long`, `Float` or `Double`.
A convenience method also exists for boolean types: `[boolean] readBool()`. It reads
a value from the file with `readInt()` and returns `true` if and only if this value is `1`. It is not possible
to read storages of booleans.
All these methods depend on the encoding choice: [ASCII](#torch.File.ascii)
or [binary](#torch.File.binary) mode. In [ASCII](#torch.File.ascii) mode, the
options [autoSpacing()](#torch.File.autoSpacing) and
[noAutoSpacing()](#torch.File.noAutoSpacing) also have an effect on these
methods.
If no parameter is given, one element is returned. This element is
converted to a `Lua` number when reading.
If `n` is given, `n` values of the specified type are read
and returned in a new [Storage](storage.md) of that particular type.
The storage size corresponds to the number of elements actually read.
If a `Storage` is given, the method will attempt to read a number of elements
equal to the size of the given storage, and fill up the storage with these elements.
The number of elements actually read is returned.
In case of read error, these methods will call the `Lua` error function using the default
[pedantic](#torch.File.pedantic) option, or stay quiet with the [quiet](#torch.File.quiet)
option. In the latter case, one can check if an error occurred with
[hasError()](#torch.File.hasError).
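To illustrate the three forms, a minimal sketch (assuming `nums.asc` contains at least nine ASCII numbers):
```lua
local f = torch.DiskFile('nums.asc', 'r')
local one = f:readInt()      -- a single element, returned as a Lua number
local five = f:readInt(5)    -- a new torch.IntStorage holding 5 elements
local st = torch.IntStorage(3)
local nread = f:readInt(st)  -- fills st, returns the number actually read
f:close()
```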
## Write methods ##
There are two types of write methods:
- `[number] writeTYPE(number)`
- `[number] writeTYPE(TYPEStorage)`
where `TYPE` can be either `Byte`, `Char`, `Short`, `Int`, `Long`, `Float` or `Double`.
A convenience method also exists for boolean types: `writeBool(value)`. If `value` is `nil` or
not `true`, it is equivalent to a `writeInt(0)` call, else to `writeInt(1)`. It is not possible
to write storages of booleans.
All these methods depend on the encoding choice: [ASCII](#torch.File.ascii)
or [binary](#torch.File.binary) mode. In [ASCII](#torch.File.ascii) mode, the
options [autoSpacing()](#torch.File.autoSpacing) and
[noAutoSpacing()](#torch.File.noAutoSpacing) also have an effect on these
methods.
If one `Lua` number is given, this number is converted according to the
name of the method when writing (e.g. `writeInt(3.14)` will write `3`).
If a `Storage` is given, the method will attempt to write all the elements contained
in the storage.
These methods return the number of elements actually written.
In case of write error, these methods will call the `Lua` error function using the default
[pedantic](#torch.File.pedantic) option, or stay quiet with the [quiet](#torch.File.quiet)
option. In the latter case, one can check if an error occurred with
[hasError()](#torch.File.hasError).
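A matching sketch for the write side (`out.asc` is an illustrative name):
```lua
local f = torch.DiskFile('out.asc', 'w')
f:writeInt(3.14)                  -- writes 3, as per the method name
local s = torch.DoubleStorage({1.5, 2.5})
local nwritten = f:writeDouble(s) -- writes both elements; returns 2
f:close()
```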
## Serialization methods ##
These methods allow the user to save any serializable object on disk and
reload it later in its original state. In other words, they can perform a
_deep_ copy of an object into a given `File`.
Serializable objects are `Torch` objects having a `read()` and
`write()` method. `Lua` objects such as `table`, `number` or
`string` or _pure Lua_ functions are also serializable.
If the object to save contains several other objects (let us say it is a tree
of objects), then objects appearing several times in this tree will be
_saved only once_. This saves disk space, speeds up loading/saving and
respects the dependencies between objects.
Interestingly, if the `File` is a [MemoryFile](memoryfile.md), it allows
the user to easily make a _clone_ of any serializable object:
```lua
file = torch.MemoryFile() -- creates a file in memory
file:writeObject(object) -- writes the object into file
file:seek(1) -- comes back at the beginning of the file
objectClone = file:readObject() -- gets a clone of object
```
### readObject() ###
Returns the next [serializable](#torch.File.serialization) object saved beforehand
in the file with [writeObject()](#torch.File.writeObject).
Note that objects which were [written](#torch.File.writeObject) with the same
reference still have the same reference after loading.
Example:
```lua
-- creates an array which contains twice the same tensor
array = {}
x = torch.Tensor(1)
table.insert(array, x)
table.insert(array, x)
-- array[1] and array[2] refer to the same address
-- x[1] == array[1][1] == array[2][1] == 3.14
array[1][1] = 3.14
-- write the array on disk
file = torch.DiskFile('foo.asc', 'w')
file:writeObject(array)
file:close() -- make sure the data is written
-- reload the array
file = torch.DiskFile('foo.asc', 'r')
arrayNew = file:readObject()
-- arrayNew[1] and arrayNew[2] refer to the same address!
-- arrayNew[1][1] == arrayNew[2][1] == 3.14
-- so if we do now:
arrayNew[1][1] = 2.72
-- arrayNew[1][1] == arrayNew[2][1] == 2.72 !
```
### writeObject(object) ###
Writes `object` into the file. This object can be read later using
[readObject()](#torch.File.readObject). Serializable objects are `Torch`
objects having a `read()` and `write()` method. `Lua` objects such as
`table`, `number` or `string` or pure Lua functions are also serializable.
If the object has already been written in the file, only a _reference_ to
this already saved object will be written: this saves space and speeds up
writing; it also keeps the dependencies between objects intact.
As a consequence, if one writes an object, modifies one of its members, and writes the
object again in the same file, the modifications will not be recorded
in the file, as only a reference to the original will be written. See
[readObject()](#torch.File.readObject) for an example.
### [string] readString(format) ###
If `format` starts with `"*l"` then returns the next line in the `File`. The end-of-line character is skipped.
If `format` starts with `"*a"` then returns all the remaining contents of the `File`.
If no data is available, then an error is raised, except if `File` is in [quiet()](#torch.File.quiet) mode, in which case
it returns an empty string `''`; afterwards you can check that the last read failed due to end of file with your_file:[hasError()](#torch.File.hasError).
Because Torch is more precise on number typing, the `Lua` format `"*n"` is not supported:
instead use one of the [number read methods](#torch.File.read).
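For instance, a minimal sketch (assuming `log.txt` exists and contains at least one line):
```lua
local f = torch.DiskFile('log.txt', 'r')
local line = f:readString('*l') -- next line, end-of-line character skipped
local rest = f:readString('*a') -- all the remaining contents
f:close()
```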
### [number] writeString(str) ###
Writes the string `str` in the `File`. If the string cannot be written completely, an error is raised, except
if `File` is in [quiet()](#torch.File.quiet) mode, where it returns the number of characters actually written.
## General Access and Control Methods ##
### ascii() [default] ###
The data read or written will be in `ASCII` mode: all numbers are converted
to characters (human readable format) and booleans are converted to `0`
(false) or `1` (true). The input-output format in this mode depends on the
options [autoSpacing()](#torch.File.autoSpacing) and
[noAutoSpacing()](#torch.File.noAutoSpacing).
### autoSpacing() [default] ###
In [ASCII](#torch.File.ascii) mode, write additional spaces around the elements
written on disk: if writing a [Storage](storage.md), a space will be
generated between each _element_ and a _return line_ after the last
element. If only writing one element, a _return line_ will be generated
after this element.
Those spaces are expected to exist when reading in this mode.
This is the default behavior. You can de-activate this option with the
[noAutoSpacing()](#torch.File.noAutoSpacing) method.
### binary() ###
The data read or written will be in binary mode: the representation in the
`File` is the same as the one in the computer memory/registers (not human
readable). This mode is faster than [ASCII](#torch.File.ascii) but less
portable.
### clearError() ###
Clears the error flag returned by [hasError()](#torch.File.hasError).
### close() ###
Close the file. Any subsequent operation will generate a `Lua` error.
### noAutoSpacing() ###
In [ASCII](#torch.File.ascii) mode, do not put extra spaces between elements
written on disk. This is the opposite of the option
[autoSpacing()](#torch.File.autoSpacing).
### synchronize() ###
If the child class buffers the data while writing, ensures that the data
is actually written.
### pedantic() [default] ###
If this mode is chosen (which is the default), a `Lua` error will be
generated in case of error (which will cause the program to stop).
It is possible to use [quiet()](#torch.File.quiet) to avoid `Lua` error generation
and set a flag instead.
### [number] position() ###
Returns the current position (in bytes) in the file.
The first position is `1` (following Lua standard indexing).
### quiet() ###
If this mode is chosen instead of [pedantic()](#torch.File.pedantic), no `Lua`
error will be generated in case of read/write error. Instead, a flag will
be raised, readable through [hasError()](#torch.File.hasError). This flag can
be cleared with [clearError()](#torch.File.clearError).
Checking if a file is quiet can be performed using [isQuiet()](#torch.File.isQuiet).
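A sketch of the quiet-mode workflow (assuming a binary file `t.bin` exists):
```lua
local file = torch.DiskFile('t.bin', 'r'):binary()
file:quiet()                  -- failures set a flag instead of raising an error
local x = file:readInt()      -- may fail, e.g. at end of file
if file:hasError() then
   file:clearError()          -- reset the flag and continue
end
file:close()
```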
### seek(position) ###
Jumps to the given `position` in the file (in bytes). May raise an error
if the operation fails. The first position is `1` (following Lua standard indexing).
### seekEnd() ###
Jumps to the end of the file. May raise an error if the operation
fails.
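A common idiom built on these methods (a sketch, assuming a file `t.bin` exists) computes the file size by seeking to the end:
```lua
local file = torch.DiskFile('t.bin', 'r'):binary()
file:seekEnd()
local size = file:position() - 1  -- size in bytes, since positions start at 1
file:seek(1)                      -- rewind to the first byte
file:close()
print(size)
```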
## File state query ##
These methods allow the user to query the state of the given `File`.
### [boolean] hasError() ###
Returns whether an error has occurred since the last [clearError()](#torch.File.clearError) call, or since
the opening of the file if `clearError()` has never been called.
### [boolean] isQuiet() ###
Returns a boolean which tells if the file is in [quiet](#torch.File.quiet) mode or not.
### [boolean] isReadable() ###
Tells if one can read the file or not.
### [boolean] isWritable() ###
Tells if one can write in the file or not.
### [boolean] isAutoSpacing() ###
Returns `true` if [autoSpacing](#torch.File.autoSpacing) has been chosen.
### referenced(ref) ###
Sets the referenced property of the File to `ref`. `ref` has to be `true`
or `false`.
By default `ref` is `true`, which means that a `File` object keeps track of
objects written (using the [writeObject](#torch.File.writeObject) method) or
read (using the [readObject](#torch.File.readObject) method). Objects with the
same address will be written or read only once, meaning that this approach
preserves shared memory structure.
Keeping track of references has a cost: every object which is serialized in
the file is kept alive (even if one discards the object after
writing/reading) as `File` needs to track its pointer. This is not always
desirable behavior, especially when dealing with large data structures.
Another typical case where one does not want reference tracking is when
one needs to write the same tensor repeatedly to a file, changing its
contents each time: calling `referenced(false)` ensures the desired
behaviour, as shown in the sketch below.
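A minimal sketch of that streaming case (the file name `stream.bin` is arbitrary):
```lua
local t = torch.Tensor(5)
local file = torch.DiskFile('stream.bin', 'w'):binary()
file:referenced(false)   -- do not track objects: serialize the contents each time
for i = 1, 3 do
   t:fill(i)
   file:writeObject(t)   -- each call writes the tensor's current contents
end
file:close()
```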
### isReferenced() ###
Returns the state set by [referenced](#torch.File.referenced).
doc/gather.png (binary PNG image data omitted)