initial commit for C-lib
Nicholas Leonard committed Feb 23, 2017
1 parent ef98a97 commit f9500d3
Showing 10 changed files with 791 additions and 37 deletions.
66 changes: 56 additions & 10 deletions CMakeLists.txt
@@ -1,15 +1,61 @@
SET(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR})

CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR)
CMAKE_POLICY(VERSION 2.6)
IF(LUAROCKS_PREFIX)
MESSAGE(STATUS "Installing Torch through Luarocks")
STRING(REGEX REPLACE "(.*)lib/luarocks/rocks.*" "\\1" CMAKE_INSTALL_PREFIX "${LUAROCKS_PREFIX}")
MESSAGE(STATUS "Prefix inferred from Luarocks: ${CMAKE_INSTALL_PREFIX}")
ENDIF()

FIND_PACKAGE(Torch REQUIRED)

SET(src)
FILE(GLOB luasrc *.lua)
SET(luasrc ${luasrc})
ADD_SUBDIRECTORY(test)
ADD_TORCH_PACKAGE(rnn "${src}" "${luasrc}" "Recurrent Neural Networks")
SET(BUILD_STATIC YES) # makes sure static targets are enabled in ADD_TORCH_PACKAGE

SET(CMAKE_C_FLAGS "--std=c99 -pedantic -Werror -Wall -Wextra -Wno-unused-function -D_GNU_SOURCE ${CMAKE_C_FLAGS}")
SET(src
init.c
)
SET(luasrc
init.lua
AbstractRecurrent.lua
AbstractSequencer.lua
BiSequencer.lua
BiSequencerLM.lua
CopyGrad.lua
Dropout.lua
ExpandAs.lua
FastLSTM.lua
GRU.lua
LinearNoBias.lua
LookupTableMaskZero.lua
LSTM.lua
MaskZero.lua
MaskZeroCriterion.lua
Module.lua
Mufuru.lua
NormStabilizer.lua
Padding.lua
Recurrence.lua
Recurrent.lua
RecurrentAttention.lua
recursiveUtils.lua
Recursor.lua
Repeater.lua
RepeaterCriterion.lua
SAdd.lua
SeqBRNN.lua
SeqGRU.lua
SeqLSTM.lua
SeqLSTMP.lua
SeqReverseSequence.lua
Sequencer.lua
SequencerCriterion.lua
TrimZero.lua
ZeroGrad.lua
test/bigtest.lua
test/test.lua
)

ADD_TORCH_PACKAGE(rnn "${src}" "${luasrc}" "An RNN library for Torch")

TARGET_LINK_LIBRARIES(rnn luaT TH)

SET_TARGET_PROPERTIES(rnn_static PROPERTIES COMPILE_FLAGS "-fPIC -DSTATIC_TH")

INSTALL(FILES ${luasrc} DESTINATION "${Torch_INSTALL_LUA_PATH_SUBDIR}/rnn")
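
The luasrc list above now ships every Lua module together with a compiled component built from init.c. As a quick sanity check after installing the rock, one can load the package and probe for a couple of the listed modules; a minimal sketch, assuming the rock is installed through luarocks and that init.lua loads the new C library:

require 'rnn'          -- loads init.lua which, per this commit, also pulls in the code built from init.c
assert(nn.TrimZero)    -- modules from the luasrc list should be registered under nn
assert(nn.SeqLSTM)
print('rnn package loaded')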
40 changes: 20 additions & 20 deletions TrimZero.lua
@@ -6,10 +6,10 @@
-- Decorator that zeroes the output rows of the encapsulated module
-- for commensurate input rows which are tensors of zeros

-- The only difference from `MaskZero` is that it reduces computational costs
-- by varying a batch size, if any, for the case that varying lengths
-- are provided in the input. Notice that when the lengths are consistent,
-- `MaskZero` will be faster, because `TrimZero` has an operational cost.
-- The only difference from `MaskZero` is that it reduces computational costs
-- by varying a batch size, if any, for the case that varying lengths
-- are provided in the input. Notice that when the lengths are consistent,
-- `MaskZero` will be faster, because `TrimZero` has an operational cost.

-- In short, the result is the same as `MaskZero`'s; however, `TrimZero` is
-- faster than `MaskZero` only when the sentence lengths vary.
@@ -38,7 +38,7 @@ function TrimZero:recursiveMask(output, input, mask)
else
assert(torch.isTensor(input))
output = torch.isTensor(output) and output or input.new()

-- make sure mask has the same dimension as the input tensor
if torch.type(mask) ~= 'torch.LongTensor' then
local inputSize = input:size():fill(1)
@@ -48,7 +48,7 @@ function TrimZero:recursiveMask(output, input, mask)
end
mask:resize(inputSize)
end

-- build mask
if self.batchmode then
assert(torch.find, 'install torchx package : luarocks install torchx')
@@ -67,11 +67,11 @@ function TrimZero:recursiveMask(output, input, mask)
else
output:index(input, 1, torch.LongTensor{1}):zero()
end
else
if mask:dim() == 0 or mask:view(-1)[1] == 1 then
output:resize(input:size()):zero()
else
output:resize(input:size()):copy(input)
else
if mask:dim() == 0 or mask:view(-1)[1] == 1 then
output:resize(input:size()):zero()
else
output:resize(input:size()):copy(input)
end
end
end
@@ -87,14 +87,14 @@ function TrimZero:recursiveUnMask(output, input, mask)
else
assert(torch.isTensor(input))
output = torch.isTensor(output) and output or input.new()

-- make sure output has the same dimension as the mask
local inputSize = input:size()
if self.batchmode then
inputSize[1] = mask:size(1)
end
output:resize(inputSize):zero()

-- build mask
if self.batchmode then
assert(self._maskindices)
@@ -103,7 +103,7 @@ function TrimZero:recursiveUnMask(output, input, mask)
output:indexCopy(1, mask, input)
end
else
if mask:view(-1)[1] == 0 then
if mask:view(-1)[1] == 0 then
output:copy(input)
end
end
@@ -123,17 +123,17 @@ function TrimZero:updateOutput(input)
else
error("nInputDim error: "..rmi:dim()..", "..self.nInputDim)
end

-- build mask
local vectorDim = rmi:dim()
local vectorDim = rmi:dim()
self._zeroMask = self._zeroMask or rmi.new()
self._zeroMask:norm(rmi, 2, vectorDim)
self.zeroMask = self.zeroMask or ((torch.type(rmi) == 'torch.CudaTensor') and torch.CudaTensor() or torch.ByteTensor())
self._zeroMask.eq(self.zeroMask, self._zeroMask, 0)

-- forward through decorated module
self.temp = self:recursiveMask(self.temp, input, self.zeroMask)
output = self.module:updateOutput(self.temp)
output = self.modules[1]:updateOutput(self.temp)
self.output = self:recursiveUnMask(self.output, output, self.zeroMask, true)

return self.output
@@ -143,7 +143,7 @@ function TrimZero:updateGradInput(input, gradOutput)
self.temp = self:recursiveMask(self.temp, input, self.zeroMask)
self.gradTemp = self:recursiveMask(self.gradTemp, gradOutput, self.zeroMask)

local gradInput = self.module:updateGradInput(self.temp, self.gradTemp)
local gradInput = self.modules[1]:updateGradInput(self.temp, self.gradTemp)

self.gradInput = self:recursiveUnMask(self.gradInput, gradInput, self.zeroMask)

@@ -152,5 +152,5 @@ end

function TrimZero:accGradParameters(input, gradOutput, scale)
self.temp = self:recursiveMask(self.temp, input, self.zeroMask)
self.module:accGradParameters(self.temp, gradOutput, scale)
self.modules[1]:accGradParameters(self.temp, gradOutput, scale)
end
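
The substantive change in this file is that the decorated module is now reached through self.modules[1] (the slot used by nn.Decorator) instead of self.module; the masking logic itself is unchanged. For context, a minimal usage sketch, assuming TrimZero keeps MaskZero's (module, nInputDim) constructor and that torchx is installed for torch.find, as asserted in recursiveMask:

require 'rnn'
require 'torchx'                                   -- provides torch.find, needed in batch mode
local step = nn.TrimZero(nn.FastLSTM(4, 5), 1)     -- nInputDim=1: each step is batchsize x inputsize
local seq = nn.Sequencer(step)
local input = { torch.randn(2, 4), torch.zeros(2, 4), torch.randn(2, 4) }  -- step 2 is all zero padding
local output = seq:forward(input)
-- output[2] is all zeros, as with MaskZero, but the zero rows were trimmed from the batch
-- before the forward call rather than masked afterwards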
79 changes: 79 additions & 0 deletions VariableLength.lua
@@ -0,0 +1,79 @@
local VariableLength, parent = torch.class("nn.VariableLength", "nn.Decorator")

-- make sure your module has been set up for zero-masking (that is, module:maskZero())
function VariableLength:__init(module, lastOnly)
parent.__init(self, module)
-- only extract the last element of each sequence
self.lastOnly = lastOnly -- defaults to false
end

-- recursively masks input (inplace)
function VariableLength.recursiveMask(input, mask)
if torch.type(input) == 'table' then
for k,v in ipairs(input) do
VariableLength.recursiveMask(v, mask)
end
else
assert(torch.isTensor(input))

-- make sure mask has the same dimension as the input tensor
assert(mask:dim() == 2, "Expecting batchsize x seqlen mask tensor")
-- expand mask to input (if necessary)
local zeroMask
if input:dim() == 2 then
zeroMask = mask
elseif input:dim() > 2 then
local inputSize = input:size():fill(1)
inputSize[1] = input:size(1)
inputSize[2] = input:size(2)
mask:resize(inputSize)
zeroMask = mask:expandAs(input)
else
error"Expecting batchsize x seqlen [ x ...] input tensor"
end
-- zero-mask input in between sequences
input:maskedFill(zeroMask, 0)
end
end

function VariableLength:updateOutput(input)
-- input is a table of batchSize tensors
assert(torch.type(input) == 'table')
assert(torch.isTensor(input[1]))
local batchSize = #input

self._input = self._input or input[1].new()
-- mask is a binary tensor with 1 where self._input is zero (between sequence zero-mask)
self._mask = self._mask or torch.ByteTensor()

-- now we process input into _input.
-- indexes and mappedLengths are meta-information tables used below to restore the original sample order and lengths.
self.indexes, self.mappedLengths = self._input.nn.VariableLength_FromSamples(input, self._input, self._mask)

-- zero-mask the _input where mask is 1
self.recursiveMask(self._input, self._mask)

-- feedforward the zero-mask format through the decorated module
local output = self.modules[1]:updateOutput(self._input)

if self.lastOnly then
-- Extract the last time step of each sample.
-- self.output tensor has shape: batchSize [x outputSize]
self.output = torch.isTensor(self.output) and self.output or output.new()
self.output.nn.VariableLength_ToFinal(self.indexes, self.mappedLengths, output, self.output)
else
-- This is the reverse of the FromSamples repacking done above
self.output = self._input.nn.VariableLength_ToSamples(self.indexes, self.mappedLengths, output)
end

return self.output
end

function VariableLength:updateGradInput(input, gradInput)

return self.gradInput
end

function VariableLength:accGradParameters(input, gradInput, scale)

end
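
VariableLength is new in this commit and defers the heavy lifting to C routines (VariableLength_FromSamples, VariableLength_ToFinal, VariableLength_ToSamples) registered on the tensor's nn table. The intended call pattern is only sketched by the comments above, so the following is a speculative usage sketch: it assumes each sample is a seqlen x inputSize tensor of its own length and that the decorated module follows the module:maskZero() convention mentioned at the top of the file.

require 'rnn'
local seqlstm = nn.SeqLSTM(4, 5)
seqlstm:maskZero()                              -- decorated module must be set up for zero-masking
local vl = nn.VariableLength(seqlstm, true)     -- lastOnly=true: keep only each sample's final step
local input = { torch.randn(3, 4), torch.randn(7, 4), torch.randn(5, 4) } -- 3 samples of lengths 3, 7, 5
local output = vl:forward(input)                -- with lastOnly, roughly a 3 x 5 tensor of final states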
24 changes: 24 additions & 0 deletions error.h
@@ -0,0 +1,24 @@
#ifndef _ERROR_H_
#define _ERROR_H_

#include "luaT.h"
#include <string.h>

static inline int _lua_error(lua_State *L, int ret, const char* file, int line) {
int pos_ret = ret >= 0 ? ret : -ret;
return luaL_error(L, "ERROR: (%s, %d): (%d, %s)\n", file, line, pos_ret, strerror(pos_ret));
}

static inline int _lua_error_str(lua_State *L, const char *str, const char* file, int line) {
return luaL_error(L, "ERROR: (%s, %d): (%s)\n", file, line, str);
}

static inline int _lua_error_str_str(lua_State *L, const char *str, const char* file, int line, const char *extra) {
return luaL_error(L, "ERROR: (%s, %d): (%s: %s)\n", file, line, str, extra);
}

#define LUA_HANDLE_ERROR(L, ret) _lua_error(L, ret, __FILE__, __LINE__)
#define LUA_HANDLE_ERROR_STR(L, str) _lua_error_str(L, str, __FILE__, __LINE__)
#define LUA_HANDLE_ERROR_STR_STR(L, str, extra) _lua_error_str_str(L, str, __FILE__, __LINE__, extra)

#endif
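
These macros wrap luaL_error so that failures in the C code surface in Lua with the C file name and line number attached; LUA_HANDLE_ERROR additionally translates an errno-style return code through strerror. To illustrate the shape of the resulting message, here is a small Lua-side sketch in which raise_error is a stand-in faked in Lua (a real call would go through one of the C bindings added in this commit):

-- fake of a C function that fails through LUA_HANDLE_ERROR_STR
local function raise_error()
   error("ERROR: (VariableLength.c, 42): (invalid argument)")
end

local ok, err = pcall(raise_error)
-- ok is false; err carries position information plus the macro-formatted message, e.g.
--   "sketch.lua:3: ERROR: (VariableLength.c, 42): (invalid argument)"
print(ok, err)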