CategoricalEntropy unit tested

nicholas-leonard committed Aug 15, 2015
1 parent 1f7f0d9 commit 489b0f2
Showing 3 changed files with 86 additions and 0 deletions.
63 changes: 63 additions & 0 deletions CategoricalEntropy.lua
@@ -0,0 +1,63 @@
------------------------------------------------------------------------
--[[ CategoricalEntropy ]]--
-- Maximize the entropy of a categorical distribution (e.g. softmax).
-- H(X) = E[-log(p(X))] = -sum(p(x)log(p(x)))
-- where X = 1,...,N and N is the number of categories.
-- When minEntropy is set, entropy is only maximized for batches whose
-- entropy falls below it.
-- dH(X)/dp(x) = -p(x)/p(x) - log(p(x)) = -1 - log(p(x))
------------------------------------------------------------------------
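-- For intuition (illustrative numbers): with a uniform distribution over
-- N = 5 categories, p(x) = 0.2 for every x, so
-- H(X) = -5 * 0.2 * log(0.2) = log(5) ≈ 1.609 (the maximum possible),
-- and the gradient is dH/dp(x) = -1 - log(0.2) ≈ 0.609 for every category.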
local CE, parent = torch.class("nn.CategoricalEntropy", "nn.Module")

function CE:__init(scale, minEntropy)
   parent.__init(self)
   self.scale = scale or 1
   self.minEntropy = minEntropy

   -- estimate p(X) using the batch as a prior
   self.module = nn.Sequential()
   self.module:add(nn.Sum(1)) -- sum categorical probabilities over the batch
   self._mul = nn.MulConstant(1)
   self.module:add(self._mul) -- normalize them to sum to one (i.e. probabilities)

   -- get entropy H(X)
   local concat = nn.ConcatTable()
   concat:add(nn.Identity()) -- p(X)
   local seq = nn.Sequential()
   seq:add(nn.AddConstant(0.000001)) -- prevent log(0) = -inf (which makes 0*log(0) nan)
   seq:add(nn.Log())
   concat:add(seq)
   self.module:add(concat) -- {p(x), log(p(x))}
   self.module:add(nn.CMulTable()) -- p(x)log(p(x))
   self.module:add(nn.Sum()) -- sum(p(x)log(p(x)))
   self.module:add(nn.MulConstant(-1)) -- H(X)

   self.modules = {self.module}

   self.minusOne = torch.Tensor{-self.scale} -- negative scale, so that gradient descent maximizes entropy
   self.sizeAverage = true
end
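
-- In short, given a batch P of size (batchSize x N), self.module computes:
--   p = sum(P, 1) / sum(P)      -- batch-estimated categorical distribution
--   H = -sum(p * log(p + 1e-6)) -- its entropy
-- updateGradInput (below) backpropagates -scale through this pipeline.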

function CE:updateOutput(input)
   assert(input:dim() == 2, "CategoricalEntropy only works with batches")
   self.output:set(input)
   return self.output
end

function CE:updateGradInput(input, gradOutput, scale)
   assert(input:dim() == 2, "CategoricalEntropy only works with batches")
   self.gradInput:resizeAs(input):copy(gradOutput)

   self._mul.constant_scalar = 1/input:sum() -- sum to one
   self.entropy = self.module:updateOutput(input)[1]
   if (not self.minEntropy) or (self.entropy < self.minEntropy) then
      local gradEntropy = self.module:updateGradInput(input, self.minusOne, scale)
      if self.sizeAverage then
         gradEntropy:div(input:size(1))
      end
      self.gradInput:add(gradEntropy)
   end

   return self.gradInput
end
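
A minimal usage sketch (illustrative: the layer sizes and the 0.1 scale are arbitrary, not taken from this commit). The module is an identity on the forward pass and only injects the entropy gradient on the backward pass, so it can be appended right after a softmax:

require 'nn'
require 'dpnn' -- provides nn.CategoricalEntropy (this commit)

local model = nn.Sequential()
model:add(nn.Linear(10, 5))
model:add(nn.SoftMax())
model:add(nn.CategoricalEntropy(0.1)) -- scale = 0.1; identity on forward

local input = torch.randn(4, 10) -- batch of 4 samples
local output = model:forward(input) -- equals the softmax output

-- in training, gradOutput comes from the criterion; a zero tensor
-- here isolates the entropy gradient added by CategoricalEntropy
local gradInput = model:backward(input, output:clone():zero())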
1 change: 1 addition & 0 deletions init.lua
@@ -70,6 +70,7 @@ torch.include('dpnn', 'Clip.lua')
torch.include('dpnn', 'SpatialUniformCrop.lua')
torch.include('dpnn', 'SpatialGlimpse.lua')
torch.include('dpnn', 'ArgMax.lua')
torch.include('dpnn', 'CategoricalEntropy.lua')

-- REINFORCE
torch.include('dpnn', 'Reinforce.lua')
22 changes: 22 additions & 0 deletions test/test.lua
@@ -705,6 +705,28 @@ function dpnntest.ArgMax()
   mytester:assertTensorEq(gradInput, input:clone():zero(), 0.000001, "ArgMax gradInput not asLong err")
end

function dpnntest.CategoricalEntropy()
   local inputSize = 5
   local batchSize = 10
   local minEntropy = 12
   local input_ = torch.randn(batchSize, inputSize)
   local input = nn.SoftMax():updateOutput(input_)
   local gradOutput = torch.Tensor(batchSize, inputSize):zero()
   local ce = nn.CategoricalEntropy()
   local output = ce:forward(input)
   mytester:assertTensorEq(input, output, 0.0000001, "CategoricalEntropy output err")
   local gradInput = ce:backward(input, gradOutput)
   local output2 = input:sum(1)[1]
   output2:div(output2:sum())
   local log2 = torch.log(output2 + 0.000001)
   local entropy2 = -output2:cmul(log2):sum()
   mytester:assert(math.abs(ce.entropy - entropy2) < 0.000001, "CategoricalEntropy entropy err")
   local gradEntropy2 = log2:add(1) -- -1*(-1 - log(p(x))) = 1 + log(p(x))
   gradEntropy2:div(input:sum())
   local gradInput2 = gradEntropy2:div(batchSize):view(1,inputSize):expandAs(input)
   mytester:assertTensorEq(gradInput2, gradInput, 0.000001, "CategoricalEntropy gradInput err")
end

function dpnnbigtest.Reinforce()
-- let us try to reinforce an mlp to learn a simple distribution
local n = 10
