
Add some modules

nagadomi 2018-10-14 01:18:32 +09:00
parent aef969d64b
commit 5365890fa8
5 changed files with 161 additions and 0 deletions

33 lib/EdgeFilter.lua Normal file

@@ -0,0 +1,33 @@
-- EdgeFilter.lua
-- from https://github.com/juefeix/lbcnn.torch
require 'cunn'
local EdgeFilter, parent = torch.class('w2nn.EdgeFilter', 'nn.SpatialConvolution')
function EdgeFilter:__init(nInputPlane)
   -- 8 fixed difference filters per input channel (3x3, stride 1, no padding)
   parent.__init(self, nInputPlane, nInputPlane * 8, 3, 3, 1, 1, 0, 0)
end
function EdgeFilter:reset()
   -- fixed filter bank: no bias, no learnable parameters
   self.bias = nil
   self.gradBias = nil
   self.gradWeight:fill(0)
   self.weight:fill(0)
   local fi = 1
   -- for each channel, one (center - neighbor) kernel per non-center 3x3 position
   for ch = 1, self.nInputPlane do
      for i = 0, 8 do
         local y = math.floor(i / 3) + 1
         local x = i % 3 + 1
         if not (y == 2 and x == 2) then
            self.weight[fi][ch][2][2] = 1
            self.weight[fi][ch][y][x] = -1
            fi = fi + 1
         end
      end
   end
end
-- the filters are fixed: gradient accumulation and parameter updates are no-ops
function EdgeFilter:accGradParameters(input, gradOutput, scale)
end
function EdgeFilter:updateParameters(learningRate)
end
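
For orientation, a minimal usage sketch of the new layer (hypothetical, not part of the commit; assumes Torch7 with nn/cunn and the w2nn namespace already loaded):

-- hypothetical usage sketch
-- EdgeFilter emits, per input channel, 8 center-minus-neighbor maps
local f = w2nn.EdgeFilter(3)           -- 3 channels -> 3 * 8 = 24 output planes
local x = torch.Tensor(1, 3, 8, 8):uniform()
local y = f:forward(x)                 -- 3x3 conv, stride 1, no padding
print(y:size())                        -- 1 x 24 x 6 x 6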

20 lib/GradWeight.lua Normal file

@@ -0,0 +1,20 @@
-- identity on forward; scales the gradient by a constant on backward
local GradWeight, parent = torch.class('w2nn.GradWeight', 'nn.Module')
function GradWeight:__init(constant_scalar)
   parent.__init(self)
   assert(type(constant_scalar) == 'number', 'constant_scalar must be a number')
   self.constant_scalar = constant_scalar
end
function GradWeight:updateOutput(input)
self.output:resizeAs(input)
self.output:copy(input)
return self.output
end
function GradWeight:updateGradInput(input, gradOutput)
self.gradInput:resizeAs(gradOutput)
self.gradInput:copy(gradOutput)
self.gradInput:mul(self.constant_scalar)
return self.gradInput
end
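
A minimal sketch of the intended behavior (hypothetical, assumes nn and w2nn are loaded): the forward pass is the identity and the backward pass scales the incoming gradient by the constant, which is useful for down-weighting a branch of a network:

-- hypothetical usage sketch
local gw = w2nn.GradWeight(0.5)
local x = torch.Tensor(2, 4):uniform()
local y = gw:forward(x)                    -- identical to x
local g = gw:backward(x, torch.ones(2, 4)) -- every entry scaled to 0.5
print(y:equal(x), g[1][1])                 -- true   0.5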

28 lib/RandomBinaryConvolution.lua Normal file

@@ -0,0 +1,28 @@
-- RandomBinaryConvolution.lua
-- from https://github.com/juefeix/lbcnn.torch
local THNN = require 'nn.THNN'
local RandomBinaryConvolution, parent = torch.class('w2nn.RandomBinaryConvolution', 'nn.SpatialConvolution')
function RandomBinaryConvolution:__init(nInputPlane, nOutputPlane, kW, kH, kSparsity)
   -- kSparsity: fraction of weight entries filled with random {-1, +1} values
   self.kSparsity = kSparsity or 0.9
   parent.__init(self, nInputPlane, nOutputPlane, kW, kH, 1, 1, 0, 0)
   self:reset()
end
function RandomBinaryConvolution:reset()
   local numElements = self.nInputPlane * self.nOutputPlane * self.kW * self.kH
   self.weight:fill(0)
   self.weight = torch.reshape(self.weight, numElements)
   -- draw ~kSparsity * numElements positions (with replacement) and set each to -1 or +1
   local index = torch.Tensor(torch.floor(self.kSparsity * numElements)):random(numElements)
   for i = 1, index:numel() do
      self.weight[index[i]] = torch.bernoulli(0.5) * 2 - 1
   end
   self.weight = torch.reshape(self.weight, self.nOutputPlane, self.nInputPlane, self.kW, self.kH)
   -- fixed filter bank: no bias, no learnable parameters
   self.bias = nil
   self.gradBias = nil
   self.gradWeight:fill(0)
end
-- the filters are fixed: gradient accumulation and parameter updates are no-ops
function RandomBinaryConvolution:accGradParameters(input, gradOutput, scale)
end
function RandomBinaryConvolution:updateParameters(learningRate)
end
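
A hypothetical sketch of the layer's behavior (assumes nn and w2nn are loaded): the kernels are sparse random {-1, +1} patterns fixed at construction and never trained:

-- hypothetical usage sketch
local rbc = w2nn.RandomBinaryConvolution(3, 32, 3, 3, 0.9)
local x = torch.Tensor(1, 3, 8, 8):uniform()
local y = rbc:forward(x)               -- 1 x 32 x 6 x 6
-- nonzero weights are all -1 or +1; index sampling is with replacement,
-- so slightly less than kSparsity of the entries end up set
print(y:size(), rbc.weight:ne(0):sum() / rbc.weight:nElement())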

76 lib/RandomBinaryCriterion.lua Normal file

@@ -0,0 +1,76 @@
local RandomBinaryCriterion, parent = torch.class('w2nn.RandomBinaryCriterion','nn.Criterion')
local function create_filters(ch, n, k)
   local filter = w2nn.RandomBinaryConvolution(ch, n, k, k)
   -- the first ch filters are channel identities (1 at the kernel center)
   for i = 1, ch do
      filter.weight[i]:fill(0)
      filter.weight[i][i][math.floor(k / 2) + 1][math.floor(k / 2) + 1] = 1
   end
   return filter
end
function RandomBinaryCriterion:__init(ch, n, k)
parent.__init(self)
   self.gamma = 0.1 -- Huber loss threshold
self.n = n or 32
self.k = k or 3
self.ch = ch
self.filter1 = create_filters(self.ch, self.n, self.k)
self.filter2 = self.filter1:clone()
self.diff = torch.Tensor()
self.diff_abs = torch.Tensor()
self.square_loss_buff = torch.Tensor()
self.linear_loss_buff = torch.Tensor()
self.input = torch.Tensor()
self.target = torch.Tensor()
end
function RandomBinaryCriterion:updateOutput(input, target)
   -- accept flattened (batch, ch*k*k) inputs; assumes square spatial size
   if input:dim() == 2 then
      local k = math.sqrt(input:size(2) / self.ch)
      input = input:reshape(input:size(1), self.ch, k, k)
   end
   if target:dim() == 2 then
      local k = math.sqrt(target:size(2) / self.ch)
      target = target:reshape(target:size(1), self.ch, k, k)
   end
self.input:resizeAs(input):copy(input):clamp(0, 1)
self.target:resizeAs(target):copy(target):clamp(0, 1)
local lb1 = self.filter1:forward(self.input)
local lb2 = self.filter2:forward(self.target)
   -- Huber loss on the difference of the binary projections
self.diff:resizeAs(lb1):copy(lb1)
for i = 1, lb1:size(1) do
self.diff[i]:add(-1, lb2[i])
end
self.diff_abs:resizeAs(self.diff):copy(self.diff):abs()
local square_targets = self.diff[torch.lt(self.diff_abs, self.gamma)]
local linear_targets = self.diff[torch.ge(self.diff_abs, self.gamma)]
local square_loss = self.square_loss_buff:resizeAs(square_targets):copy(square_targets):pow(2.0):mul(0.5):sum()
local linear_loss = self.linear_loss_buff:resizeAs(linear_targets):copy(linear_targets):abs():add(-0.5 * self.gamma):mul(self.gamma):sum()
--self.outlier_rate = linear_targets:nElement() / input:nElement()
self.output = (square_loss + linear_loss) / lb1:nElement()
return self.output
end
function RandomBinaryCriterion:updateGradInput(input, target)
   local d2 = false
   -- mirror the flattened-input handling from updateOutput
   if input:dim() == 2 then
      d2 = true
      local k = math.sqrt(input:size(2) / self.ch)
      input = input:reshape(input:size(1), self.ch, k, k)
   end
local norm = self.n / self.input:nElement()
self.gradInput:resizeAs(self.diff):copy(self.diff):mul(norm)
local outlier = torch.ge(self.diff_abs, self.gamma)
self.gradInput[outlier] = torch.sign(self.diff[outlier]) * self.gamma * norm
local grad_input = self.filter1:updateGradInput(input, self.gradInput)
if d2 then
grad_input = grad_input:reshape(grad_input:size(1), grad_input:size(2) * grad_input:size(3) * grad_input:size(4))
end
return grad_input
end
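
A hypothetical end-to-end sketch (assumes nn and w2nn are loaded): the criterion clamps both images to [0, 1], projects them through a shared random binary filter bank, and applies a Huber loss (threshold gamma = 0.1) to the difference of the projections:

-- hypothetical usage sketch
local crit = w2nn.RandomBinaryCriterion(3)   -- ch = 3; defaults n = 32, k = 3
local input = torch.Tensor(4, 3, 16, 16):uniform()
local target = torch.Tensor(4, 3, 16, 16):uniform()
local loss = crit:forward(input, target)
local grad = crit:backward(input, target)    -- gradient w.r.t. input, same shape
print(loss, grad:size())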

4 lib/w2nn.lua

@@ -80,6 +80,10 @@ else
require 'Print'
require 'AuxiliaryLossTable'
require 'AuxiliaryLossCriterion'
require 'GradWeight'
require 'RandomBinaryConvolution'
require 'RandomBinaryCriterion'
require 'EdgeFilter'
return w2nn
end
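
With these require lines in place, the new classes are registered on the w2nn table when the library is loaded; a hypothetical check:

-- hypothetical sketch; assumes lib/ is on package.path
require 'w2nn'
print(w2nn.EdgeFilter, w2nn.GradWeight,
      w2nn.RandomBinaryConvolution, w2nn.RandomBinaryCriterion)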