2015-10-28 19:30:47 +13:00
|
|
|
require 'w2nn'
|
2015-07-11 17:52:51 +12:00
|
|
|
|
2015-10-26 13:23:52 +13:00
|
|
|
-- ref: http://arxiv.org/abs/1502.01852
|
2015-07-11 17:52:51 +12:00
|
|
|
-- ref: http://arxiv.org/abs/1501.00092
|
2015-06-13 18:02:02 +12:00
|
|
|
local srcnn = {}
|
2015-11-19 01:46:43 +13:00
|
|
|
|
2017-04-21 11:54:39 +12:00
|
|
|
-- MSRA (He) initialization for a convolution module's weights.
-- ref: http://arxiv.org/abs/1502.01852
-- Variance is scaled for LeakyReLU(0.1) activations and averaged over
-- fan-in and fan-out; the bias is zeroed.
local function msra_filler(mod)
   local fin = mod.kW * mod.kH * mod.nInputPlane
   local fout = mod.kW * mod.kH * mod.nOutputPlane
   -- bugfix: `stdv` was assigned without `local`, leaking a global
   local stdv = math.sqrt(4 / ((1.0 + 0.1 * 0.1) * (fin + fout)))
   mod.weight:normal(0, stdv)
   mod.bias:zero()
end
|
|
|
|
-- Initialize a convolution so it starts near an identity mapping:
-- small random weights plus a fixed value at the kernel center that
-- routes each input plane to its group of output planes.
-- Requires nOutputPlane >= nInputPlane.
local function identity_filler(mod)
   assert(mod.nInputPlane <= mod.nOutputPlane)
   mod.weight:normal(0, 0.01)
   mod.bias:zero()
   -- one group per input plane, so in_group_size is always 1 here
   local num_groups = mod.nInputPlane -- fixed
   -- scaled so the per-output-plane contribution sums consistently
   local filler_value = num_groups / mod.nOutputPlane
   local in_group_size = math.floor(mod.nInputPlane / num_groups)
   local out_group_size = math.floor(mod.nOutputPlane / num_groups)
   -- kernel center coordinates (0-based)
   local x = math.floor(mod.kW / 2)
   local y = math.floor(mod.kH / 2)
   for i = 0, num_groups - 1 do
      for j = i * out_group_size, (i + 1) * out_group_size - 1 do
         for k = i * in_group_size, (i + 1) * in_group_size - 1 do
            -- +1 converts the 0-based group arithmetic to Lua's 1-based tensor indexing
            mod.weight[j+1][k+1][y+1][x+1] = filler_value
         end
      end
   end
end
|
|
|
|
-- Override torch's default :reset() initializers so every network built in
-- this file gets MSRA / identity initialization. The `stdv` argument is
-- intentionally ignored.
function nn.SpatialConvolutionMM:reset(stdv)
   msra_filler(self)
end
function nn.SpatialFullConvolution:reset(stdv)
   msra_filler(self)
end
function nn.SpatialDilatedConvolution:reset(stdv)
   identity_filler(self)
end
|
|
|
|
|
2015-12-09 11:04:04 +13:00
|
|
|
-- Apply the same initializer overrides to the cudnn modules when cudnn is
-- loaded. SpatialDilatedConvolution is guarded separately because it only
-- exists in newer cudnn bindings.
if cudnn and cudnn.SpatialConvolution then
   function cudnn.SpatialConvolution:reset(stdv)
      msra_filler(self)
   end
   function cudnn.SpatialFullConvolution:reset(stdv)
      msra_filler(self)
   end
   if cudnn.SpatialDilatedConvolution then
      function cudnn.SpatialDilatedConvolution:reset(stdv)
         identity_filler(self)
      end
   end
end
|
2016-03-12 11:23:42 +13:00
|
|
|
-- Free intermediate buffers before serialization while keeping
-- gradWeight/gradBias allocated (resized and zeroed) so the module
-- remains usable for training afterwards.
function nn.SpatialConvolutionMM:clearState()
   if self.gradWeight then
      self.gradWeight:resize(self.nOutputPlane, self.nInputPlane * self.kH * self.kW):zero()
   end
   if self.gradBias then
      self.gradBias:resize(self.nOutputPlane):zero()
   end
   -- drop the temporaries and cached input/output tensors
   return nn.utils.clear(self, 'finput', 'fgradInput', '_input', '_gradOutput', 'output', 'gradInput')
end
|
2015-10-29 22:05:33 +13:00
|
|
|
--- Number of color channels the model processes.
-- Prefers the explicit w2nn_channels attribute; otherwise derives it
-- from the weight of the second-to-last module.
function srcnn.channels(model)
   local ch = model.w2nn_channels
   if ch ~= nil then
      return ch
   end
   return model:get(model:size() - 1).weight:size(1)
end
|
2016-04-23 12:18:12 +12:00
|
|
|
--- Detect which backend a model was built with.
-- Returns "cudnn" if any cudnn convolution module is found, else "cunn".
function srcnn.backend(model)
   local uses_cudnn =
      #model:findModules("cudnn.SpatialConvolution") > 0 or
      #model:findModules("cudnn.SpatialFullConvolution") > 0
   if uses_cudnn then
      return "cudnn"
   end
   return "cunn"
end
|
|
|
|
--- Color mode of the model: "rgb" for 3-channel models, otherwise "y".
function srcnn.color(model)
   if srcnn.channels(model) == 3 then
      return "rgb"
   end
   return "y"
end
|
|
|
|
--- Architecture name of the model.
-- Uses the w2nn_arch_name attribute when present; legacy models are
-- identified by counting convolution layers (7 -> vgg_7, 12 -> vgg_12).
-- Raises an error for any other layer count.
function srcnn.name(model)
   if model.w2nn_arch_name ~= nil then
      return model.w2nn_arch_name
   end
   local conv = model:findModules("nn.SpatialConvolutionMM")
   if #conv == 0 then
      conv = model:findModules("cudnn.SpatialConvolution")
   end
   local n_layers = #conv
   if n_layers == 7 then
      return "vgg_7"
   elseif n_layers == 12 then
      return "vgg_12"
   end
   error("unsupported model")
end
|
|
|
|
--- Number of border pixels the model crops from its input.
-- Uses the w2nn_offset attribute when present; for legacy vgg_* models
-- the offset is the sum of (kW - 1) / 2 over all convolution layers.
-- Raises an error for unknown legacy architectures.
function srcnn.offset_size(model)
   if model.w2nn_offset ~= nil then
      return model.w2nn_offset
   end
   local name = srcnn.name(model)
   if not name:match("vgg_") then
      error("unsupported model")
   end
   local conv = model:findModules("nn.SpatialConvolutionMM")
   if #conv == 0 then
      conv = model:findModules("cudnn.SpatialConvolution")
   end
   local offset = 0
   for i = 1, #conv do
      offset = offset + (conv[i].kW - 1) / 2
   end
   return math.floor(offset)
end
|
2016-05-14 19:51:36 +12:00
|
|
|
--- Upscaling factor of the model (1 for denoise-only models).
-- Uses the w2nn_scale_factor attribute when present; otherwise falls
-- back on known legacy architecture names.
function srcnn.scale_factor(model)
   if model.w2nn_scale_factor ~= nil then
      return model.w2nn_scale_factor
   end
   local legacy_factors = { upconv_7 = 2, upconv_8_4x = 4 }
   return legacy_factors[srcnn.name(model)] or 1
end
|
|
|
|
--- Backend-dispatching 2D convolution constructor ("cunn" or "cudnn").
local function SpatialConvolution(backend, nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH)
   if backend == "cudnn" then
      return cudnn.SpatialConvolution(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH)
   elseif backend == "cunn" then
      return nn.SpatialConvolutionMM(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH)
   end
   error("unsupported backend:" .. backend)
end
srcnn.SpatialConvolution = SpatialConvolution
|
|
|
|
|
2016-06-08 09:58:46 +12:00
|
|
|
--- Backend-dispatching transposed (full) convolution constructor.
-- adjW/adjH adjust the output size as in nn.SpatialFullConvolution;
-- cudnn.SpatialFullConvolution extends the nn module, so it accepts
-- the same extra arguments.
local function SpatialFullConvolution(backend, nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, adjW, adjH)
   if backend == "cunn" then
      return nn.SpatialFullConvolution(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, adjW, adjH)
   elseif backend == "cudnn" then
      -- bugfix: adjW/adjH were previously dropped on the cudnn path,
      -- silently ignoring any output-size adjustment the caller requested
      return cudnn.SpatialFullConvolution(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, adjW, adjH)
   else
      error("unsupported backend:" .. backend)
   end
end
srcnn.SpatialFullConvolution = SpatialFullConvolution
|
|
|
|
|
2016-10-21 03:41:39 +13:00
|
|
|
--- Backend-dispatching in-place ReLU activation.
local function ReLU(backend)
   if backend == "cudnn" then
      return cudnn.ReLU(true)
   elseif backend == "cunn" then
      return nn.ReLU(true)
   end
   error("unsupported backend:" .. backend)
end
srcnn.ReLU = ReLU
|
|
|
|
|
2018-10-27 20:59:56 +13:00
|
|
|
--- Backend-dispatching in-place Sigmoid activation.
local function Sigmoid(backend)
   if backend == "cunn" then
      return nn.Sigmoid(true)
   elseif backend == "cudnn" then
      return cudnn.Sigmoid(true)
   else
      error("unsupported backend:" .. backend)
   end
end
-- bugfix: this previously read `srcnn.ReLU = ReLU` (copy-paste error),
-- redundantly re-exporting ReLU and never exporting Sigmoid at all.
srcnn.Sigmoid = Sigmoid
|
|
|
|
|
2016-10-21 03:41:39 +13:00
|
|
|
--- Backend-dispatching 2D max pooling constructor.
local function SpatialMaxPooling(backend, kW, kH, dW, dH, padW, padH)
   if backend == "cudnn" then
      return cudnn.SpatialMaxPooling(kW, kH, dW, dH, padW, padH)
   elseif backend == "cunn" then
      return nn.SpatialMaxPooling(kW, kH, dW, dH, padW, padH)
   end
   error("unsupported backend:" .. backend)
end
srcnn.SpatialMaxPooling = SpatialMaxPooling
|
2016-04-23 12:18:12 +12:00
|
|
|
|
2017-04-21 11:54:39 +12:00
|
|
|
--- Backend-dispatching 2D average pooling constructor.
local function SpatialAveragePooling(backend, kW, kH, dW, dH, padW, padH)
   if backend == "cudnn" then
      return cudnn.SpatialAveragePooling(kW, kH, dW, dH, padW, padH)
   elseif backend == "cunn" then
      return nn.SpatialAveragePooling(kW, kH, dW, dH, padW, padH)
   end
   error("unsupported backend:" .. backend)
end
srcnn.SpatialAveragePooling = SpatialAveragePooling
|
|
|
|
|
|
|
|
--- Backend-dispatching dilated convolution constructor.
-- The cudnn binding only provides SpatialDilatedConvolution with cudnn v6+;
-- when it is missing we fall back to the nn implementation.
local function SpatialDilatedConvolution(backend, nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, dilationW, dilationH)
   if backend == "cunn" then
      return nn.SpatialDilatedConvolution(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, dilationW, dilationH)
   elseif backend == "cudnn" then
      -- cudnn v6
      local ctor = cudnn.SpatialDilatedConvolution or nn.SpatialDilatedConvolution
      return ctor(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, dilationW, dilationH)
   end
   error("unsupported backend:" .. backend)
end
srcnn.SpatialDilatedConvolution = SpatialDilatedConvolution
|
|
|
|
|
2018-10-28 20:03:52 +13:00
|
|
|
--- Global average pooling over the two spatial dimensions,
-- reshaped to (-1, n_output, 1, 1) for use in attention blocks.
local function GlobalAveragePooling(n_output)
   local pool = nn.Sequential()
      :add(nn.Mean(-1, -1))
      :add(nn.Mean(-1, -1))
      :add(nn.View(-1, n_output, 1, 1))
   return pool
end
srcnn.GlobalAveragePooling = GlobalAveragePooling
|
2017-04-21 11:54:39 +12:00
|
|
|
|
2018-11-01 23:59:07 +13:00
|
|
|
-- Squeeze and Excitation Block
-- Builds a ConcatTable producing {input, channel-attention map}; the
-- caller is expected to multiply them (e.g. via w2nn.ScaleTable).
-- `r` is the channel reduction ratio of the attention bottleneck.
local function SEBlock(backend, n_output, r)
   local con = nn.ConcatTable(2)
   local attention = nn.Sequential()
   local n_mid = math.floor(n_output / r)
   attention:add(GlobalAveragePooling(n_output))
   attention:add(SpatialConvolution(backend, n_output, n_mid, 1, 1, 1, 1, 0, 0))
   attention:add(nn.ReLU(true))
   attention:add(SpatialConvolution(backend, n_mid, n_output, 1, 1, 1, 1, 0, 0))
   attention:add(nn.Sigmoid(true)) -- don't use cudnn sigmoid
   con:add(nn.Identity())
   con:add(attention)
   return con
end
|
|
|
|
-- I devised this arch for the block size and global average pooling problem,
-- but SEBlock may possibly learn multi-scale input or just a normalization. No problems occur.
-- So this arch is not used.
-- Like SEBlock, but pools over ave_size x ave_size windows instead of
-- globally, then upsamples the attention map back to the input resolution.
local function SpatialSEBlock(backend, ave_size, n_output, r)
   local con = nn.ConcatTable(2)
   local attention = nn.Sequential()
   local n_mid = math.floor(n_output / r)
   attention:add(SpatialAveragePooling(backend, ave_size, ave_size, ave_size, ave_size))
   attention:add(SpatialConvolution(backend, n_output, n_mid, 1, 1, 1, 1, 0, 0))
   attention:add(nn.ReLU(true))
   attention:add(SpatialConvolution(backend, n_mid, n_output, 1, 1, 1, 1, 0, 0))
   attention:add(nn.Sigmoid(true))
   attention:add(nn.SpatialUpSamplingNearest(ave_size, ave_size))
   con:add(nn.Identity())
   con:add(attention)
   return con
end
|
|
|
|
-- Residual block: two unpadded 3x3 convs (LeakyReLU) plus a shortcut.
-- The convs shrink the feature map by 2px per side, so the shortcut is
-- de-padded to match; a 1x1 conv projects the shortcut when i ~= o.
local function ResBlock(backend, i, o)
   local seq = nn.Sequential()
   local con = nn.ConcatTable()
   local conv = nn.Sequential()
   conv:add(SpatialConvolution(backend, i, o, 3, 3, 1, 1, 0, 0))
   conv:add(nn.LeakyReLU(0.1, true))
   conv:add(SpatialConvolution(backend, o, o, 3, 3, 1, 1, 0, 0))
   conv:add(nn.LeakyReLU(0.1, true))
   con:add(conv)
   if i == o then
      con:add(nn.SpatialZeroPadding(-2, -2, -2, -2)) -- identity + de-padding
   else
      -- NOTE: this inner `seq` intentionally shadows the outer one
      local seq = nn.Sequential()
      seq:add(SpatialConvolution(backend, i, o, 1, 1, 1, 1, 0, 0))
      seq:add(nn.SpatialZeroPadding(-2, -2, -2, -2))
      con:add(seq)
   end
   seq:add(con)
   seq:add(nn.CAddTable())
   return seq
end
|
|
|
|
-- ResBlock variant with squeeze-and-excitation channel attention applied
-- to the conv branch (SEBlock + ScaleTable) before the residual addition.
local function ResBlockSE(backend, i, o)
   local seq = nn.Sequential()
   local con = nn.ConcatTable()
   local conv = nn.Sequential()
   conv:add(SpatialConvolution(backend, i, o, 3, 3, 1, 1, 0, 0))
   conv:add(nn.LeakyReLU(0.1, true))
   conv:add(SpatialConvolution(backend, o, o, 3, 3, 1, 1, 0, 0))
   conv:add(nn.LeakyReLU(0.1, true))
   conv:add(SEBlock(backend, o, 8))
   conv:add(w2nn.ScaleTable())
   con:add(conv)
   if i == o then
      con:add(nn.SpatialZeroPadding(-2, -2, -2, -2)) -- identity + de-padding
   else
      -- NOTE: this inner `seq` intentionally shadows the outer one
      local seq = nn.Sequential()
      seq:add(SpatialConvolution(backend, i, o, 1, 1, 1, 1, 0, 0))
      seq:add(nn.SpatialZeroPadding(-2, -2, -2, -2))
      con:add(seq)
   end
   seq:add(con)
   seq:add(nn.CAddTable())
   return seq
end
|
|
|
|
-- Group of n ResBlocks with a long skip connection; the skip is de-padded
-- by 2*n px per side to match the blocks' cumulative size reduction.
local function ResGroup(backend, n, n_output)
   local seq = nn.Sequential()
   local res = nn.Sequential()
   local con = nn.ConcatTable(2)
   local depad = -2 * n
   for i = 1, n do
      res:add(ResBlock(backend, n_output, n_output))
   end
   con:add(res)
   con:add(nn.SpatialZeroPadding(depad, depad, depad, depad))
   seq:add(con)
   seq:add(nn.CAddTable())
   return seq
end
|
|
|
|
-- ResGroup built from ResBlockSE blocks (channel-attention variant);
-- skip connection is de-padded by 2*n px per side as in ResGroup.
local function ResGroupSE(backend, n, n_output)
   local seq = nn.Sequential()
   local res = nn.Sequential()
   local con = nn.ConcatTable(2)
   local depad = -2 * n
   for i = 1, n do
      res:add(ResBlockSE(backend, n_output, n_output))
   end
   con:add(res)
   con:add(nn.SpatialZeroPadding(depad, depad, depad, depad))
   seq:add(con)
   seq:add(nn.CAddTable())
   return seq
end
|
|
|
|
|
2016-04-23 12:18:12 +12:00
|
|
|
-- VGG style net(7 layers)
-- Denoise model (1x): seven unpadded 3x3 convolutions, output clipped
-- to [0, 1] and flattened.
function srcnn.vgg_7(backend, ch)
   local model = nn.Sequential()
   model:add(SpatialConvolution(backend, ch, 32, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 32, 32, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 32, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 64, 128, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 128, 128, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 128, ch, 3, 3, 1, 1, 0, 0))
   model:add(w2nn.InplaceClip01())
   model:add(nn.View(-1):setNumInputDims(3))

   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "vgg_7"
   model.w2nn_offset = 7 -- 7 unpadded 3x3 convs crop 1px per side each
   model.w2nn_scale_factor = 1
   model.w2nn_channels = ch
   --model:cuda()
   --print(model:forward(torch.Tensor(32, ch, 92, 92):uniform():cuda()):size())

   return model
end
|
2018-10-28 20:03:52 +13:00
|
|
|
|
2016-06-08 09:58:46 +12:00
|
|
|
-- Upconvolution
-- 2x upscaling model: six unpadded 3x3 convolutions followed by a single
-- stride-2 transposed convolution that doubles the resolution.
function srcnn.upconv_7(backend, ch)
   local model = nn.Sequential()
   model:add(SpatialConvolution(backend, ch, 16, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 16, 32, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 32, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 64, 128, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 128, 128, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 128, 256, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialFullConvolution(backend, 256, ch, 4, 4, 2, 2, 3, 3):noBias())
   model:add(w2nn.InplaceClip01())
   model:add(nn.View(-1):setNumInputDims(3))

   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "upconv_7"
   model.w2nn_offset = 14
   model.w2nn_scale_factor = 2
   model.w2nn_resize = true
   model.w2nn_channels = ch

   return model
end
|
2016-10-08 21:21:01 +13:00
|
|
|
|
|
|
|
-- large version of upconv_7
-- This model able to beat upconv_7 (PSNR: +0.3 ~ +0.8) but this model is 2x slower than upconv_7.
function srcnn.upconv_7l(backend, ch)
   local model = nn.Sequential()
   model:add(SpatialConvolution(backend, ch, 32, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 32, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 64, 128, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 128, 192, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 192, 256, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 256, 512, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialFullConvolution(backend, 512, ch, 4, 4, 2, 2, 3, 3):noBias())
   model:add(w2nn.InplaceClip01())
   model:add(nn.View(-1):setNumInputDims(3))

   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "upconv_7l"
   model.w2nn_offset = 14
   model.w2nn_scale_factor = 2
   model.w2nn_resize = true
   model.w2nn_channels = ch

   --model:cuda()
   --print(model:forward(torch.Tensor(32, ch, 92, 92):uniform():cuda()):size())

   return model
end
|
|
|
|
|
2017-01-21 20:53:10 +13:00
|
|
|
-- 2x upscaling ResNet: entry conv, six residual blocks with growing
-- width, then a stride-2 transposed convolution head.
function srcnn.resnet_14l(backend, ch)
   local model = nn.Sequential()
   model:add(SpatialConvolution(backend, ch, 32, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(ResBlock(backend, 32, 64))
   model:add(ResBlock(backend, 64, 64))
   model:add(ResBlock(backend, 64, 128))
   model:add(ResBlock(backend, 128, 128))
   model:add(ResBlock(backend, 128, 256))
   model:add(ResBlock(backend, 256, 256))
   model:add(SpatialFullConvolution(backend, 256, ch, 4, 4, 2, 2, 3, 3):noBias())
   model:add(w2nn.InplaceClip01())
   model:add(nn.View(-1):setNumInputDims(3))
   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "resnet_14l"
   model.w2nn_offset = 28
   model.w2nn_scale_factor = 2
   model.w2nn_resize = true
   model.w2nn_channels = ch
   --model:cuda()
   --print(model:forward(torch.Tensor(32, ch, 92, 92):uniform():cuda()):size())

   return model
end
|
|
|
|
|
2018-11-01 23:59:07 +13:00
|
|
|
-- ResNet_with SEBlock for fast conversion
-- 2x upscaling: entry conv, a group of 3 SE residual blocks, one more
-- conv, then a stride-2 transposed convolution head.
function srcnn.upresnet_s(backend, ch)
   local model = nn.Sequential()
   model:add(SpatialConvolution(backend, ch, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(ResGroupSE(backend, 3, 64))
   model:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3):noBias())
   model:add(w2nn.InplaceClip01())
   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "upresnet_s"
   model.w2nn_offset = 18
   model.w2nn_scale_factor = 2
   model.w2nn_resize = true
   model.w2nn_channels = ch

   return model
end
|
|
|
|
-- Cascaded ResNet with SEBlock
-- Two ResNet stages: the first upsamples 2x, the second refines at the
-- upscaled resolution. The final output is the sum of both stages; the
-- first stage's output is also exposed for an auxiliary loss.
function srcnn.upcresnet(backend, ch)
   -- single ResNet stage; `deconv` selects the 2x upsampling head
   local function resnet(backend, ch, deconv)
      local model = nn.Sequential()
      model:add(SpatialConvolution(backend, ch, 64, 3, 3, 1, 1, 0, 0))
      model:add(nn.LeakyReLU(0.1, true))
      model:add(ResGroupSE(backend, 2, 64))
      model:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      model:add(nn.LeakyReLU(0.1, true))
      if deconv then
         model:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3))
      else
         model:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      end
      return model
   end
   local model = nn.Sequential()
   local con = nn.ConcatTable()
   local aux_con = nn.ConcatTable()

   -- 2 cascade
   model:add(resnet(backend, ch, true))
   con:add(nn.Sequential():add(resnet(backend, ch, false)):add(nn.SpatialZeroPadding(-1, -1, -1, -1))) -- output is odd
   con:add(nn.SpatialZeroPadding(-8, -8, -8, -8))

   aux_con:add(nn.Sequential():add(nn.CAddTable()):add(w2nn.InplaceClip01())) -- cascaded unet output
   aux_con:add(nn.Sequential():add(nn.SelectTable(2)):add(w2nn.InplaceClip01())) -- single unet output

   model:add(con)
   model:add(aux_con)
   model:add(w2nn.AuxiliaryLossTable(1)) -- auxiliary loss for single unet output

   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "upcresnet"
   model.w2nn_offset = 22
   model.w2nn_scale_factor = 2
   model.w2nn_resize = true
   model.w2nn_channels = ch

   return model
end
|
|
|
|
|
2016-10-21 03:41:39 +13:00
|
|
|
-- for segmentation
-- Fully convolutional encoder/decoder: three conv+maxpool stages down,
-- then transposed convolutions back up to the input resolution.
function srcnn.fcn_v1(backend, ch)
   -- input_size = 120
   local model = nn.Sequential()
   --i = 120
   --model:cuda()
   --print(model:forward(torch.Tensor(32, ch, i, i):uniform():cuda()):size())

   -- encoder
   model:add(SpatialConvolution(backend, ch, 32, 5, 5, 2, 2, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 32, 32, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialMaxPooling(backend, 2, 2, 2, 2))

   model:add(SpatialConvolution(backend, 32, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialMaxPooling(backend, 2, 2, 2, 2))

   model:add(SpatialConvolution(backend, 64, 128, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 128, 128, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialMaxPooling(backend, 2, 2, 2, 2))

   -- bottleneck (1x1 conv + dropout)
   model:add(SpatialConvolution(backend, 128, 256, 1, 1, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(nn.Dropout(0.5, false, true))

   -- decoder: stride-2 transposed convolutions undo the poolings
   model:add(SpatialFullConvolution(backend, 256, 128, 2, 2, 2, 2, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialFullConvolution(backend, 128, 128, 2, 2, 2, 2, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 128, 64, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialFullConvolution(backend, 64, 64, 2, 2, 2, 2, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, 64, 32, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialFullConvolution(backend, 32, ch, 4, 4, 2, 2, 3, 3))

   model:add(w2nn.InplaceClip01())
   model:add(nn.View(-1):setNumInputDims(3))
   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "fcn_v1"
   model.w2nn_offset = 36
   model.w2nn_scale_factor = 1
   model.w2nn_channels = ch
   model.w2nn_input_size = 120
   --model.w2nn_gcn = true

   return model
end
|
2018-10-14 05:21:23 +13:00
|
|
|
|
2018-10-29 08:07:05 +13:00
|
|
|
-- U-Net branch: downsample 2x -> `insert` -> upsample 2x, summed with a
-- de-padded skip connection of the input. `depad` must equal the spatial
-- shrinkage (per side) that `insert` applies at the half resolution.
-- NOTE(review): `backend` appears twice in the parameter list; the second
-- occurrence shadows the first. All call sites in this file pass the same
-- value in both positions, so behavior is unaffected — confirm before
-- changing the signature (doing so would break existing callers).
local function unet_branch(backend, insert, backend, n_input, n_output, depad)
   local block = nn.Sequential()
   local con = nn.ConcatTable(2)
   local model = nn.Sequential()

   block:add(SpatialConvolution(backend, n_input, n_input, 2, 2, 2, 2, 0, 0))-- downsampling
   block:add(nn.LeakyReLU(0.1, true))
   block:add(insert)
   block:add(SpatialFullConvolution(backend, n_output, n_output, 2, 2, 2, 2, 0, 0))-- upsampling
   block:add(nn.LeakyReLU(0.1, true))
   con:add(nn.SpatialZeroPadding(-depad, -depad, -depad, -depad))
   con:add(block)
   model:add(con)
   model:add(nn.CAddTable())
   return model
end
|
|
|
|
-- Two unpadded 3x3 convolutions with LeakyReLU, optionally followed by a
-- squeeze-and-excitation attention block (`se` flag) whose output is
-- applied via ScaleTable.
local function unet_conv(backend, n_input, n_middle, n_output, se)
   local model = nn.Sequential()
   model:add(SpatialConvolution(backend, n_input, n_middle, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   model:add(SpatialConvolution(backend, n_middle, n_output, 3, 3, 1, 1, 0, 0))
   model:add(nn.LeakyReLU(0.1, true))
   if se then
      model:add(SEBlock(backend, n_output, 8))
      model:add(w2nn.ScaleTable())
   end
   return model
end
|
2018-10-28 20:03:52 +13:00
|
|
|
|
2018-10-29 08:07:05 +13:00
|
|
|
-- Cascaded Residual Channel Attention U-Net
-- Two U-Net stages: the first upsamples 2x (deconv head), the second
-- refines at the upscaled resolution. The final output is the sum of
-- both; the first stage's output also feeds an auxiliary loss.
function srcnn.upcunet(backend, ch)
   -- Residual U-Net
   local function unet(backend, ch, deconv)
      -- innermost (quarter resolution) stage
      local block1 = unet_conv(backend, 128, 256, 128, true)
      -- half-resolution stage wrapping block1
      local block2 = nn.Sequential()
      block2:add(unet_conv(backend, 64, 64, 128, true))
      block2:add(unet_branch(backend, block1, backend, 128, 128, 4))
      block2:add(unet_conv(backend, 128, 64, 64, true))
      local model = nn.Sequential()
      model:add(unet_conv(backend, ch, 32, 64, false))
      model:add(unet_branch(backend, block2, backend, 64, 64, 16))
      model:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      model:add(nn.LeakyReLU(0.1))
      if deconv then
         model:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3))
      else
         model:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      end
      return model
   end
   local model = nn.Sequential()
   local con = nn.ConcatTable()
   local aux_con = nn.ConcatTable()

   -- 2 cascade
   model:add(unet(backend, ch, true))
   con:add(unet(backend, ch, false))
   con:add(nn.SpatialZeroPadding(-20, -20, -20, -20))

   aux_con:add(nn.Sequential():add(nn.CAddTable()):add(w2nn.InplaceClip01())) -- cascaded unet output
   aux_con:add(nn.Sequential():add(nn.SelectTable(2)):add(w2nn.InplaceClip01())) -- single unet output

   model:add(con)
   model:add(aux_con)
   model:add(w2nn.AuxiliaryLossTable(1)) -- auxiliary loss for single unet output

   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "upcunet"
   model.w2nn_offset = 60
   model.w2nn_scale_factor = 2
   model.w2nn_channels = ch
   model.w2nn_resize = true
   -- inputs must be pooling-aligned; enumerate the acceptable sizes
   model.w2nn_valid_input_size = {}
   for i = 76, 512, 4 do
      table.insert(model.w2nn_valid_input_size, i)
   end

   return model
end
|
2018-10-29 08:07:05 +13:00
|
|
|
-- cunet for 1x
-- Same cascaded U-Net as upcunet but without the upsampling head:
-- both stages keep the input resolution (denoise-only model).
function srcnn.cunet(backend, ch)
   local function unet(backend, ch)
      -- innermost (quarter resolution) stage
      local block1 = unet_conv(backend, 128, 256, 128, true)
      -- half-resolution stage wrapping block1
      local block2 = nn.Sequential()
      block2:add(unet_conv(backend, 64, 64, 128, true))
      block2:add(unet_branch(backend, block1, backend, 128, 128, 4))
      block2:add(unet_conv(backend, 128, 64, 64, true))

      local model = nn.Sequential()
      model:add(unet_conv(backend, ch, 32, 64, false))
      model:add(unet_branch(backend, block2, backend, 64, 64, 16))
      model:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      model:add(nn.LeakyReLU(0.1))
      model:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))

      return model
   end
   local model = nn.Sequential()
   local con = nn.ConcatTable()
   local aux_con = nn.ConcatTable()

   -- 2 cascade
   model:add(unet(backend, ch))
   con:add(unet(backend, ch))
   con:add(nn.SpatialZeroPadding(-20, -20, -20, -20))

   aux_con:add(nn.Sequential():add(nn.CAddTable()):add(w2nn.InplaceClip01())) -- cascaded unet output
   aux_con:add(nn.Sequential():add(nn.SelectTable(2)):add(w2nn.InplaceClip01())) -- single unet output

   model:add(con)
   model:add(aux_con)
   model:add(w2nn.AuxiliaryLossTable(1)) -- auxiliary loss for single unet output

   -- metadata read by the srcnn.* accessor functions above
   model.w2nn_arch_name = "cunet"
   model.w2nn_offset = 40
   model.w2nn_scale_factor = 1
   model.w2nn_channels = ch
   model.w2nn_resize = false
   -- inputs must be pooling-aligned; enumerate the acceptable sizes
   model.w2nn_valid_input_size = {}
   for i = 100, 512, 4 do
      table.insert(model.w2nn_valid_input_size, i)
   end

   return model
end
|
|
|
|
|
2018-11-01 23:59:07 +13:00
|
|
|
function srcnn.upcunet_s_p0(backend, ch)
   -- Cascaded residual U-Net 2x upscaler ("p0" shallow variant, offset 24).
   -- Two U-Net stages: the first upscales via a transposed convolution, the
   -- second refines the result as a residual that is summed with a cropped
   -- copy of the first stage's output.
   -- `deconv` selects the 2x deconvolution head (stage 1) vs. a plain
   -- convolution head (stage 2).
   local function build_stage(backend, ch, deconv)
      local inner = unet_conv(backend, 64, 128, 64, true)
      local stage = nn.Sequential()
      stage:add(unet_conv(backend, ch, 32, 64, false))
      stage:add(unet_branch(backend, inner, backend, 64, 64, 4))
      stage:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      stage:add(nn.LeakyReLU(0.1))
      if deconv then
         stage:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3))
      else
         stage:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      end
      return stage
   end

   local net = nn.Sequential()
   local cascade = nn.ConcatTable()
   local heads = nn.ConcatTable()

   -- 2-stage cascade: stage 1 upscales, then ConcatTable produces
   -- {refinement(stage1_out), cropped stage1_out}.
   net:add(build_stage(backend, ch, true))
   cascade:add(build_stage(backend, ch, false))
   cascade:add(nn.SpatialZeroPadding(-8, -8, -8, -8))

   heads:add(nn.Sequential():add(nn.CAddTable()):add(w2nn.InplaceClip01())) -- cascaded unet output (residual sum)
   heads:add(nn.Sequential():add(nn.SelectTable(2)):add(w2nn.InplaceClip01())) -- single unet output

   net:add(cascade)
   net:add(heads)
   net:add(w2nn.AuxiliaryLossTable(1)) -- auxiliary loss on the single unet output

   -- Metadata consumed by the training/conversion tooling.
   net.w2nn_arch_name = "upcunet_s_p0"
   net.w2nn_offset = 24
   net.w2nn_scale_factor = 2
   net.w2nn_channels = ch
   net.w2nn_resize = true
   net.w2nn_valid_input_size = {}
   for size = 76, 512, 4 do
      net.w2nn_valid_input_size[#net.w2nn_valid_input_size + 1] = size
   end
   return net
end
|
|
|
|
function srcnn.upcunet_s_p1(backend, ch)
   -- Cascaded residual U-Net 2x upscaler ("p1" variant, offset 36):
   -- a shallow U-Net (unet1) upscales, then a deeper two-level U-Net
   -- (unet2) refines the result residually.

   -- Shallow stage; `deconv` selects the 2x deconvolution head.
   local function build_shallow(backend, ch, deconv)
      local inner = unet_conv(backend, 64, 128, 64, true)
      local stage = nn.Sequential()
      stage:add(unet_conv(backend, ch, 32, 64, false))
      stage:add(unet_branch(backend, inner, backend, 64, 64, 4))
      stage:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      stage:add(nn.LeakyReLU(0.1))
      if deconv then
         stage:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3))
      else
         stage:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      end
      return stage
   end

   -- Deeper stage: two nested U-Net branches.
   local function build_deep(backend, ch, deconv)
      local innermost = unet_conv(backend, 128, 256, 128, true)
      local middle = nn.Sequential()
      middle:add(unet_conv(backend, 64, 64, 128, true))
      middle:add(unet_branch(backend, innermost, backend, 128, 128, 4))
      middle:add(unet_conv(backend, 128, 64, 64, true))

      local stage = nn.Sequential()
      stage:add(unet_conv(backend, ch, 32, 64, false))
      stage:add(unet_branch(backend, middle, backend, 64, 64, 16))
      stage:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      stage:add(nn.LeakyReLU(0.1))
      if deconv then
         stage:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3))
      else
         stage:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      end
      return stage
   end

   local net = nn.Sequential()
   local cascade = nn.ConcatTable()
   local heads = nn.ConcatTable()

   -- 2-stage cascade: shallow upscaler followed by deep refiner.
   net:add(build_shallow(backend, ch, true))
   cascade:add(build_deep(backend, ch, false))
   cascade:add(nn.SpatialZeroPadding(-20, -20, -20, -20))

   heads:add(nn.Sequential():add(nn.CAddTable()):add(w2nn.InplaceClip01())) -- cascaded unet output (residual sum)
   heads:add(nn.Sequential():add(nn.SelectTable(2)):add(w2nn.InplaceClip01())) -- single unet output

   net:add(cascade)
   net:add(heads)
   net:add(w2nn.AuxiliaryLossTable(1)) -- auxiliary loss on the single unet output

   -- Metadata consumed by the training/conversion tooling.
   net.w2nn_arch_name = "upcunet_s_p1"
   net.w2nn_offset = 36
   net.w2nn_scale_factor = 2
   net.w2nn_channels = ch
   net.w2nn_resize = true
   net.w2nn_valid_input_size = {}
   for size = 76, 512, 4 do
      net.w2nn_valid_input_size[#net.w2nn_valid_input_size + 1] = size
   end
   return net
end
|
|
|
|
|
|
|
|
function srcnn.upcunet_s_p2(backend, ch)
   -- Cascaded residual U-Net 2x upscaler ("p2" variant, offset 48):
   -- the mirror of p1 — a deep two-level U-Net (unet2) upscales first,
   -- then a shallow U-Net (unet1) refines the result residually.

   -- Shallow stage; `deconv` selects the 2x deconvolution head.
   local function build_shallow(backend, ch, deconv)
      local inner = unet_conv(backend, 64, 128, 64, true)
      local stage = nn.Sequential()
      stage:add(unet_conv(backend, ch, 32, 64, false))
      stage:add(unet_branch(backend, inner, backend, 64, 64, 4))
      stage:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      stage:add(nn.LeakyReLU(0.1))
      if deconv then
         stage:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3))
      else
         stage:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      end
      return stage
   end

   -- Deeper stage: two nested U-Net branches.
   local function build_deep(backend, ch, deconv)
      local innermost = unet_conv(backend, 128, 256, 128, true)
      local middle = nn.Sequential()
      middle:add(unet_conv(backend, 64, 64, 128, true))
      middle:add(unet_branch(backend, innermost, backend, 128, 128, 4))
      middle:add(unet_conv(backend, 128, 64, 64, true))

      local stage = nn.Sequential()
      stage:add(unet_conv(backend, ch, 32, 64, false))
      stage:add(unet_branch(backend, middle, backend, 64, 64, 16))
      stage:add(SpatialConvolution(backend, 64, 64, 3, 3, 1, 1, 0, 0))
      stage:add(nn.LeakyReLU(0.1))
      if deconv then
         stage:add(SpatialFullConvolution(backend, 64, ch, 4, 4, 2, 2, 3, 3))
      else
         stage:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      end
      return stage
   end

   local net = nn.Sequential()
   local cascade = nn.ConcatTable()
   local heads = nn.ConcatTable()

   -- 2-stage cascade: deep upscaler followed by shallow refiner.
   net:add(build_deep(backend, ch, true))
   cascade:add(build_shallow(backend, ch, false))
   cascade:add(nn.SpatialZeroPadding(-8, -8, -8, -8))

   heads:add(nn.Sequential():add(nn.CAddTable()):add(w2nn.InplaceClip01())) -- cascaded unet output (residual sum)
   heads:add(nn.Sequential():add(nn.SelectTable(2)):add(w2nn.InplaceClip01())) -- single unet output

   net:add(cascade)
   net:add(heads)
   net:add(w2nn.AuxiliaryLossTable(1)) -- auxiliary loss on the single unet output

   -- Metadata consumed by the training/conversion tooling.
   net.w2nn_arch_name = "upcunet_s_p2"
   net.w2nn_offset = 48
   net.w2nn_scale_factor = 2
   net.w2nn_channels = ch
   net.w2nn_resize = true
   net.w2nn_valid_input_size = {}
   for size = 76, 512, 4 do
      net.w2nn_valid_input_size[#net.w2nn_valid_input_size + 1] = size
   end
   return net
end
|
|
|
|
function srcnn.cunet_s(backend, ch)
   -- Cascaded U-Net at 1x scale ("small" variant, offset 40): two identical
   -- U-Net stages; the second refines the first's output residually.

   -- One two-level U-Net stage with a narrow (32-channel) outer path.
   local function build_stage(backend, ch)
      local innermost = unet_conv(backend, 128, 256, 128, true)
      local middle = nn.Sequential()
      middle:add(unet_conv(backend, 32, 64, 128, true))
      middle:add(unet_branch(backend, innermost, backend, 128, 128, 4))
      middle:add(unet_conv(backend, 128, 64, 32, true))

      local stage = nn.Sequential()
      stage:add(unet_conv(backend, ch, 32, 32, false))
      stage:add(unet_branch(backend, middle, backend, 32, 32, 16))
      stage:add(SpatialConvolution(backend, 32, 64, 3, 3, 1, 1, 0, 0))
      stage:add(nn.LeakyReLU(0.1))
      stage:add(SpatialConvolution(backend, 64, ch, 3, 3, 1, 1, 0, 0))
      return stage
   end

   local net = nn.Sequential()
   local cascade = nn.ConcatTable()
   local heads = nn.ConcatTable()

   -- 2-stage cascade of identical U-Nets.
   net:add(build_stage(backend, ch))
   cascade:add(build_stage(backend, ch))
   cascade:add(nn.SpatialZeroPadding(-20, -20, -20, -20))

   heads:add(nn.Sequential():add(nn.CAddTable()):add(w2nn.InplaceClip01())) -- cascaded unet output (residual sum)
   heads:add(nn.Sequential():add(nn.SelectTable(2)):add(w2nn.InplaceClip01())) -- single unet output

   net:add(cascade)
   net:add(heads)
   net:add(w2nn.AuxiliaryLossTable(1)) -- auxiliary loss on the single unet output

   -- Metadata consumed by the training/conversion tooling.
   net.w2nn_arch_name = "cunet_s"
   net.w2nn_offset = 40
   net.w2nn_scale_factor = 1
   net.w2nn_channels = ch
   net.w2nn_resize = false
   net.w2nn_valid_input_size = {}
   for size = 100, 512, 4 do
      net.w2nn_valid_input_size[#net.w2nn_valid_input_size + 1] = size
   end
   return net
end
|
|
|
|
|
2018-10-27 18:59:51 +13:00
|
|
|
-- Rough forward-pass benchmark over a list of architectures.
-- For each arch: builds the model on GPU, derives the input crop size that
-- yields `output_size` output, warms up cudnn autotuning, then times 100
-- forward passes. Requires CUDA/cudnn; results go to stdout.
local function bench()
   local sys = require 'sys'
   cudnn.benchmark = true
   local model = nil
   local arch = {"upconv_7", "upresnet_s","upcresnet", "resnet_14l", "upcunet", "upcunet_s_p0", "upcunet_s_p1", "upcunet_s_p2"}
   --local arch = {"upconv_7", "upcunet","upcunet_v0", "upcunet_s", "vgg_7", "cunet", "cunet_s"}
   local backend = "cudnn"
   local ch = 3
   local batch_size = 1
   local output_size = 320
   for k = 1, #arch do
      model = srcnn[arch[k]](backend, ch):cuda()
      model:evaluate()
      -- input size that produces `output_size` output:
      -- output = input * scale - 2 * offset
      -- (was hard-coded "/ 2"; generalized so non-2x models are sized correctly)
      local crop_size = (output_size + model.w2nn_offset * 2) / model.w2nn_scale_factor
      -- accumulator to keep the forward results live (prevents dead-code elimination)
      local dummy = torch.Tensor(batch_size, ch, output_size, output_size):zero():cuda()

      print(arch[k], output_size, crop_size)
      -- warm-up (triggers cudnn.benchmark algorithm search)
      for i = 1, 4 do
         local x = torch.Tensor(batch_size, ch, crop_size, crop_size):uniform():cuda()
         model:forward(x)
      end
      -- was an accidental global `t`; keep it local to this function
      local t = sys.clock()
      for i = 1, 100 do
         local x = torch.Tensor(batch_size, ch, crop_size, crop_size):uniform():cuda()
         local z = model:forward(x)
         dummy:add(z)
      end
      print(arch[k], sys.clock() - t)
      model:clearState()
   end
end
|
2015-10-26 13:23:52 +13:00
|
|
|
-- Factory entry point: build a model by name.
-- model_name defaults to "vgg_7", backend to "cunn", color to "rgb".
-- color "rgb" -> 3 channels, "y" -> 1 channel; anything else raises.
-- Raises on unknown model names; asserts the model's offset/scale invariant.
function srcnn.create(model_name, backend, color)
   model_name = model_name or "vgg_7"
   backend = backend or "cunn"
   color = color or "rgb"

   local color_channels = { rgb = 3, y = 1 }
   local ch = color_channels[color]
   if ch == nil then
      error("unsupported color: " .. color)
   end

   local builder = srcnn[model_name]
   if builder == nil then
      error("unsupported model_name: " .. model_name)
   end

   local model = builder(backend, ch)
   -- the reconstruction offset must be an integer in input-pixel units
   assert(model.w2nn_offset % model.w2nn_scale_factor == 0)
   return model
end
|
2016-10-08 21:21:01 +13:00
|
|
|
--[[
-- ad-hoc smoke test / benchmark driver (left disabled)
local model = srcnn.resnet_s("cunn", 3):cuda()
print(model)
model:training()
print(model:forward(torch.Tensor(1, 3, 128, 128):zero():cuda()):size())
bench()
os.exit()
--]]
|
2018-10-28 00:38:04 +13:00
|
|
|
|
2015-06-13 18:02:02 +12:00
|
|
|
-- Module export: the table of model constructors plus srcnn.create().
return srcnn
|