waifu2x/tools/rebuild_model.lua
nagadomi a210090033 Convert model files; Add new pretrained model
- Add new pretrained model to ./models/upconv_7
- Move old models to ./models/vgg_7
- Use nn.LeakyReLU instead of w2nn.LeakyReLU
- Add useful attributes to the .json model files

New JSON attributes:
The first layer has a `model_config` attribute.
It contains:
  model_arch: the architecture name of the model; see `lib/srcnn.lua`
  scale_factor: if scale_factor > 1, model:forward() changes the image resolution by scale_factor.
  channels: the number of input/output channels; if channels == 3, the model is an RGB model.
  offset: the number of pixels removed from the output.
          for example:
            (scale_factor=1, offset=7, input=100x100) => output=(100-7)x(100-7)
            (scale_factor=2, offset=12, input=100x100) => output=(100*2-12)x(100*2-12)
Each layer also has a `class_name` attribute (see the sketch below).
2016-05-15 03:04:08 +09:00
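
For reference, a minimal sketch of how the new attributes could be consumed. It assumes the lua-cjson module is available, the file name is illustrative, and the exported JSON is a top-level array of layer objects as the commit message implies; the exact layout of real model files may differ.

local json = require 'cjson'

-- Expected output size under the rule described above:
-- output = input * scale_factor - offset
local function output_size(model_config, w, h)
   return w * model_config.scale_factor - model_config.offset,
          h * model_config.scale_factor - model_config.offset
end

local f = assert(io.open("scale2.0x_model.json", "r"))  -- illustrative file name
local layers = json.decode(f:read("*a"))
f:close()
local config = layers[1].model_config  -- only the first layer carries model_config
print(config.model_arch, config.channels, output_size(config, 100, 100))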


require 'pl'
-- Resolve this script's own path so that ../lib ends up on package.path.
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
package.path = path.join(path.dirname(__FILE__), "..", "lib", "?.lua;") .. package.path
require 'os'
require 'w2nn'
local srcnn = require 'srcnn'

-- Rebuild `old_model` as a fresh `model` architecture on the given backend,
-- copying convolution weights and biases layer by layer.
local function rebuild(old_model, model, backend)
   -- Map each convolution class that may appear in the old model to its
   -- equivalent class in the cunn and cudnn backends.
   local targets = {
      {"nn.SpatialConvolutionMM",
       {cunn = "nn.SpatialConvolutionMM",
        cudnn = "cudnn.SpatialConvolution"
       }
      },
      {"cudnn.SpatialConvolution",
       {cunn = "nn.SpatialConvolutionMM",
        cudnn = "cudnn.SpatialConvolution"
       }
      },
      {"nn.SpatialFullConvolution",
       {cunn = "nn.SpatialFullConvolution",
        cudnn = "cudnn.SpatialFullConvolution"
       }
      },
      {"cudnn.SpatialFullConvolution",
       {cunn = "nn.SpatialFullConvolution",
        cudnn = "cudnn.SpatialFullConvolution"
       }
      }
   }
   if backend:len() == 0 then
      -- No backend specified: keep the backend of the old model.
      backend = srcnn.backend(old_model)
   end
   local new_model = srcnn.create(model, backend, srcnn.color(old_model))
   for k = 1, #targets do
      local weight_from = old_model:findModules(targets[k][1])
      local weight_to = new_model:findModules(targets[k][2][backend])
      if #weight_from > 0 then
         if #weight_from ~= #weight_to then
            error(targets[k][1] .. ": weight_from: " .. #weight_from .. ", weight_to: " .. #weight_to)
         end
         for i = 1, #weight_from do
            local from = weight_from[i]
            local to = weight_to[i]
            if to.weight then
               to.weight:copy(from.weight)
            end
            if to.bias then
               to.bias:copy(from.bias)
            end
         end
      end
   end
   new_model:cuda()
   new_model:evaluate()
   return new_model
end

local cmd = torch.CmdLine()
cmd:text()
cmd:text("waifu2x rebuild cunn model")
cmd:text("Options:")
cmd:option("-i", "", 'Specify the input model')
cmd:option("-o", "", 'Specify the output model')
cmd:option("-backend", "", 'Specify the CUDA backend (cunn|cudnn)')
cmd:option("-model", "vgg_7", 'Specify the model architecture (vgg_7|vgg_12|upconv_7|upconv_8_4x|dilated_7)')
cmd:option("-iformat", "ascii", 'Specify the input format (ascii|binary)')
cmd:option("-oformat", "ascii", 'Specify the output format (ascii|binary)')
local opt = cmd:parse(arg)
if not path.isfile(opt.i) then
   cmd:help()
   os.exit(-1)
end
local old_model = torch.load(opt.i, opt.iformat)
local new_model = rebuild(old_model, opt.model, opt.backend)
torch.save(opt.o, new_model, opt.oformat)
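
For reference, an example invocation run with the Torch launcher from the repository root; the model paths are illustrative assumptions, while the option names and defaults come from the CmdLine definitions above. This would rebuild an existing vgg_7 model with cudnn convolution layers:

th tools/rebuild_model.lua -i models/vgg_7/art/scale2.0x_model.t7 -model vgg_7 -backend cudnn -o scale2.0x_model.cudnn.t7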