2015-11-08 22:31:46 +13:00
|
|
|
require 'pl'
|
2015-10-28 19:30:47 +13:00
|
|
|
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
|
|
|
|
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
|
2015-05-16 17:48:05 +12:00
|
|
|
require 'optim'
|
|
|
|
require 'xlua'
|
|
|
|
|
2015-10-28 19:30:47 +13:00
|
|
|
require 'w2nn'
|
|
|
|
local settings = require 'settings'
|
|
|
|
local srcnn = require 'srcnn'
|
|
|
|
local minibatch_adam = require 'minibatch_adam'
|
|
|
|
local iproc = require 'iproc'
|
|
|
|
local reconstruct = require 'reconstruct'
|
|
|
|
local compression = require 'compression'
|
|
|
|
local pairwise_transform = require 'pairwise_transform'
|
|
|
|
local image_loader = require 'image_loader'
|
2015-05-16 17:48:05 +12:00
|
|
|
|
|
|
|
-- Upscale the test image `rgb` with the current model and write the
-- result to `file` as a visual progress snapshot.
local function save_test_scale(model, rgb, file)
   local scaled = reconstruct.scale(model, settings.scale, rgb)
   image.save(file, scaled)
end
|
|
|
|
-- Run denoise/JPEG-artifact reconstruction on the test image `rgb` and
-- write the result to `file` as a visual progress snapshot.
local function save_test_jpeg(model, rgb, file)
   -- reconstruct.image also returns a second value that was previously
   -- captured as an unused local (`count`); it is simply discarded here.
   local im = reconstruct.image(model, rgb)
   image.save(file, im)
end
|
|
|
|
-- Randomly partition the sample table `x` into a training set and a
-- validation set holding `test_size` elements.
-- Returns (train_x, valid_x).
local function split_data(x, test_size)
   local perm = torch.randperm(#x)
   local n_train = #x - test_size
   local train_x, valid_x = {}, {}
   for i = 1, n_train do
      train_x[i] = x[perm[i]]
   end
   for i = 1, test_size do
      valid_x[i] = x[perm[n_train + i]]
   end
   return train_x, valid_x
end
|
2015-11-30 21:18:52 +13:00
|
|
|
-- Build a fixed validation set by applying `transformer` in validation
-- mode to every sample in `x`.
-- `n` is the desired number of crops per image (default 4); `patches`
-- is how many (input, target) pairs each transformer call produces.
-- Returns a list of batches, each {x = input tensor, y = target tensor}.
local function make_validation_set(x, transformer, n, patches)
   n = n or 4
   local batches = {}
   for i = 1, #x do
      -- Issue enough transformer calls to reach ~n crops, at least one.
      for _ = 1, math.max(n / patches, 1) do
         local xy = transformer(x[i], true, patches)
         local first = xy[1]
         -- Allocate batch tensors shaped after the first produced pair.
         local bx = torch.Tensor(patches, first[1]:size(1), first[1]:size(2), first[1]:size(3))
         local by = torch.Tensor(patches, first[2]:size(1), first[2]:size(2), first[2]:size(3))
         for j = 1, #xy do
            bx[j]:copy(xy[j][1])
            by[j]:copy(xy[j][2])
         end
         batches[#batches + 1] = {x = bx, y = by}
      end
      xlua.progress(i, #x)
      collectgarbage()
   end
   return batches
end
|
|
|
|
-- Average the criterion loss of `model` over the validation batches in
-- `data`. Batches are moved to the GPU one at a time.
local function validate(model, criterion, data)
   local total = 0
   for i = 1, #data do
      local pred = model:forward(data[i].x:cuda())
      total = total + criterion:forward(pred, data[i].y:cuda())
      -- Report progress (and reclaim GPU temporaries) every 100 batches.
      if i % 100 == 0 then
         xlua.progress(i, #data)
         collectgarbage()
      end
   end
   xlua.progress(#data, #data)
   return total / #data
end
|
|
|
|
|
2015-10-26 13:23:52 +13:00
|
|
|
-- Choose the training criterion for `model`: a per-channel weighted
-- clipped Huber loss for RGB models, plain MSE otherwise. Both are
-- returned CUDA-resident.
local function create_criterion(model)
   if not reconstruct.is_rgb(model) then
      return nn.MSECriterion():cuda()
   end
   local offset = reconstruct.offset_size(model)
   local output_w = settings.crop_size - offset * 2
   -- Per-pixel channel weights, roughly the luma contribution of each
   -- channel scaled by 3 so the weights sum to the channel count.
   local weight = torch.Tensor(3, output_w * output_w)
   weight[1]:fill(0.29891 * 3) -- R
   weight[2]:fill(0.58661 * 3) -- G
   weight[3]:fill(0.11448 * 3) -- B
   return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
end
|
|
|
|
-- Generate `n` (input, target) training patch pairs from one compressed
-- sample `x`.
-- `is_validation` (default false) disables all stochastic augmentation so
-- validation patches are comparable across epochs; `offset` is the
-- model's reconstruction border offset.
-- Raises an error for an unknown settings.method instead of silently
-- returning nil (which previously caused a confusing crash downstream).
local function transformer(x, is_validation, n, offset)
   x = compression.decompress(x)
   n = n or settings.patches
   if is_validation == nil then is_validation = false end
   local random_color_noise_rate, random_overlay_rate
   local active_cropping_rate, active_cropping_tries
   if is_validation then
      -- Validation: deterministic cropping, no color noise or overlays.
      active_cropping_rate = 0
      active_cropping_tries = 0
      random_color_noise_rate = 0.0
      random_overlay_rate = 0.0
   else
      -- Training: use the configured augmentation rates.
      active_cropping_rate = settings.active_cropping_rate
      active_cropping_tries = settings.active_cropping_tries
      random_color_noise_rate = settings.random_color_noise_rate
      random_overlay_rate = settings.random_overlay_rate
   end

   if settings.method == "scale" then
      return pairwise_transform.scale(x,
         settings.scale,
         settings.crop_size, offset,
         n,
         {
            random_half_rate = settings.random_half_rate,
            random_color_noise_rate = random_color_noise_rate,
            random_overlay_rate = random_overlay_rate,
            random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
            max_size = settings.max_size,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            rgb = (settings.color == "rgb")
         })
   elseif settings.method == "noise" then
      return pairwise_transform.jpeg(x,
         settings.style,
         settings.noise_level,
         settings.crop_size, offset,
         n,
         {
            random_half_rate = settings.random_half_rate,
            random_color_noise_rate = random_color_noise_rate,
            random_overlay_rate = random_overlay_rate,
            random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
            max_size = settings.max_size,
            jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            nr_rate = settings.nr_rate,
            rgb = (settings.color == "rgb")
         })
   else
      -- Fail fast: previously this fell through and returned nil.
      error("transformer: unknown settings.method: " .. tostring(settings.method))
   end
end
|
|
|
|
|
2015-11-30 21:18:52 +13:00
|
|
|
-- Refill the preallocated training tensors `x` (inputs) and `y`
-- (flattened targets) with freshly transformed patches drawn from every
-- image in `train_x`. Each image contributes settings.patches pairs at
-- consecutive indices.
-- Note: the former trailing parameters (input_size, target_size) were
-- never used and have been removed; extra arguments from old call sites
-- are harmlessly discarded by Lua.
local function resampling(x, y, train_x, transformer)
   print("## resampling")
   for t = 1, #train_x do
      xlua.progress(t, #train_x)
      local xy = transformer(train_x[t], false, settings.patches)
      for i = 1, #xy do
         local index = (t - 1) * settings.patches + i
         x[index]:copy(xy[i][1])
         y[index]:copy(xy[i][2])
      end
      -- Periodic GC keeps peak memory down during the long refill pass.
      if t % 50 == 0 then
         collectgarbage()
      end
   end
end
|
|
|
|
|
2015-05-16 17:48:05 +12:00
|
|
|
-- Main training loop: builds the model and criterion, splits the data,
-- then alternates patch resampling, minibatch Adam updates, and
-- validation. The best-scoring model is saved (optionally with a
-- per-epoch history) and the learning rate decays when validation
-- stalls.
local function train()
   -- Floor for learning-rate decay.
   local LR_MIN = 1.0e-5
   local model = srcnn.create(settings.method, settings.backend, settings.color)
   local offset = reconstruct.offset_size(model)
   -- Bind the model's border offset into the patch transformer.
   local pairwise_func = function(x, is_validation, n)
      return transformer(x, is_validation, n, offset)
   end
   local criterion = create_criterion(model)
   local x = torch.load(settings.images)
   local train_x, valid_x = split_data(x, math.floor(settings.validation_rate * #x))
   local adam_config = {
      learningRate = settings.learning_rate,
      xBatchSize = settings.batch_size,
   }
   -- Counts consecutive validations without improvement (decay patience).
   local lrd_count = 0
   -- Channel count: 1 for luminance-only models, 3 for RGB.
   local ch = nil
   if settings.color == "y" then
      ch = 1
   elseif settings.color == "rgb" then
      ch = 3
   end
   local best_score = 100000.0
   print("# make validation-set")
   local valid_xy = make_validation_set(valid_x, pairwise_func,
                                        settings.validation_crops,
                                        settings.patches)
   -- Raw validation images are no longer needed once batched.
   valid_x = nil
   collectgarbage()
   model:cuda()
   print("load .. " .. #train_x)
   -- Preallocated patch buffers, refilled by resampling() each epoch.
   -- NOTE(review): this `x` intentionally shadows the dataset table
   -- loaded above, which is no longer needed at this point.
   local x = torch.Tensor(settings.patches * #train_x,
                          ch, settings.crop_size, settings.crop_size)
   -- Targets are stored flattened (the border of width `offset` is
   -- cropped from each side of the output patch).
   local y = torch.Tensor(settings.patches * #train_x,
                          ch * (settings.crop_size - offset * 2) * (settings.crop_size - offset * 2)):zero()
   for epoch = 1, settings.epoch do
      model:training()
      print("# " .. epoch)
      -- Draw a fresh set of augmented patches for this epoch.
      resampling(x, y, train_x, pairwise_func)
      for i = 1, settings.inner_epoch do
         print(minibatch_adam(model, criterion, x, y, adam_config))
         model:evaluate()
         print("# validation")
         local score = validate(model, criterion, valid_xy)
         if score < best_score then
            local test_image = image_loader.load_float(settings.test) -- reload
            lrd_count = 0
            best_score = score
            print("* update best model")
            if settings.save_history then
               -- Save a slimmed snapshot per improvement, named by
               -- epoch/inner-epoch via the model_file format string.
               local model_clone = model:clone()
               w2nn.cleanup_model(model_clone)
               torch.save(string.format(settings.model_file, epoch, i), model_clone)
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.%d-%d.png"):format(settings.noise_level,
                                                                          epoch, i))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.%d-%d.png"):format(settings.scale,
                                                                            epoch, i))
                  save_test_scale(model, test_image, log)
               end
            else
               -- Overwrite the single best-model file.
               torch.save(settings.model_file, model)
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.png"):format(settings.noise_level))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.png"):format(settings.scale))
                  save_test_scale(model, test_image, log)
               end
            end
         else
            -- No improvement: after 3 stalled validations, decay the
            -- learning rate by 20% (down to LR_MIN) and reset patience.
            lrd_count = lrd_count + 1
            if lrd_count > 2 and adam_config.learningRate > LR_MIN then
               adam_config.learningRate = adam_config.learningRate * 0.8
               print("* learning rate decay: " .. adam_config.learningRate)
               lrd_count = 0
            end
         end
         print("current: " .. score .. ", best: " .. best_score)
         collectgarbage()
      end
   end
end
|
2015-11-13 23:26:58 +13:00
|
|
|
-- Script entry point: select the GPU, seed both CPU and CUDA RNGs for
-- reproducibility, echo the configuration, then train.
if settings.gpu > 0 then
   cutorch.setDevice(settings.gpu)
end
torch.manualSeed(settings.seed)
cutorch.manualSeed(settings.seed)
print(settings)
train()
|