
Add noise_scale training

This commit is contained in:
nagadomi 2016-06-08 06:39:36 +09:00
parent 3b09bff8cf
commit 307ae40883
4 changed files with 232 additions and 11 deletions
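The new -method noise_scale trains a single model that both removes JPEG noise and upscales, generating training pairs through the new pairwise_transform.jpeg_scale. A minimal invocation sketch, using only options that appear in this diff (the training entry point and the data/model paths are assumed to match the repository defaults):

    th train.lua -method noise_scale -style art -noise_level 1 -scale 2 -data_dir ./data -model_dir ./models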

View file

@@ -3,7 +3,6 @@ local pairwise_transform = {}
pairwise_transform = tablex.update(pairwise_transform, require('pairwise_transform_scale'))
pairwise_transform = tablex.update(pairwise_transform, require('pairwise_transform_jpeg'))
print(pairwise_transform)
pairwise_transform = tablex.update(pairwise_transform, require('pairwise_transform_jpeg_scale'))
return pairwise_transform

View file

@@ -0,0 +1,175 @@
local pairwise_utils = require 'pairwise_transform_utils'
local iproc = require 'iproc'
local gm = require 'graphicsmagick'
local pairwise_transform = {}
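-- Re-encode x as JPEG once per entry in quality, randomly choosing 4:2:0 or 4:4:4 chroma subsampling, to synthesize compression artifacts.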
local function add_jpeg_noise_(x, quality, options)
for i = 1, #quality do
x = gm.Image(x, "RGB", "DHW")
x:format("jpeg"):depth(8)
if torch.uniform() < options.jpeg_chroma_subsampling_rate then
-- YUV 420
x:samplingFactors({2.0, 1.0, 1.0})
else
-- YUV 444
x:samplingFactors({1.0, 1.0, 1.0})
end
local blob, len = x:toBlob(quality[i])
x:fromBlob(blob, len)
x = x:toTensor("byte", "RGB", "DHW")
end
return x
end
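-- Choose JPEG quality settings for the given style/noise level: art level 1 uses a single pass, levels 2/3 randomly use one to three passes of decreasing quality, photo uses a single pass.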
local function add_jpeg_noise(src, style, level, options)
if style == "art" then
if level == 1 then
return add_jpeg_noise_(src, {torch.random(65, 85)}, options)
elseif level == 2 or level == 3 then
-- levels 2/3 are adjusted by -nr_rate; for level 3, -nr_rate=1
local r = torch.uniform()
if r > 0.6 then
return add_jpeg_noise_(src, {torch.random(27, 70)}, options)
elseif r > 0.3 then
local quality1 = torch.random(37, 70)
local quality2 = quality1 - torch.random(5, 10)
return add_jpeg_noise_(src, {quality1, quality2}, options)
else
local quality1 = torch.random(52, 70)
local quality2 = quality1 - torch.random(5, 15)
local quality3 = quality1 - torch.random(15, 25)
return add_jpeg_noise_(src, {quality1, quality2, quality3}, options)
end
else
error("unknown noise level: " .. level)
end
elseif style == "photo" then
-- level is adjusted by -nr_rate
return add_jpeg_noise_(src, {torch.random(30, 70)}, options)
else
error("unknown style: " .. style)
end
end
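-- Build (x, y) training pairs for combined denoise+upscale: y is the clean high-resolution patch, x is y downscaled (and re-upsampled when x_upsampling is set) with JPEG noise added.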
function pairwise_transform.jpeg_scale(src, scale, style, noise_level, size, offset, n, options)
local filters = options.downsampling_filters
if options.data.filters then
filters = options.data.filters
end
local unstable_region_offset = 8
local downsampling_filter = filters[torch.random(1, #filters)]
local blur = torch.uniform(options.resize_blur_min, options.resize_blur_max)
local y = pairwise_utils.preprocess(src, size, options)
assert(y:size(2) % 4 == 0 and y:size(3) % 4 == 0)
local down_scale = 1.0 / scale
local x
if options.gamma_correction then
local small = iproc.scale_with_gamma22(y, y:size(3) * down_scale,
y:size(2) * down_scale, downsampling_filter, blur)
if options.x_upsampling then
x = iproc.scale(small, y:size(3), y:size(2), options.upsampling_filter)
else
x = small
end
else
local small = iproc.scale(y, y:size(3) * down_scale,
y:size(2) * down_scale, downsampling_filter, blur)
if options.x_upsampling then
x = iproc.scale(small, y:size(3), y:size(2), options.upsampling_filter)
else
x = small
end
end
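-- inject JPEG compression noise into the input image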
x = add_jpeg_noise(x, style, noise_level, options)
local scale_inner = scale
if options.x_upsampling then
scale_inner = 1
end
x = iproc.crop(x, unstable_region_offset, unstable_region_offset,
x:size(3) - unstable_region_offset, x:size(2) - unstable_region_offset)
y = iproc.crop(y, unstable_region_offset * scale_inner, unstable_region_offset * scale_inner,
y:size(3) - unstable_region_offset * scale_inner, y:size(2) - unstable_region_offset * scale_inner)
if options.x_upsampling then
assert(x:size(2) % 4 == 0 and x:size(3) % 4 == 0)
assert(x:size(1) == y:size(1) and x:size(2) == y:size(2) and x:size(3) == y:size(3))
else
assert(x:size(1) == y:size(1) and x:size(2) * scale == y:size(2) and x:size(3) * scale == y:size(3))
end
local batch = {}
local lowres_y = gm.Image(y, "RGB", "DHW"):
size(y:size(3) * 0.5, y:size(2) * 0.5, "Box"):
size(y:size(3), y:size(2), "Box"):
toTensor("byte", "RGB", "DHW")
local xs = {}
local ys = {}
local lowreses = {}
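-- build 8 augmented variants (transpose x horizontal/vertical flips) of x, y and lowres_y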
for j = 1, 2 do
-- TTA
local xi, yi, ri
if j == 1 then
xi = x
yi = y
ri = lowres_y
else
xi = x:transpose(2, 3):contiguous()
yi = y:transpose(2, 3):contiguous()
ri = lowres_y:transpose(2, 3):contiguous()
end
local xv = image.vflip(xi)
local yv = image.vflip(yi)
local rv = image.vflip(ri)
table.insert(xs, xi)
table.insert(ys, yi)
table.insert(lowreses, ri)
table.insert(xs, xv)
table.insert(ys, yv)
table.insert(lowreses, rv)
table.insert(xs, image.hflip(xi))
table.insert(ys, image.hflip(yi))
table.insert(lowreses, image.hflip(ri))
table.insert(xs, image.hflip(xv))
table.insert(ys, image.hflip(yv))
table.insert(lowreses, image.hflip(rv))
end
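-- sample n patches for the batch, cycling through the augmented variants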
for i = 1, n do
local t = (i % #xs) + 1
local xc, yc = pairwise_utils.active_cropping(xs[t], ys[t], lowreses[t],
size,
scale_inner,
options.active_cropping_rate,
options.active_cropping_tries)
xc = iproc.byte2float(xc)
yc = iproc.byte2float(yc)
if not options.rgb then
-- Y-channel training: keep only the luma plane
yc = image.rgb2yuv(yc)[1]:reshape(1, yc:size(2), yc:size(3))
xc = image.rgb2yuv(xc)[1]:reshape(1, xc:size(2), xc:size(3))
end
table.insert(batch, {xc, iproc.crop(yc, offset, offset, size - offset, size - offset)})
end
return batch
end
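-- quick visual sanity check: generate noise_scale pairs from the lena test image and display them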
function pairwise_transform.test_jpeg_scale(src)
torch.setdefaulttensortype("torch.FloatTensor")
-- options must include every field jpeg_scale reads (blur range, chroma subsampling rate, data table), otherwise the test errors out
local options = {random_color_noise_rate = 0.5,
random_half_rate = 0.5,
random_overlay_rate = 0.5,
random_unsharp_mask_rate = 0.5,
jpeg_chroma_subsampling_rate = 0.5,
active_cropping_rate = 0.5,
active_cropping_tries = 10,
max_size = 256,
x_upsampling = false,
resize_blur_min = 0.95,
resize_blur_max = 1.05,
downsampling_filters = {"Box"},
data = {},
rgb = true
}
local image = require 'image'
local src = image.lena()
for i = 1, 10 do
local xy = pairwise_transform.jpeg_scale(src, 2.0, "art", 1, 128, 7, 1, options)
image.display({image = xy[1][1], legend = "x:" .. (i * 10), min = 0, max = 1})
image.display({image = xy[1][2], legend = "y:" .. (i * 10), min = 0, max = 1})
end
end
return pairwise_transform

View file

@@ -23,7 +23,7 @@ cmd:option("-data_dir", "./data", 'path to data directory')
cmd:option("-backend", "cunn", '(cunn|cudnn)')
cmd:option("-test", "images/miku_small.png", 'path to test image')
cmd:option("-model_dir", "./models", 'model directory')
cmd:option("-method", "scale", 'method to training (noise|scale)')
cmd:option("-method", "scale", 'method to training (noise|scale|noise_scale)')
cmd:option("-model", "vgg_7", 'model architecture (vgg_7|vgg_12|upconv_7|upconv_8_4x|dilated_7)')
cmd:option("-noise_level", 1, '(1|2|3)')
cmd:option("-style", "art", '(art|photo)')
@@ -33,13 +33,13 @@ cmd:option("-random_overlay_rate", 0.0, 'data augmentation using flipped image o
cmd:option("-random_half_rate", 0.0, 'data augmentation using half resolution image (0.0-1.0)')
cmd:option("-random_unsharp_mask_rate", 0.0, 'data augmentation using unsharp mask (0.0-1.0)')
cmd:option("-scale", 2.0, 'scale factor (2)')
cmd:option("-learning_rate", 0.0005, 'learning rate for adam')
cmd:option("-learning_rate", 0.00025, 'learning rate for adam')
cmd:option("-crop_size", 48, 'crop size')
cmd:option("-max_size", 256, 'if image is larger than N, image will be crop randomly')
cmd:option("-batch_size", 8, 'mini batch size')
cmd:option("-patches", 16, 'number of patch samples')
cmd:option("-inner_epoch", 4, 'number of inner epochs')
cmd:option("-epoch", 50, 'number of epochs to run')
cmd:option("-batch_size", 16, 'mini batch size')
cmd:option("-patches", 64, 'number of patch samples')
cmd:option("-inner_epoch", 1, 'number of inner epochs')
cmd:option("-epoch", 100, 'number of epochs to run')
cmd:option("-thread", -1, 'number of CPU threads')
cmd:option("-jpeg_chroma_subsampling_rate", 0.0, 'the rate of YUV 4:2:0/YUV 4:4:4 in denoising training (0.0-1.0)')
cmd:option("-validation_rate", 0.05, 'validation-set rate (number_of_training_images * validation_rate > 1)')
@@ -54,12 +54,12 @@ cmd:option("-gamma_correction", 0, 'Resizing with colorspace correction(sRGB:gam
cmd:option("-upsampling_filter", "Box", 'upsampling filter for 2x scale training (dev)')
cmd:option("-max_training_image_size", -1, 'if training image is larger than N, image will be crop randomly when data converting')
cmd:option("-use_transparent_png", 0, 'use transparent png (0|1)')
cmd:option("-resize_blur_min", 0.85, 'min blur parameter for ResizeImage')
cmd:option("-resize_blur_min", 0.95, 'min blur parameter for ResizeImage')
cmd:option("-resize_blur_max", 1.05, 'max blur parameter for ResizeImage')
cmd:option("-oracle_rate", 0.0, '')
cmd:option("-oracle_rate", 0.1, '')
cmd:option("-oracle_drop_rate", 0.5, '')
cmd:option("-learning_rate_decay", 3.0e-7, 'learning rate decay (learning_rate * 1/(1+num_of_data*patches*epoch))')
cmd:option("-loss", "rgb", 'loss (rgb|y)')
cmd:option("-loss", "y", 'loss (rgb|y)')
local function to_bool(settings, name)
if settings[name] == 1 then
@@ -92,6 +92,15 @@ if settings.save_history then
settings.model_dir, settings.scale)
settings.model_file_best = string.format("%s/scale%.1fx_model.t7",
settings.model_dir, settings.scale)
elseif settings.method == "noise_scale" then
settings.model_file = string.format("%s/noise%d_scale%.1fx_model.%%d-%%d.t7",
settings.model_dir,
settings.noise_level,
settings.scale)
settings.model_file_best = string.format("%s/noise%d_scale%.1fx_model.t7",
settings.model_dir,
settings.noise_level,
settings.scale)
else
error("unknown method: " .. settings.method)
end
@@ -102,6 +111,9 @@ else
elseif settings.method == "scale" then
settings.model_file = string.format("%s/scale%.1fx_model.t7",
settings.model_dir, settings.scale)
elseif settings.method == "noise_scale" then
settings.model_file = string.format("%s/noise%d_scale%.1fx_model.t7",
settings.model_dir, settings.noise_level, settings.scale)
else
error("unknown method: " .. settings.method)
end

View file

@@ -178,6 +178,30 @@ local function transformer(model, x, is_validation, n, offset)
settings.noise_level,
settings.crop_size, offset,
n, conf)
elseif settings.method == "noise_scale" then
local conf = tablex.update({
downsampling_filters = settings.downsampling_filters,
upsampling_filter = settings.upsampling_filter,
random_half_rate = settings.random_half_rate,
random_color_noise_rate = random_color_noise_rate,
random_overlay_rate = random_overlay_rate,
random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
max_size = settings.max_size,
jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
nr_rate = settings.nr_rate,
active_cropping_rate = active_cropping_rate,
active_cropping_tries = active_cropping_tries,
rgb = (settings.color == "rgb"),
gamma_correction = settings.gamma_correction,
x_upsampling = not reconstruct.has_resize(model),
resize_blur_min = settings.resize_blur_min,
resize_blur_max = settings.resize_blur_max}, meta)
return pairwise_transform.jpeg_scale(x,
settings.scale,
settings.style,
settings.noise_level,
settings.crop_size, offset,
n, conf)
end
end
@@ -364,6 +388,12 @@ local function train()
("scale%.1f_best.%d-%d.png"):format(settings.scale,
epoch, i))
save_test_scale(model, test_image, log)
elseif settings.method == "noise_scale" then
local log = path.join(settings.model_dir,
("noise%d_scale%.1f_best.%d-%d.png"):format(settings.noise_level,
settings.scale,
epoch, i))
save_test_scale(model, test_image, log)
end
else
torch.save(settings.model_file, model:clearState(), "ascii")
@@ -375,6 +405,11 @@ local function train()
local log = path.join(settings.model_dir,
("scale%.1f_best.png"):format(settings.scale))
save_test_scale(model, test_image, log)
elseif settings.method == "noise_scale" then
local log = path.join(settings.model_dir,
("noise%d_scale%.1f_best.png"):format(settings.noise_level,
settings.scale))
save_test_scale(model, test_image, log)
end
end
end