1
0
Fork 0
mirror of synced 2024-05-20 04:42:19 +12:00

add a little support for video

This commit is contained in:
nagadomi 2015-05-24 22:09:42 +09:00
parent badfcec987
commit 7e34dff543
2 changed files with 108 additions and 21 deletions

View file

@ -144,6 +144,33 @@ th waifu2x.lua -m noise_scale -noise_level 2 -i input_image.png -o output_image.
See also `images/gen.sh`.
### Video Encoding
\* Note: `avconv` is the Ubuntu 14.04 equivalent of `ffmpeg`.
Extracting images and audio from a video. (range: 00:09:00 ~ 00:12:00)
```
mkdir frames
avconv -i data/raw.avi -ss 00:09:00 -t 00:03:00 -r 24 -f image2 frames/%06d.png
avconv -i data/raw.avi -ss 00:09:00 -t 00:03:00 audio.mp3
```
Generating an image list.
```
find ./frames -name "*.png" |sort > data/frame.txt
```
waifu2x (for example, noise reduction)
```
mkdir new_frames
th waifu2x.lua -m noise -noise_level 1 -l data/frame.txt -o new_frames/%d.png
```
Generating a video from the waifu2x-processed images and audio.
```
avconv -f image2 -r 24 -i new_frames/%d.png -i audio.mp3 -r 24 -vcodec libx264 -crf 16 video.mp4
```
## Training Your Own Model
### Data Preparation

View file

@ -11,50 +11,110 @@ local BLOCK_OFFSET = 7
-- Make 32-bit float the default Torch tensor type for all subsequent tensor ops.
torch.setdefaulttensortype('torch.FloatTensor')
local function waifu2x()
local cmd = torch.CmdLine()
cmd:text()
cmd:text("waifu2x")
cmd:text("Options:")
cmd:option("-i", "images/miku_small.png", 'path of input image')
cmd:option("-o", "(auto)", 'path of output')
cmd:option("-model_dir", "./models", 'model directory')
cmd:option("-m", "noise_scale", 'method (noise|scale|noise_scale)')
cmd:option("-noise_level", 1, '(1|2)')
cmd:option("-crop_size", 128, 'crop size')
local opt = cmd:parse(arg)
--- Convert a single image with the selected method.
-- @param opt parsed command-line options:
--   opt.i input image path, opt.o output path ("(auto)" derives
--   "<basename>(<method>).png" next to the input), opt.m method
--   (noise|scale|noise_scale), opt.noise_level (1|2), opt.scale scale
--   factor, opt.model_dir model directory.
-- Raises an error for an unknown method.
local function convert_image(opt)
   if opt.o == "(auto)" then
      -- Derive the output name from the input: "<base>(<method>).png".
      local name = path.basename(opt.i)
      local e = path.extension(name)
      local base = name:sub(1, name:len() - e:len())
      opt.o = path.join(path.dirname(opt.i), string.format("%s(%s).png", base, opt.m))
   end
   local x = image_loader.load_float(opt.i)
   local new_x = nil
   local t = sys.clock()
   if opt.m == "noise" then
      local model = torch.load(path.join(opt.model_dir,
                                         ("noise%d_model.t7"):format(opt.noise_level)), "ascii")
      model:evaluate()
      new_x = reconstruct.image(model, x, BLOCK_OFFSET)
   elseif opt.m == "scale" then
      local model = torch.load(path.join(opt.model_dir,
                                         ("scale%.1fx_model.t7"):format(opt.scale)), "ascii")
      model:evaluate()
      new_x = reconstruct.scale(model, opt.scale, x, BLOCK_OFFSET)
   elseif opt.m == "noise_scale" then
      -- Denoise first, then upscale the denoised image.
      local noise_model = torch.load(path.join(opt.model_dir,
                                               ("noise%d_model.t7"):format(opt.noise_level)), "ascii")
      local scale_model = torch.load(path.join(opt.model_dir,
                                               ("scale%.1fx_model.t7"):format(opt.scale)), "ascii")
      noise_model:evaluate()
      scale_model:evaluate()
      x = reconstruct.image(noise_model, x, BLOCK_OFFSET)
      new_x = reconstruct.scale(scale_model, opt.scale, x, BLOCK_OFFSET)
   else
      -- Bug fix: the option is stored in opt.m; opt.method is nil and the
      -- original concatenation itself raised "attempt to concatenate a nil value".
      error("undefined method:" .. opt.m)
   end
   image.save(opt.o, new_x)
   print(opt.o .. ": " .. (sys.clock() - t) .. " sec")
end
--- Convert every image listed in the image-list file opt.l.
-- Loads all three models once up front, then processes each frame with the
-- selected method (noise|scale|noise_scale), saving to opt.o (a printf-style
-- pattern taking the 1-based frame index) or, for "(auto)", next to each frame.
-- @param opt parsed command-line options (see convert_image).
local function convert_frames(opt)
   local noise1_model = torch.load(path.join(opt.model_dir, "noise1_model.t7"), "ascii")
   local noise2_model = torch.load(path.join(opt.model_dir, "noise2_model.t7"), "ascii")
   local scale_model = torch.load(path.join(opt.model_dir,
                                            ("scale%.1fx_model.t7"):format(opt.scale)), "ascii")
   noise1_model:evaluate()
   noise2_model:evaluate()
   scale_model:evaluate()
   -- Read the whole frame list into memory (one path per line).
   local fp = io.open(opt.l)
   local lines = {}
   for line in fp:lines() do
      table.insert(lines, line)
   end
   fp:close()
   for i = 1, #lines do
      local x = image_loader.load_float(lines[i])
      local new_x = nil
      if opt.m == "noise" and opt.noise_level == 1 then
         new_x = reconstruct.image(noise1_model, x, BLOCK_OFFSET)
      elseif opt.m == "noise" and opt.noise_level == 2 then
         new_x = reconstruct.image(noise2_model, x, BLOCK_OFFSET)
      elseif opt.m == "scale" then
         new_x = reconstruct.scale(scale_model, opt.scale, x, BLOCK_OFFSET)
      elseif opt.m == "noise_scale" and opt.noise_level == 1 then
         x = reconstruct.image(noise1_model, x, BLOCK_OFFSET)
         new_x = reconstruct.scale(scale_model, opt.scale, x, BLOCK_OFFSET)
      elseif opt.m == "noise_scale" and opt.noise_level == 2 then
         x = reconstruct.image(noise2_model, x, BLOCK_OFFSET)
         new_x = reconstruct.scale(scale_model, opt.scale, x, BLOCK_OFFSET)
      else
         -- Bug fix: the option is opt.m; opt.method is nil (nil-concat error).
         error("undefined method:" .. opt.m)
      end
      local output = nil
      if opt.o == "(auto)" then
         local name = path.basename(lines[i])
         local e = path.extension(name)
         local base = name:sub(1, name:len() - e:len())
         -- Bug fix: derive the directory from the current frame, not from
         -- opt.i (the single-image option, unrelated to list mode).
         output = path.join(path.dirname(lines[i]), string.format("%s(%s).png", base, opt.m))
      else
         output = string.format(opt.o, i)
      end
      image.save(output, new_x)
      xlua.progress(i, #lines)
      -- Periodic GC keeps Torch tensor memory from accumulating across frames.
      if i % 10 == 0 then
         collectgarbage()
      end
   end
end
--- Command-line entry point.
-- Parses the options and dispatches: an empty -l means single-image mode,
-- otherwise every image in the list file is converted.
local function waifu2x()
   local cmd = torch.CmdLine()
   cmd:text()
   cmd:text("waifu2x")
   cmd:text("Options:")
   cmd:option("-i", "images/miku_small.png", 'path of the input image')
   cmd:option("-l", "", 'path of the image-list')
   cmd:option("-scale", 2, 'scale factor')
   cmd:option("-o", "(auto)", 'path of the output file')
   cmd:option("-model_dir", "./models", 'model directory')
   cmd:option("-m", "noise_scale", 'method (noise|scale|noise_scale)')
   cmd:option("-noise_level", 1, '(1|2)')
   cmd:option("-crop_size", 128, 'patch size per process')
   local options = cmd:parse(arg)
   if options.l == "" then
      convert_image(options)
   else
      convert_frames(options)
   end
end
waifu2x()