From 0a683fcb341dc4398c10813bdd51e20a6d8b2ce8 Mon Sep 17 00:00:00 2001
From: nagadomi
Date: Mon, 22 Jun 2015 18:17:41 +0000
Subject: [PATCH] multi GPU server

---
 appendix/run-web.sh |  9 ++++++++-
 web.lua             | 18 +++++++++++++-----
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/appendix/run-web.sh b/appendix/run-web.sh
index 03c32ab..077bda2 100755
--- a/appendix/run-web.sh
+++ b/appendix/run-web.sh
@@ -1,4 +1,11 @@
 #!/bin/zsh
 # waifu2x daemon script
+gpu=1
+port=8812
+if [ $# -eq 2 ]; then
+    gpu=$1
+    port=$2
+fi
 source /home/ubuntu/.zshrc
-stdbuf -o 0 th web.lua >> ./waifu2x.log 2>&1
+echo stdbuf -o 0 th web.lua -gpu $gpu -port $port >> ./waifu2x_${port}.log 2>&1
+stdbuf -o 0 th web.lua -gpu $gpu -port $port >> ./waifu2x_${port}.log 2>&1
diff --git a/web.lua b/web.lua
index 51ce9b2..dfad0b4 100644
--- a/web.lua
+++ b/web.lua
@@ -4,13 +4,21 @@ local uuid = require 'uuid'
 local ffi = require 'ffi'
 local md5 = require 'md5'
 require 'pl'
-
-torch.setdefaulttensortype('torch.FloatTensor')
-torch.setnumthreads(4)
-
 require './lib/portable'
 require './lib/LeakyReLU'
 
+local cmd = torch.CmdLine()
+cmd:text()
+cmd:text("waifu2x-api")
+cmd:text("Options:")
+cmd:option("-port", 8812, 'listen port')
+cmd:option("-gpu", 1, 'Device ID')
+cmd:option("-core", 2, 'number of CPU cores')
+local opt = cmd:parse(arg)
+cutorch.setDevice(opt.gpu)
+torch.setdefaulttensortype('torch.FloatTensor')
+torch.setnumthreads(opt.core)
+
 local iproc = require './lib/iproc'
 local reconstruct = require './lib/reconstruct'
 local image_loader = require './lib/image_loader'
@@ -196,5 +204,5 @@ local app = turbo.web.Application:new(
       {"^/api$", APIHandler},
    }
 )
-app:listen(8812, "0.0.0.0", {max_body_size = CURL_MAX_SIZE})
+app:listen(opt.port, "0.0.0.0", {max_body_size = CURL_MAX_SIZE})
 turbo.ioloop.instance():start()
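
Usage note (not part of the patch; a minimal sketch under assumed GPU IDs and ports):
with this change, one waifu2x web daemon can be started per GPU by passing a device
ID and a listen port to run-web.sh, for example:

    # one daemon per GPU; the device IDs, ports, and working directory are examples
    ./run-web.sh 1 8812    # GPU 1, listens on port 8812, logs to ./waifu2x_8812.log
    ./run-web.sh 2 8813    # GPU 2, listens on port 8813, logs to ./waifu2x_8813.log

With no arguments the script falls back to gpu=1 and port=8812. Distributing requests
across the per-port instances (e.g. with a reverse proxy) is assumed but not included
in this patch.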