From 7ed4f8deedc71028de42f40deae1cf4111e46028 Mon Sep 17 00:00:00 2001
From: Pig Monkey
Date: Fri, 21 Sep 2018 17:41:11 -0700
Subject: [PATCH] support a configurable output directory

Closes #94
---
 README.md          | 1 +
 archiver/config.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 89b822c9..cec97712 100644
--- a/README.md
+++ b/README.md
@@ -170,6 +170,7 @@ env CHROME_BINARY=google-chrome-stable RESOLUTION=1440,900 FETCH_PDF=False ./arc
 - user agent: `WGET_USER_AGENT` values: [`Wget/1.19.1`]/`"Mozilla/5.0 ..."`/`...`
 - chrome profile: `CHROME_USER_DATA_DIR` values: [`~/Library/Application\ Support/Google/Chrome/Default`]/`/tmp/chrome-profile`/`...` To capture sites that require a user to be logged in, you must specify a path to a chrome profile (which loads the cookies needed for the user to be logged in). If you don't have an existing chrome profile, create one with `chromium-browser --disable-gpu --user-data-dir=/tmp/chrome-profile`, and log into the sites you need. Then set `CHROME_USER_DATA_DIR=/tmp/chrome-profile` to make Bookmark Archiver use that profile.
+ - output directory: `OUTPUT_DIR` values: [`$REPO_DIR/output`]/`/srv/www/bookmarks`/`...` Optionally output the archives to an alternative directory.
 
 (See defaults & more at the top of `config.py`)
 
diff --git a/archiver/config.py b/archiver/config.py
index 113d32a7..4b99be93 100644
--- a/archiver/config.py
+++ b/archiver/config.py
@@ -35,7 +35,7 @@ FOOTER_INFO = os.getenv('FOOTER_INFO', 'Content is hosted
 
 ### Paths
 REPO_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
-OUTPUT_DIR = os.path.join(REPO_DIR, 'output')
+OUTPUT_DIR = os.getenv('OUTPUT_DIR', os.path.join(REPO_DIR, 'output'))
 ARCHIVE_DIR = os.path.join(OUTPUT_DIR, 'archive')
 SOURCES_DIR = os.path.join(OUTPUT_DIR, 'sources')
 