
fetch warc file inline with wget instead of as separate step

Nick Sweeting 2019-01-14 22:40:55 -05:00
parent 2d86d4dfe0
commit c42fcd42d7
2 changed files with 12 additions and 57 deletions
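
In effect, the WARC capture is folded into the existing wget step: instead of a separate fetch_warc() pass that ran wget a second time, fetch_wget() now creates a warc/ subdirectory and hands --warc-file to the same invocation. A minimal standalone sketch of that shape (illustrative names and options, not the project's code):

# Sketch only: one wget run that both mirrors a page and records a WARC of the traffic.
import os
from datetime import datetime
from subprocess import PIPE, run

def mirror_with_warc(link_dir, url, warc=True, timeout=60):
    os.makedirs(link_dir, exist_ok=True)
    cmd = ['wget', '--no-parent', '--page-requisites', '--restrict-file-names=unix']
    if warc:
        os.makedirs(os.path.join(link_dir, 'warc'), exist_ok=True)
        # wget appends the .warc.gz (or .warc) extension to this prefix on its own
        warc_prefix = os.path.join('warc', str(int(datetime.now().timestamp())))
        cmd.append('--warc-file={}'.format(warc_prefix))
    cmd.append(url)
    return run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)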


@@ -105,9 +105,6 @@ def archive_link(link_dir, link, overwrite=True):
     if FETCH_DOM:
         link = fetch_dom(link_dir, link, overwrite=overwrite)
 
-    if FETCH_WARC:
-        link = fetch_warc(link_dir, link, overwrite=overwrite)
-
     if SUBMIT_ARCHIVE_DOT_ORG:
         link = archive_dot_org(link_dir, link, overwrite=overwrite)
@@ -191,7 +188,7 @@ def attach_result_to_link(method):
 @attach_result_to_link('wget')
-def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, timeout=TIMEOUT):
+def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, warc=FETCH_WARC, timeout=TIMEOUT):
     """download full site using wget"""
 
     domain_dir = os.path.join(link_dir, link['domain'])
@@ -199,6 +196,11 @@ def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, timeout=TIMEOUT
     if os.path.exists(domain_dir) and existing_file:
         return {'output': existing_file, 'status': 'skipped'}
 
+    if warc:
+        warc_dir = os.path.join(link_dir, 'warc')
+        os.makedirs(warc_dir, exist_ok=True)
+        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))
+
     # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
     CMD = [
         'wget',
@@ -212,6 +214,7 @@ def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, timeout=TIMEOUT
         '--span-hosts',
         '--no-parent',
         '--restrict-file-names=unix',
+        *(('--warc-file={}'.format(warc_path),) if warc else ()),
         *(('--page-requisites',) if FETCH_WGET_REQUISITES else ()),
         *(('--user-agent="{}"'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
         *((() if CHECK_SSL_VALIDITY else ('--no-check-certificate',))),
@@ -239,7 +242,7 @@ def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, timeout=TIMEOUT
     except Exception as e:
         end()
         print(' Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
-        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
+        print(' {}Warning: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
         output = e
 
     return {
@@ -540,61 +543,13 @@ def fetch_git(link_dir, link, timeout=TIMEOUT):
         'output': output,
     }
 
-@attach_result_to_link('warc')
-def fetch_warc(link_dir, link, timeout=TIMEOUT):
-    """download full site using wget's warc saving feature"""
-
-    output = os.path.join(link_dir, 'warc')
-    if os.path.exists(output) and os.listdir(output):
-        return {'output': 'warc', 'status': 'skipped'}
-    os.makedirs(output, exist_ok=True)
-
-    CMD = [
-        'wget',
-        '--warc-file="{}"'.format(int(datetime.now().timestamp())),
-        *(('--user-agent="{}"'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
-        *((() if CHECK_SSL_VALIDITY else ('--no-check-certificate',))),
-        link['url'],
-    ]
-    end = progress(timeout, prefix=' ')
-    try:
-        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output, timeout=timeout + 1)  # warc/at-00000.warc.gz
-        end()
-
-        # Check for common failure cases
-        if result.returncode > 0:
-            print(' got wget response code {}:'.format(result.returncode))
-            if result.returncode != 8:
-                print('\n'.join(' ' + line for line in (result.stderr or result.stdout).decode().rsplit('\n', 10)[-10:] if line.strip()))
-            if b'403: Forbidden' in result.stderr:
-                raise Exception('403 Forbidden (try changing WGET_USER_AGENT)')
-            if b'404: Not Found' in result.stderr:
-                raise Exception('404 Not Found')
-            if b'ERROR 500: Internal Server Error' in result.stderr:
-                raise Exception('500 Internal Server Error')
-            if result.returncode == 4:
-                raise Exception('Failed warc download')
-    except Exception as e:
-        end()
-        print(' Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
-        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
-        output = e
-
-    return {
-        'cmd': CMD,
-        'output': output,
-    }
-
 def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR):
     args = [binary, '--headless']  # '--disable-gpu'
     if not CHROME_SANDBOX:
         args.append('--no-sandbox')
     default_profile = os.path.expanduser('~/Library/Application Support/Google/Chrome/Default')
     if user_data_dir:
-        args.append('--user-data-dir="{}"'.format(user_data_dir))
+        args.append('--user-data-dir={}'.format(user_data_dir))
     elif os.path.exists(default_profile):
-        args.append('--user-data-dir="{}"'.format(default_profile))
+        args.append('--user-data-dir={}'.format(default_profile))
     return args
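
The --user-data-dir change in chrome_headless is a quoting cleanup: the argument list is handed to the browser process directly, with no shell in between, so double quotes written into the string are not stripped and would end up as literal characters in the path Chrome receives. A small illustration (re-executing Python just to echo back the argument it was given):

import sys
from subprocess import PIPE, run

quoted = '--user-data-dir="{}"'.format('/tmp/profile')
arg = run([sys.executable, '-c', 'import sys; print(sys.argv[1])', quoted],
          stdout=PIPE).stdout.decode().strip()
print(arg)  # --user-data-dir="/tmp/profile"  <- the quotes survive into the argument

The second changed file updates the configuration defaults to match: FETCH_WARC is now enabled by default, and OUTPUT_DIR is resolved to an absolute path.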


@@ -23,7 +23,7 @@ FETCH_WGET_REQUISITES = os.getenv('FETCH_WGET_REQUISITES', 'True'
 FETCH_PDF = os.getenv('FETCH_PDF', 'True' ).lower() == 'true'
 FETCH_SCREENSHOT = os.getenv('FETCH_SCREENSHOT', 'True' ).lower() == 'true'
 FETCH_DOM = os.getenv('FETCH_DOM', 'True' ).lower() == 'true'
-FETCH_WARC = os.getenv('FETCH_WARC', 'False' ).lower() == 'true'
+FETCH_WARC = os.getenv('FETCH_WARC', 'True' ).lower() == 'true'
 FETCH_GIT = os.getenv('FETCH_GIT', 'True' ).lower() == 'true'
 FETCH_MEDIA = os.getenv('FETCH_MEDIA', 'False' ).lower() == 'true'
 FETCH_FAVICON = os.getenv('FETCH_FAVICON', 'True' ).lower() == 'true'
@@ -42,7 +42,7 @@ FOOTER_INFO = os.getenv('FOOTER_INFO', 'Content is hosted
 ### Paths
 REPO_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
-OUTPUT_DIR = os.getenv('OUTPUT_DIR', os.path.join(REPO_DIR, 'output'))
+OUTPUT_DIR = os.path.abspath(os.getenv('OUTPUT_DIR', os.path.join(REPO_DIR, 'output')))
 ARCHIVE_DIR = os.path.join(OUTPUT_DIR, 'archive')
 SOURCES_DIR = os.path.join(OUTPUT_DIR, 'sources')
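
For reference, a short sketch of how the two changed settings behave at runtime (same parsing pattern as the diff above; the relative default for OUTPUT_DIR is illustrative):

import os

# Any value other than the string 'true' (case-insensitive) disables the flag,
# so WARC capture can still be switched off per run even though it now defaults on.
os.environ['FETCH_WARC'] = 'False'
FETCH_WARC = os.getenv('FETCH_WARC', 'True').lower() == 'true'
print(FETCH_WARC)  # False

# OUTPUT_DIR is now made absolute once at import time, so a relative value like
# 'output' no longer shifts meaning if the process later changes its working directory.
OUTPUT_DIR = os.path.abspath(os.getenv('OUTPUT_DIR', 'output'))
print(OUTPUT_DIR)  # e.g. /current/working/dir/output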