ArchiveBox/archivebox/archive_methods.py

import os
import re
import sys
from functools import wraps
from collections import defaultdict
from datetime import datetime
from peekable import Peekable
from index import wget_output_path, parse_json_link_index, write_link_index
from links import links_after_timestamp
from config import (
CHROME_BINARY,
FETCH_WGET,
FETCH_WGET_REQUISITES,
FETCH_PDF,
FETCH_SCREENSHOT,
FETCH_DOM,
FETCH_WARC,
FETCH_GIT,
FETCH_MEDIA,
RESOLUTION,
CHECK_SSL_VALIDITY,
SUBMIT_ARCHIVE_DOT_ORG,
FETCH_FAVICON,
WGET_USER_AGENT,
CHROME_USER_DATA_DIR,
CHROME_SANDBOX,
TIMEOUT,
MEDIA_TIMEOUT,
ANSI,
ARCHIVE_DIR,
GIT_DOMAINS,
)
from util import (
check_dependencies,
progress,
chmod_file,
pretty_path,
run, PIPE, DEVNULL
)
_RESULTS_TOTALS = { # globals are bad, mmkay
'skipped': 0,
    'succeeded': 0,
'failed': 0,
}
def archive_links(archive_path, links, source=None, resume=None):
check_dependencies()
to_archive = Peekable(links_after_timestamp(links, resume))
idx, link = 0, to_archive.peek(0)
try:
for idx, link in enumerate(to_archive):
link_dir = os.path.join(ARCHIVE_DIR, link['timestamp'])
archive_link(link_dir, link)
except (KeyboardInterrupt, SystemExit, Exception) as e:
print('{lightyellow}[X] [{now}] Downloading paused on link {timestamp} ({idx}/{total}){reset}'.format(
**ANSI,
now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
idx=idx+1,
timestamp=link['timestamp'],
total=len(links),
))
print(' Continue where you left off by running:')
print(' {} {}'.format(
pretty_path(sys.argv[0]),
link['timestamp'],
))
if not isinstance(e, KeyboardInterrupt):
raise e
raise SystemExit(1)
def archive_link(link_dir, link, overwrite=True):
"""download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""
try:
update_existing = os.path.exists(link_dir)
if update_existing:
link = {
**parse_json_link_index(link_dir),
**link,
}
else:
os.makedirs(link_dir)
log_link_archive(link_dir, link, update_existing)
if FETCH_FAVICON:
link = fetch_favicon(link_dir, link, overwrite=overwrite)
if FETCH_WGET:
link = fetch_wget(link_dir, link, overwrite=overwrite)
if FETCH_PDF:
link = fetch_pdf(link_dir, link, overwrite=overwrite)
if FETCH_SCREENSHOT:
link = fetch_screenshot(link_dir, link, overwrite=overwrite)
if FETCH_DOM:
link = fetch_dom(link_dir, link, overwrite=overwrite)
if SUBMIT_ARCHIVE_DOT_ORG:
link = archive_dot_org(link_dir, link, overwrite=overwrite)
if FETCH_GIT:
link = fetch_git(link_dir, link, overwrite=overwrite)
if FETCH_MEDIA:
link = fetch_media(link_dir, link, overwrite=overwrite)
write_link_index(link_dir, link)
except Exception as err:
        print(f'    ! Failed to archive link: {err.__class__.__name__}: {err}')
return link
def log_link_archive(link_dir, link, update_existing):
print('[{symbol_color}{symbol}{reset}] [{now}] "{title}"\n {blue}{url}{reset}'.format(
symbol='*' if update_existing else '+',
symbol_color=ANSI['black' if update_existing else 'green'],
now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
**link,
**ANSI,
))
print(' > {}{}'.format(pretty_path(link_dir), '' if update_existing else ' (new)'))
if link['type']:
print(' i {}'.format(link['type']))
def attach_result_to_link(method):
"""
Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
the updated link object.
"""
def decorator(fetch_func):
@wraps(fetch_func)
def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
# initialize methods and history json field on link
link['latest'] = link.get('latest') or {}
link['latest'][method] = link['latest'].get(method) or None
link['history'] = link.get('history') or {}
link['history'][method] = link['history'].get(method) or []
start_ts = datetime.now().timestamp()
# if a valid method output is already present, dont run the fetch function
if link['latest'][method] and not overwrite:
                print('  √ {}'.format(method))
result = None
else:
print(' > {}'.format(method))
result = fetch_func(link_dir, link, **kwargs)
end_ts = datetime.now().timestamp()
duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]
# append a history item recording fail/success
history_entry = {
'timestamp': str(start_ts).split('.')[0],
}
if result is None:
history_entry['status'] = 'skipped'
elif isinstance(result.get('output'), Exception):
history_entry['status'] = 'failed'
history_entry['duration'] = duration
history_entry.update(result or {})
link['history'][method].append(history_entry)
else:
                history_entry['status'] = 'succeeded'
history_entry['duration'] = duration
history_entry.update(result or {})
link['history'][method].append(history_entry)
link['latest'][method] = result['output']
_RESULTS_TOTALS[history_entry['status']] += 1
return link
return timed_fetch_func
return decorator
@attach_result_to_link('wget')
def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, warc=FETCH_WARC, timeout=TIMEOUT):
"""download full site using wget"""
domain_dir = os.path.join(link_dir, link['domain'])
existing_file = wget_output_path(link)
if os.path.exists(domain_dir) and existing_file:
return {'output': existing_file, 'status': 'skipped'}
if warc:
warc_dir = os.path.join(link_dir, 'warc')
os.makedirs(warc_dir, exist_ok=True)
warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))
# WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
CMD = [
'wget',
# '--server-response', # print headers for better error parsing
'--no-verbose',
'--adjust-extension',
'--convert-links',
'--force-directories',
'--backup-converted',
'--span-hosts',
'--no-parent',
'--restrict-file-names=unix',
'--timeout={}'.format(timeout),
*(() if warc else ('--timestamping',)),
*(('--warc-file={}'.format(warc_path),) if warc else ()),
        *(('--page-requisites',) if requisites else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
*((() if CHECK_SSL_VALIDITY else ('--no-check-certificate',))),
link['url'],
]
end = progress(timeout, prefix=' ')
try:
result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout) # index.html
end()
output = wget_output_path(link, look_in=domain_dir)
output_tail = [' ' + line for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:] if line.strip()]
# parse out number of files downloaded from "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
files_downloaded = (
int(output_tail[-1].strip().split(' ', 2)[1] or 0)
if 'Downloaded:' in output_tail[-1]
else 0
)
# Check for common failure cases
if result.returncode > 0 and files_downloaded < 1:
print(' Got wget response code {}:'.format(result.returncode))
print('\n'.join(output_tail))
if b'403: Forbidden' in result.stderr:
raise Exception('403 Forbidden (try changing WGET_USER_AGENT)')
if b'404: Not Found' in result.stderr:
raise Exception('404 Not Found')
if b'ERROR 500: Internal Server Error' in result.stderr:
raise Exception('500 Internal Server Error')
raise Exception('Got an error from the server')
except Exception as e:
end()
print(' {}Some resources were skipped: {}{}'.format(ANSI['lightyellow'], e, ANSI['reset']))
print(' Run to see full output:')
print(' cd {};'.format(link_dir))
print(' {}'.format(' '.join(CMD)))
output = e
return {
'cmd': CMD,
'output': output,
}
@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
"""print PDF of site to file using chrome --headless"""
if link['type'] in ('PDF', 'image'):
return {'output': wget_output_path(link)}
if os.path.exists(os.path.join(link_dir, 'output.pdf')):
return {'output': 'output.pdf', 'status': 'skipped'}
CMD = [
*chrome_headless(user_data_dir=user_data_dir),
'--print-to-pdf',
'--hide-scrollbars',
'--timeout={}'.format((timeout) * 1000),
*(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
link['url']
]
end = progress(timeout, prefix=' ')
try:
result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout) # output.pdf
end()
if result.returncode:
print(' ', (result.stderr or result.stdout).decode())
raise Exception('Failed to print PDF')
chmod_file('output.pdf', cwd=link_dir)
output = 'output.pdf'
except Exception as e:
end()
print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
print(' Run to see full output:')
print(' cd {};'.format(link_dir))
print(' {}'.format(' '.join(CMD)))
output = e
return {
'cmd': CMD,
'output': output,
}
@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR, resolution=RESOLUTION):
"""take screenshot of site using chrome --headless"""
if link['type'] in ('PDF', 'image'):
return {'output': wget_output_path(link)}
if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
return {'output': 'screenshot.png', 'status': 'skipped'}
CMD = [
*chrome_headless(user_data_dir=user_data_dir),
'--screenshot',
'--window-size={}'.format(resolution),
'--hide-scrollbars',
'--timeout={}'.format((timeout) * 1000),
*(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
# '--full-page', # TODO: make this actually work using ./bin/screenshot fullPage: true
link['url'],
]
end = progress(timeout, prefix=' ')
try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)  # screenshot.png
end()
if result.returncode:
print(' ', (result.stderr or result.stdout).decode())
raise Exception('Failed to take screenshot')
chmod_file('screenshot.png', cwd=link_dir)
output = 'screenshot.png'
except Exception as e:
end()
print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
print(' Run to see full output:')
print(' cd {};'.format(link_dir))
print(' {}'.format(' '.join(CMD)))
output = e
return {
'cmd': CMD,
'output': output,
}
@attach_result_to_link('dom')
def fetch_dom(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
"""print HTML of site to file using chrome --dump-html"""
if link['type'] in ('PDF', 'image'):
return {'output': wget_output_path(link)}
output_path = os.path.join(link_dir, 'output.html')
if os.path.exists(output_path):
return {'output': 'output.html', 'status': 'skipped'}
CMD = [
*chrome_headless(user_data_dir=user_data_dir),
'--dump-dom',
'--timeout={}'.format((timeout) * 1000),
link['url']
]
end = progress(timeout, prefix=' ')
try:
with open(output_path, 'w+') as f:
result = run(CMD, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout) # output.html
end()
if result.returncode:
print(' ', (result.stderr).decode())
raise Exception('Failed to fetch DOM')
chmod_file('output.html', cwd=link_dir)
output = 'output.html'
except Exception as e:
end()
print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
print(' Run to see full output:')
print(' cd {};'.format(link_dir))
print(' {}'.format(' '.join(CMD)))
output = e
return {
'cmd': CMD,
'output': output,
}
@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
"""submit site to archive.org for archiving via their service, save returned archive url"""
path = os.path.join(link_dir, 'archive.org.txt')
    if os.path.exists(path):
        with open(path, 'r') as f:
            archive_org_url = f.read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}
submit_url = 'https://web.archive.org/save/{}'.format(link['url'])
success = False
CMD = [
'curl',
'--location',
'--head',
'--max-time', str(timeout),
'--get',
*(() if CHECK_SSL_VALIDITY else ('--insecure',)),
submit_url,
]
end = progress(timeout, prefix=' ')
try:
result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout) # archive.org.txt
end()
# Parse archive.org response headers
headers = defaultdict(list)
# lowercase all the header names and store in dict
for header in result.stdout.splitlines():
if b':' not in header or not header.strip():
continue
name, val = header.decode().split(':', 1)
headers[name.lower().strip()].append(val.strip())
# Get successful archive url in "content-location" header or any errors
content_location = headers['content-location']
errors = headers['x-archive-wayback-runtime-error']
if content_location:
saved_url = 'https://web.archive.org{}'.format(content_location[0])
success = True
elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
output = submit_url
# raise Exception('Archive.org denied by {}/robots.txt'.format(link['domain']))
elif errors:
raise Exception(', '.join(errors))
else:
raise Exception('Failed to find "content-location" URL header in Archive.org response.')
except Exception as e:
end()
print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
print(' Run to see full output:')
print(' {}'.format(' '.join(CMD)))
output = e
if success:
with open(os.path.join(link_dir, 'archive.org.txt'), 'w', encoding='utf-8') as f:
f.write(saved_url)
chmod_file('archive.org.txt', cwd=link_dir)
output = saved_url
return {
'cmd': CMD,
'output': output,
}
@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
"""download site favicon from google's favicon api"""
if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
return {'output': 'favicon.ico', 'status': 'skipped'}
CMD = [
'curl',
'--max-time', str(timeout),
'https://www.google.com/s2/favicons?domain={domain}'.format(**link),
]
fout = open('{}/favicon.ico'.format(link_dir), 'w')
end = progress(timeout, prefix=' ')
try:
run(CMD, stdout=fout, stderr=DEVNULL, cwd=link_dir, timeout=timeout) # favicon.ico
fout.close()
end()
chmod_file('favicon.ico', cwd=link_dir)
output = 'favicon.ico'
except Exception as e:
fout.close()
end()
print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
print(' Run to see full output:')
print(' {}'.format(' '.join(CMD)))
output = e
return {
'cmd': CMD,
'output': output,
}
@attach_result_to_link('media')
def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT, overwrite=False):
"""Download playlists or individual video, audio, and subtitles using youtube-dl"""
# import ipdb; ipdb.set_trace()
output = os.path.join(link_dir, 'media')
already_done = os.path.exists(output) # and os.listdir(output)
if already_done and not overwrite:
return {'output': 'media', 'status': 'skipped'}
os.makedirs(output, exist_ok=True)
CMD = [
'youtube-dl',
'--write-description',
'--write-info-json',
'--write-annotations',
'--yes-playlist',
'--write-thumbnail',
'--no-call-home',
'--no-check-certificate',
'--all-subs',
'-x',
'-k',
'--audio-format', 'mp3',
'--audio-quality', '320K',
'--embed-thumbnail',
'--add-metadata',
link['url'],
]
end = progress(timeout, prefix=' ')
try:
result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output, timeout=timeout + 1) # audio/audio.mp3
end()
if result.returncode:
            if b'ERROR: Unsupported URL' not in result.stderr:
                print('        got youtube-dl response code {}:'.format(result.returncode))
                print(result.stderr.decode())
                raise Exception('Failed to download media')
except Exception as e:
end()
print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
print(' Run to see full output:')
print(' cd {};'.format(link_dir))
print(' {}'.format(' '.join(CMD)))
output = e
return {
'cmd': CMD,
'output': output,
}
@attach_result_to_link('git')
def fetch_git(link_dir, link, timeout=TIMEOUT):
"""download full site using git"""
if not (link['domain'] in GIT_DOMAINS
or link['url'].endswith('.git')
or link['type'] == 'git'):
return
if os.path.exists(os.path.join(link_dir, 'git')):
return {'output': 'git', 'status': 'skipped'}
CMD = ['git', 'clone', '--mirror', '--recursive', link['url'].split('#')[0], 'git']
output = 'git'
end = progress(timeout, prefix=' ')
try:
result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1) # git/<reponame>
end()
if result.returncode > 0:
print(' got git response code {}:'.format(result.returncode))
raise Exception('Failed git download')
except Exception as e:
end()
print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
print(' Run to see full output:')
print(' cd {};'.format(link_dir))
print(' {}'.format(' '.join(CMD)))
output = e
return {
'cmd': CMD,
'output': output,
}
def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR):
args = [binary, '--headless'] # '--disable-gpu'
if not CHROME_SANDBOX:
args.append('--no-sandbox')
default_profile = os.path.expanduser('~/Library/Application Support/Google/Chrome/Default')
if user_data_dir:
args.append('--user-data-dir={}'.format(user_data_dir))
elif os.path.exists(default_profile):
args.append('--user-data-dir={}'.format(default_profile))
return args
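# Illustrative example (assuming CHROME_SANDBOX is True and CHROME_USER_DATA_DIR is set):
#   chrome_headless() -> [CHROME_BINARY, '--headless', '--user-data-dir=/path/to/profile']
# The returned list is spread into the CMD lists of fetch_pdf, fetch_screenshot, and fetch_dom.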