1
0
Fork 0
mirror of synced 2024-06-01 18:20:20 +12:00
ArchiveBox/archivebox/extractors/archive_org.py

120 lines
4 KiB
Python
Raw Normal View History

2019-04-28 09:26:24 +12:00
__package__ = 'archivebox.extractors'
2020-09-16 07:05:48 +12:00
from pathlib import Path
2019-04-28 09:26:24 +12:00
from typing import Optional, List, Dict, Tuple
from collections import defaultdict
2019-05-01 15:13:04 +12:00
from ..index.schema import Link, ArchiveResult, ArchiveOutput, ArchiveError
2020-06-26 14:14:40 +12:00
from ..system import run, chmod_file
2019-04-28 09:26:24 +12:00
from ..util import (
enforce_types,
is_static_file,
dedupe,
2019-04-28 09:26:24 +12:00
)
from ..config import (
TIMEOUT,
CURL_ARGS,
CURL_EXTRA_ARGS,
2020-06-26 14:14:40 +12:00
CHECK_SSL_VALIDITY,
2019-04-28 09:26:24 +12:00
SAVE_ARCHIVE_DOT_ORG,
CURL_BINARY,
CURL_VERSION,
2020-06-26 14:14:40 +12:00
CURL_USER_AGENT,
2019-04-28 09:26:24 +12:00
)
from ..logging_util import TimedProgress
2019-04-28 09:26:24 +12:00
@enforce_types
def should_save_archive_dot_org(link: Link, out_dir: Optional[Path]=None, overwrite: Optional[bool]=False) -> bool:
    """Decide whether this link should be submitted to archive.org.

    Static files (images, PDFs, etc.) are never submitted. An existing
    archive.org.txt result short-circuits the check unless overwrite is
    requested. Otherwise defer to the SAVE_ARCHIVE_DOT_ORG config flag.
    """
    if is_static_file(link.url):
        return False

    target_dir = out_dir or Path(link.link_dir)
    already_archived = (target_dir / 'archive.org.txt').exists()
    if already_archived and not overwrite:
        return False

    return SAVE_ARCHIVE_DOT_ORG
@enforce_types
def save_archive_dot_org(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult:
    """submit site to archive.org for archiving via their service, save returned archive url

    Issues a curl HEAD request to https://web.archive.org/save/<url> and
    parses the response headers for the snapshot URL, which is written to
    <out_dir>/archive.org.txt on success.

    Args:
        link: the Link whose url is submitted to the Wayback Machine.
        out_dir: directory to write archive.org.txt into (defaults to link.link_dir).
        timeout: max seconds allowed for the curl request.

    Returns:
        ArchiveResult recording the curl cmd, cwd, status, timing stats, and
        the snapshot URL (or the Exception) as its output.
    """
    out_dir = out_dir or Path(link.link_dir)
    # output starts as the result filename; it is replaced by the Exception on
    # failure, or by the final archive.org URL on success (see below).
    output: ArchiveOutput = 'archive.org.txt'
    archive_org_url = None
    submit_url = 'https://web.archive.org/save/{}'.format(link.url)
    # later options take precedence
    options = [
        *CURL_ARGS,
        *CURL_EXTRA_ARGS,
        '--head',  # headers only: the snapshot URL comes back in a response header
        '--max-time', str(timeout),
        *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []),
        *([] if CHECK_SSL_VALIDITY else ['--insecure']),
    ]
    cmd = [
        CURL_BINARY,
        *dedupe(options),
        submit_url,
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix='      ')
    try:
        result = run(cmd, cwd=str(out_dir), timeout=timeout)
        content_location, errors = parse_archive_dot_org_response(result.stdout)
        if content_location:
            archive_org_url = content_location[0]
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            # Site's robots.txt blocks the Wayback crawler; treated as a
            # non-fatal "no snapshot" instead of raising.
            archive_org_url = None
            # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link.url)))
        elif errors:
            raise ArchiveError(', '.join(errors))
        else:
            raise ArchiveError('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    # Only write the result file on success (output is still the filename,
    # not an Exception, at this point).
    if output and not isinstance(output, Exception):
        # instead of writing None when archive.org rejects the url write the
        # url to resubmit it to archive.org. This is so when the user visits
        # the URL in person, it will attempt to re-archive it, and it'll show the
        # nicer error message explaining why the url was rejected if it fails.
        archive_org_url = archive_org_url or submit_url
        with open(str(out_dir / output), 'w', encoding='utf-8') as f:
            f.write(archive_org_url)
        chmod_file('archive.org.txt', cwd=str(out_dir))
        output = archive_org_url

    return ArchiveResult(
        cmd=cmd,
        pwd=str(out_dir),
        cmd_version=CURL_VERSION,
        output=output,
        status=status,
        **timer.stats,
    )
@enforce_types
def parse_archive_dot_org_response(response: bytes) -> Tuple[List[str], List[str]]:
    """Extract snapshot URL(s) and error messages from a raw archive.org response.

    Parses the HTTP response headers (as bytes) into a case-insensitive
    multimap, then pulls out the "content-location" values (falling back to
    "location") and any "x-archive-wayback-runtime-error" values.

    Returns:
        (content_location, errors) as two lists of header value strings.
    """
    headers: Dict[str, List[str]] = defaultdict(list)

    # Fold every "Name: value" line into the multimap, lowercasing names so
    # lookup is case-insensitive.
    for raw_line in response.splitlines():
        if not raw_line.strip() or b':' not in raw_line:
            continue
        name, _, value = raw_line.decode().partition(':')
        headers[name.strip().lower()].append(value.strip())

    # Successful saves report the snapshot URL in "content-location";
    # some responses use "location" instead.
    content_location = headers.get('content-location', headers['location'])
    errors = headers['x-archive-wayback-runtime-error']
    return content_location, errors