# ArchiveBox/archivebox/links.py
"""
In ArchiveBox, a Link represents a single entry that we track in the
json index. All links pass through all archiver functions and the latest,
most up-to-date canonical output for each is stored in "latest".

Link {
    timestamp: str,     (how we uniquely id links)
    url: str,
    title: str,
    tags: str,
    sources: [str],
    history: {
        pdf: [
            {start_ts, end_ts, duration, cmd, pwd, status, output},
            ...
        ],
        ...
    },
}
"""

from html import unescape
from collections import OrderedDict

from util import (
    scheme,
    merge_links,
    check_link_structure,
    check_links_structure,
)

from config import (
    URL_BLACKLIST,
)


def validate_links(links):
    check_links_structure(links)
    links = archivable_links(links)   # remove chrome://, about:, mailto: etc.
    links = uniquefied_links(links)   # merge/dedupe duplicate timestamps & urls
    links = sorted_links(links)       # deterministically sort the links based on timestamp, url

    if not links:
        print('[X] No links found :(')
        raise SystemExit(1)

    for link in links:
        link['title'] = unescape(link['title'].strip()) if link['title'] else None
        check_link_structure(link)

    return list(links)
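
# Usage sketch (hypothetical data; in practice the links come from a parsed
# import file or the existing JSON index, and util/config must be importable):
#
# >>> validate_links([
# ...     {'url': 'https://example.com', 'timestamp': '1554971760',
# ...      'title': 'Example', 'tags': '', 'sources': [], 'history': {}},
# ... ])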


def archivable_links(links):
    """remove chrome://, about:// or other schemed links that can't be archived"""
    for link in links:
        scheme_is_valid = scheme(link['url']) in ('http', 'https', 'ftp')
        not_blacklisted = (not URL_BLACKLIST.match(link['url'])) if URL_BLACKLIST else True
        if scheme_is_valid and not_blacklisted:
            yield link
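
# For example (hypothetical inputs, assuming no URL_BLACKLIST is configured),
# only the https:// entry survives; the chrome:// and mailto: ones are dropped:
#
# >>> list(archivable_links([
# ...     {'url': 'https://example.com'},
# ...     {'url': 'chrome://history'},
# ...     {'url': 'mailto:someone@example.com'},
# ... ]))
# [{'url': 'https://example.com'}]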


def uniquefied_links(sorted_links):
    """
    ensures that all non-duplicate links have monotonically increasing timestamps
    """

    unique_urls = OrderedDict()

    lower = lambda url: url.lower().strip()
    without_www = lambda url: url.replace('://www.', '://', 1)
    without_trailing_slash = lambda url: url[:-1] if url[-1] == '/' else url.replace('/?', '?')

    for link in sorted_links:
        fuzzy_url = without_www(without_trailing_slash(lower(link['url'])))
        if fuzzy_url in unique_urls:
            # merge with any other links that share the same url
            link = merge_links(unique_urls[fuzzy_url], link)
        unique_urls[fuzzy_url] = link

    unique_timestamps = OrderedDict()
    for link in unique_urls.values():
        link['timestamp'] = lowest_uniq_timestamp(unique_timestamps, link['timestamp'])
        unique_timestamps[link['timestamp']] = link

    return unique_timestamps.values()
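
# The fuzzy key treats these hypothetical URLs as the same page, so their
# entries would be merged via merge_links:
#
#   'https://www.Example.com/page/'  ->  'https://example.com/page'
#   'https://example.com/page'       ->  'https://example.com/page'
#
# The scheme is preserved, so an http:// and an https:// copy of the same page
# are still treated as distinct links.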


def sorted_links(links):
    sort_func = lambda link: (link['timestamp'].split('.', 1)[0], link['url'])
    return sorted(links, key=sort_func, reverse=True)
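
# With reverse=True the newest links sort first, e.g. hypothetical timestamps
# '1554971760', '1554971758.0', '1234567890' come back in that order
# (the sort key drops the de-duplication suffix after the '.').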


def links_after_timestamp(links, timestamp=None):
    if not timestamp:
        yield from links
        return

    for link in links:
        try:
            if float(link['timestamp']) <= float(timestamp):
                yield link
        except (ValueError, TypeError):
            print('Resume value and all timestamp values must be valid numbers.')
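
# "After" refers to position in the newest-first ordering from sorted_links:
# when resuming from a given timestamp, only links with a timestamp <= the
# resume value (the not-yet-archived remainder of the list) are yielded.
# For example, with hypothetical values, resuming at '1554971759' skips a link
# stamped '1554971760' and yields one stamped '1554971758'.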


def lowest_uniq_timestamp(used_timestamps, timestamp):
    """resolve duplicate timestamps by appending a decimal: 1234, 1234, 1234 -> 1234, 1234.0, 1234.1"""

    timestamp = timestamp.split('.')[0]
    nonce = 0

    # first try 152323423 before 152323423.0
    if timestamp not in used_timestamps:
        return timestamp

    new_timestamp = '{}.{}'.format(timestamp, nonce)
    while new_timestamp in used_timestamps:
        nonce += 1
        new_timestamp = '{}.{}'.format(timestamp, nonce)

    return new_timestamp
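
# Quick illustrative check of the de-duplication behaviour (hypothetical values):
#
# >>> used = {}
# >>> for _ in range(3):
# ...     ts = lowest_uniq_timestamp(used, '1234')
# ...     used[ts] = None
# ...     print(ts)
# 1234
# 1234.0
# 1234.1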