From a95912679ecea5254618428170342f5adcf3ab13 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 23 Oct 2017 04:58:41 -0500 Subject: [PATCH] refactoring and fancy new link index --- index.py | 115 ++++++++------------------ links.py | 97 +++++++++++----------- parse.py | 80 +++++++++++++------ templates/index.html | 2 +- templates/index_row.html | 2 +- templates/link_index.html | 9 ++- util.py | 164 ++++++++++++++++++++++++++++++++++---- 7 files changed, 295 insertions(+), 174 deletions(-) diff --git a/index.py b/index.py index 29bfabb1..7c5d2cfe 100644 --- a/index.py +++ b/index.py @@ -1,5 +1,4 @@ import os -import re import json from datetime import datetime @@ -14,20 +13,15 @@ from config import ( ANSI, GIT_SHA, ) -from util import chmod_file +from util import ( + chmod_file, + html_appended_url, + derived_link_info, +) ### Homepage index for all the links -def parse_json_links_index(out_dir): - """load the index in a given directory and merge it with the given link""" - index_path = os.path.join(out_dir, 'index.json') - if os.path.exists(index_path): - with open(index_path, 'r', encoding='utf-8') as f: - return json.load(f)['links'] - - return [] - def write_links_index(out_dir, links): """create index.html file for a given list of links""" @@ -44,8 +38,6 @@ def write_links_index(out_dir, links): write_json_links_index(out_dir, links) write_html_links_index(out_dir, links) - chmod_file(out_dir, permissions=ARCHIVE_PERMISSIONS) - def write_json_links_index(out_dir, links): """write the json link index to a given path""" @@ -65,6 +57,15 @@ def write_json_links_index(out_dir, links): chmod_file(path) +def parse_json_links_index(out_dir): + """load the index in a given directory and merge it with the given link""" + index_path = os.path.join(out_dir, 'index.json') + if os.path.exists(index_path): + with open(index_path, 'r', encoding='utf-8') as f: + return json.load(f)['links'] + + return [] + def write_html_links_index(out_dir, links): """write the html link index to a given path""" @@ -91,17 +92,11 @@ def write_html_links_index(out_dir, links): with open(path, 'w', encoding='utf-8') as f: f.write(Template(index_html).substitute(**template_vars)) + chmod_file(path) + ### Individual link index -def parse_json_link_index(out_dir): - """load the index in a given directory and merge it with the given link""" - existing_index = os.path.join(out_dir, 'index.json') - if os.path.exists(existing_index): - with open(existing_index, 'r', encoding='utf-8') as f: - return json.load(f) - return {} - def write_link_index(out_dir, link): link['updated'] = str(datetime.now().timestamp()) write_json_link_index(out_dir, link) @@ -112,85 +107,39 @@ def write_json_link_index(out_dir, link): path = os.path.join(out_dir, 'index.json') + print(' √ Updating: index.json') + with open(path, 'w', encoding='utf-8') as f: json.dump(link, f, indent=4, default=str) chmod_file(path) +def parse_json_link_index(out_dir): + """load the json link index from a given directory""" + existing_index = os.path.join(out_dir, 'index.json') + if os.path.exists(existing_index): + with open(existing_index, 'r', encoding='utf-8') as f: + return json.load(f) + return {} + def write_html_link_index(out_dir, link): with open(LINK_INDEX_TEMPLATE, 'r', encoding='utf-8') as f: link_html = f.read() path = os.path.join(out_dir, 'index.html') + print(' √ Updating: index.html') + with open(path, 'w', encoding='utf-8') as f: f.write(Template(link_html).substitute({ **link, - **link['methods'], + **link['latest'], 'type': link['type'] or 
'website', - 'tags': link['tags'] or '', + 'tags': link['tags'] or 'untagged', 'bookmarked': datetime.fromtimestamp(float(link['timestamp'])).strftime('%Y-%m-%d %H:%M'), 'updated': datetime.fromtimestamp(float(link['updated'])).strftime('%Y-%m-%d %H:%M'), - 'archive_org': link['methods']['archive_org'] or 'https://web.archive.org/save/{}'.format(link['url']), - 'wget': link['methods']['wget'] or link['domain'], + 'archive_org': link['latest']['archive_org'] or 'https://web.archive.org/save/{}'.format(link['url']), + 'wget': link['latest']['wget'] or link['domain'], })) chmod_file(path) - - - -def html_appended_url(link): - """calculate the path to the wgetted .html file, since wget may - adjust some paths to be different than the base_url path. - - See docs on wget --adjust-extension.""" - - if link['type'] in ('PDF', 'image'): - return link['base_url'] - - split_url = link['url'].split('#', 1) - query = ('%3F' + link['url'].split('?', 1)[-1]) if '?' in link['url'] else '' - - if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", split_url[0], re.I | re.M): - # already ends in .html - return link['base_url'] - else: - # .html needs to be appended - without_scheme = split_url[0].split('://', 1)[-1].split('?', 1)[0] - if without_scheme.endswith('/'): - if query: - return '#'.join([without_scheme + 'index.html' + query + '.html', *split_url[1:]]) - return '#'.join([without_scheme + 'index.html', *split_url[1:]]) - else: - if query: - return '#'.join([without_scheme + '/index.html' + query + '.html', *split_url[1:]]) - elif '/' in without_scheme: - return '#'.join([without_scheme + '.html', *split_url[1:]]) - return link['base_url'] + '/index.html' - - -def derived_link_info(link): - """extend link info with the archive urls and other derived data""" - - link_info = { - **link, - 'date': datetime.fromtimestamp(float(link['timestamp'])).strftime('%Y-%m-%d %H:%M'), - 'google_favicon_url': 'https://www.google.com/s2/favicons?domain={domain}'.format(**link), - 'favicon_url': 'archive/{timestamp}/favicon.ico'.format(**link), - 'files_url': 'archive/{timestamp}/'.format(**link), - 'archive_url': 'archive/{}/{}'.format(link['timestamp'], html_appended_url(link)), - 'pdf_link': 'archive/{timestamp}/output.pdf'.format(**link), - 'screenshot_link': 'archive/{timestamp}/screenshot.png'.format(**link), - 'archive_org_url': 'https://web.archive.org/web/{base_url}'.format(**link), - } - - # PDF and images are handled slightly differently - # wget, screenshot, & pdf urls all point to the same file - if link['type'] in ('PDF', 'image'): - link_info.update({ - 'archive_url': 'archive/{timestamp}/{base_url}'.format(**link), - 'pdf_link': 'archive/{timestamp}/{base_url}'.format(**link), - 'screenshot_link': 'archive/{timestamp}/{base_url}'.format(**link), - 'title': '{title} ({type})'.format(**link), - }) - return link_info diff --git a/links.py b/links.py index 9eb3cfa6..22242d17 100644 --- a/links.py +++ b/links.py @@ -1,18 +1,11 @@ -from util import ( - domain, - base_url, - get_str_between, - get_link_type, -) - """ In Bookmark Archiver, a Link represents a single entry that we track in the json index. All links pass through all archiver functions and the latest, -most up-to-date canonical output for each is stored in "latest_archives". -. +most up-to-date canonical output for each is stored in "latest". 
+ Link { - timestamp: float, (how we uniquely id links) _ _ _ _ ___ + timestamp: str, (how we uniquely id links) _ _ _ _ ___ url: str, | \ / \ |\| ' | base_url: str, |_/ \_/ | | | domain: str, _ _ _ _ _ _ @@ -20,7 +13,7 @@ Link { type: str, | /"| | | | \_, title: str, ,-'"`-. sources: [str], /// / @ @ \ \\\\ - latest_archives: { :=| ,._,. |=: / + latest: { \ :=| ,._,. |=: / ..., || ,\ \_../ /. || pdf: 'output.pdf', ||','`-._))'`.`|| wget: 'example.com/1234/index.html' `-' (/ `-' @@ -39,10 +32,18 @@ Link { """ +from util import ( + domain, + base_url, + get_str_between, + get_link_type, +) + + def validate_links(links): - links = valid_links(links) # remove chrome://, about:, mailto: etc. - links = uniquefied_links(links) # fix duplicate timestamps, returns sorted list - links = sorted_links(links) # deterministically sort the links + links = archivable_links(links) # remove chrome://, about:, mailto: etc. + links = uniquefied_links(links) # merge/dedupe duplicate timestamps & urls + links = sorted_links(links) # deterministically sort the links based on timstamp, url if not links: print('[X] No links found :(') @@ -50,34 +51,14 @@ def validate_links(links): return list(links) -def sorted_links(links): - return sorted( - links, - key=lambda link: (link['timestamp'], link['url']), - reverse=True, - ) -def merge_links(link1, link2): - """deterministially merge two links, favoring longer field values over shorter, - and "cleaner" values over worse ones. - """ - longer = lambda a, b, key: a[key] if len(a[key]) > len(b[key]) else b[key] - earlier = lambda a, b, key: a[key] if a[key] < b[key] else b[key] - - url = longer(link1, link2, 'url') - longest_title = longer(link1, link2, 'title') - cleanest_title = link1['title'] if '://' not in link1['title'] else link2['title'] - link = { - 'url': url, - 'domain': domain(url), - 'base_url': base_url(url), - 'timestamp': earlier(link1, link2, 'timestamp'), - 'tags': longer(link1, link2, 'tags'), - 'title': longest_title if '://' not in longest_title else cleanest_title, - 'sources': list(set(link1['sources'] + link2['sources'])), - } - link['type'] = get_link_type(link) - return link +def archivable_links(links): + """remove chrome://, about:// or other schemed links that cant be archived""" + return ( + link + for link in links + if any(link['url'].startswith(s) for s in ('http://', 'https://', 'ftp://')) + ) def uniquefied_links(sorted_links): """ @@ -104,13 +85,33 @@ def uniquefied_links(sorted_links): return unique_timestamps.values() -def valid_links(links): - """remove chrome://, about:// or other schemed links that cant be archived""" - return ( - link - for link in links - if any(link['url'].startswith(s) for s in ('http://', 'https://', 'ftp://')) - ) +def sorted_links(links): + sort_func = lambda link: (link['timestamp'], link['url']) + return sorted(links, key=sort_func, reverse=True) + + + +def merge_links(a, b): + """deterministially merge two links, favoring longer field values over shorter, + and "cleaner" values over worse ones. 
+ """ + longer = lambda key: a[key] if len(a[key]) > len(b[key]) else b[key] + earlier = lambda key: a[key] if a[key] < b[key] else b[key] + + url = longer('url') + longest_title = longer('title') + cleanest_title = a['title'] if '://' not in a['title'] else b['title'] + link = { + 'timestamp': earlier('timestamp'), + 'url': url, + 'domain': domain(url), + 'base_url': base_url(url), + 'tags': longer('tags'), + 'title': longest_title if '://' not in longest_title else cleanest_title, + 'sources': list(set(a.get('sources', []) + b.get('sources', []))), + } + link['type'] = get_link_type(link) + return link def links_after_timestamp(links, timestamp=None): if not timestamp: diff --git a/parse.py b/parse.py index c1d23a5b..0a203fe2 100644 --- a/parse.py +++ b/parse.py @@ -1,32 +1,36 @@ +""" +Everything related to parsing links from bookmark services. + +For a list of supported services, see the README.md. +For examples of supported files see examples/. + +Parsed link schema: { + 'url': 'https://example.com/example/?abc=123&xyc=345#lmnop', + 'domain': 'example.com', + 'base_url': 'example.com/example/', + 'timestamp': '15442123124234', + 'tags': 'abc,def', + 'title': 'Example.com Page Title', + 'sources': ['ril_export.html', 'downloads/getpocket.com.txt'], +} +""" + import re import json + from datetime import datetime from util import ( domain, base_url, - get_str_between, + str_between, get_link_type, ) -def parse_export(path): - """parse a list of links dictionaries from a bookmark export file""" - - links = [] - with open(path, 'r', encoding='utf-8') as file: - for service, parser_func in get_parsers().items(): - # otherwise try all parsers until one works - try: - links += list(parser_func(file)) - if links: - break - except Exception as e: - pass +def get_parsers(file): + """return all parsers that work on a given file, defaults to all of them""" - return links - -def get_parsers(): return { 'pocket': parse_pocket_export, 'pinboard': parse_json_export, @@ -34,12 +38,32 @@ def get_parsers(): 'rss': parse_rss_export, } +def parse_links(path): + """parse a list of links dictionaries from a bookmark export file""" + + links = [] + with open(path, 'r', encoding='utf-8') as file: + for parser_func in get_parsers(file).values(): + # otherwise try all parsers until one works + try: + links += list(parser_func(file)) + if links: + break + except (ValueError, TypeError): + # parser not supported on this file + pass + + return links + + def parse_pocket_export(html_file): """Parse Pocket-format bookmarks export files (produced by getpocket.com/export/)""" html_file.seek(0) - pattern = re.compile("^\\s*
<li><a href=\"(.+)\" time_added=\"(\\d+)\" tags=\"(.*?)\">(.+)</a></li>", re.UNICODE)  # see sample input in ./example_ril_export.html
+    pattern = re.compile("^\\s*<li><a href=\"(.+)\" time_added=\"(\\d+)\" tags=\"(.*?)\">(.+)</a></li>", re.UNICODE)
     for line in html_file:
+        # example line
+        # <li><a href="http://example.com/example/" time_added="1478739709" tags="tag1,tag2">example title</a></li>
         match = pattern.search(line)
         if match:
             fixed_url = match.group(1).replace('http://www.readability.com/read?url=', '')           # remove old readability prefixes to get original url
@@ -62,6 +86,8 @@ def parse_json_export(json_file):
     json_file.seek(0)
     json_content = json.load(json_file)
     for line in json_content:
+        # example line
+        # {"href":"http:\/\/www.reddit.com\/r\/example","description":"title here","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:42Z","shared":"no","toread":"no","tags":"reddit android"}]
         if line:
             erg = line
             time = datetime.strptime(erg['time'].split(',', 1)[0], '%Y-%m-%dT%H:%M:%SZ')
@@ -96,11 +122,12 @@
         leading_removed = trailing_removed.split('<item>', 1)[-1]
         rows = leading_removed.split('\n')

-        row = lambda key: [r for r in rows if r.startswith('<{}>'.format(key))][0]
+        def get_row(key):
+            return [r for r in rows if r.startswith('<{}>'.format(key))][0]

-        title = get_str_between(row('title'), '<![CDATA[', ']]')
-        ts_str = get_str_between(row('pubDate'), '<pubDate>', '</pubDate>')
+        title = str_between(get_row('title'), '<![CDATA[', ']]')
+        ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')
         time = datetime.strptime(ts_str, "%a, %d %b %Y %H:%M:%S %z")

         info = {
@@ -112,17 +139,20 @@
             'title': title,
             'sources': [rss_file.name],
         }
-        info['type'] = get_link_type(info)
-        # import ipdb; ipdb.set_trace()
+        info['type'] = get_link_type(info)

         yield info


 def parse_bookmarks_export(html_file):
     """Parse netscape-format bookmarks export files (produced by all browsers)"""

+    html_file.seek(0)
     pattern = re.compile("<a href=\"(.+?)\" add_date=\"(\\d+)\"[^>]*>(.+)</a>", re.UNICODE | re.IGNORECASE)
     for line in html_file:
+        # example line
+        # <DT><A HREF="https://example.com/" ADD_DATE="1497562974">example bookmark title</A>
         match = pattern.search(line)
         if match:
             url = match.group(1)
@@ -137,6 +167,6 @@
                 'title': match.group(3),
                 'sources': [html_file.name],
             }
-            info['type'] = get_link_type(info)
+
             yield info

diff --git a/templates/index.html b/templates/index.html
index 2b125790..3b997de6 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -68,7 +68,7 @@ Archived Sites
- Archived with: Bookmark Archiver on $date_updated
+ Bookmark Archiver
diff --git a/templates/index_row.html b/templates/index_row.html
index 8ea4a4e9..508b29f0 100644
--- a/templates/index_row.html
+++ b/templates/index_row.html
@@ -4,7 +4,7 @@ $title $tags - 📂 + 📂 📄 🖼 🏛
diff --git a/templates/link_index.html b/templates/link_index.html
index 9f6a5d10..a4eb35df 100644
--- a/templates/link_index.html
+++ b/templates/link_index.html
@@ -140,7 +140,7 @@ [-] - + Archive Icon $title
    @@ -221,6 +221,7 @@