From a682a9c478ce71becbb08c7ee14fbf0d7840243b Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 08:27:47 -0400 Subject: [PATCH 01/11] make all parsers accept arbitrary meta kwargs --- archivebox/parsers/generic_json.py | 2 +- archivebox/parsers/generic_rss.py | 2 +- archivebox/parsers/generic_txt.py | 2 +- archivebox/parsers/medium_rss.py | 2 +- archivebox/parsers/netscape_html.py | 2 +- archivebox/parsers/pinboard_rss.py | 2 +- archivebox/parsers/pocket_html.py | 2 +- archivebox/parsers/shaarli_rss.py | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/archivebox/parsers/generic_json.py b/archivebox/parsers/generic_json.py index 8b20e6f4..e6ed6772 100644 --- a/archivebox/parsers/generic_json.py +++ b/archivebox/parsers/generic_json.py @@ -13,7 +13,7 @@ from ..util import ( @enforce_types -def parse_generic_json_export(json_file: IO[str]) -> Iterable[Link]: +def parse_generic_json_export(json_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse JSON-format bookmarks export files (produced by pinboard.in/export/, or wallabag)""" json_file.seek(0) diff --git a/archivebox/parsers/generic_rss.py b/archivebox/parsers/generic_rss.py index 3a62bb88..28318444 100644 --- a/archivebox/parsers/generic_rss.py +++ b/archivebox/parsers/generic_rss.py @@ -12,7 +12,7 @@ from ..util import ( ) @enforce_types -def parse_generic_rss_export(rss_file: IO[str]) -> Iterable[Link]: +def parse_generic_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse RSS XML-format files into links""" rss_file.seek(0) diff --git a/archivebox/parsers/generic_txt.py b/archivebox/parsers/generic_txt.py index dd0fe7f5..e296ec7e 100644 --- a/archivebox/parsers/generic_txt.py +++ b/archivebox/parsers/generic_txt.py @@ -16,7 +16,7 @@ from ..util import ( @enforce_types -def parse_generic_txt_export(text_file: IO[str]) -> Iterable[Link]: +def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse raw links from each line in a text file""" text_file.seek(0) diff --git a/archivebox/parsers/medium_rss.py b/archivebox/parsers/medium_rss.py index 11379677..8f14f773 100644 --- a/archivebox/parsers/medium_rss.py +++ b/archivebox/parsers/medium_rss.py @@ -14,7 +14,7 @@ from ..util import ( @enforce_types -def parse_medium_rss_export(rss_file: IO[str]) -> Iterable[Link]: +def parse_medium_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse Medium RSS feed files into links""" rss_file.seek(0) diff --git a/archivebox/parsers/netscape_html.py b/archivebox/parsers/netscape_html.py index 894e2318..a063023c 100644 --- a/archivebox/parsers/netscape_html.py +++ b/archivebox/parsers/netscape_html.py @@ -14,7 +14,7 @@ from ..util import ( @enforce_types -def parse_netscape_html_export(html_file: IO[str]) -> Iterable[Link]: +def parse_netscape_html_export(html_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse netscape-format bookmarks export files (produced by all browsers)""" html_file.seek(0) diff --git a/archivebox/parsers/pinboard_rss.py b/archivebox/parsers/pinboard_rss.py index eb21c7ef..98ff14a3 100644 --- a/archivebox/parsers/pinboard_rss.py +++ b/archivebox/parsers/pinboard_rss.py @@ -14,7 +14,7 @@ from ..util import ( @enforce_types -def parse_pinboard_rss_export(rss_file: IO[str]) -> Iterable[Link]: +def parse_pinboard_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse Pinboard RSS feed files into links""" rss_file.seek(0) diff --git a/archivebox/parsers/pocket_html.py b/archivebox/parsers/pocket_html.py index 
3eae58c4..653f21b8 100644 --- a/archivebox/parsers/pocket_html.py +++ b/archivebox/parsers/pocket_html.py @@ -14,7 +14,7 @@ from ..util import ( @enforce_types -def parse_pocket_html_export(html_file: IO[str]) -> Iterable[Link]: +def parse_pocket_html_export(html_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse Pocket-format bookmarks export files (produced by getpocket.com/export/)""" html_file.seek(0) diff --git a/archivebox/parsers/shaarli_rss.py b/archivebox/parsers/shaarli_rss.py index ae5bfa96..4a925f46 100644 --- a/archivebox/parsers/shaarli_rss.py +++ b/archivebox/parsers/shaarli_rss.py @@ -13,7 +13,7 @@ from ..util import ( @enforce_types -def parse_shaarli_rss_export(rss_file: IO[str]) -> Iterable[Link]: +def parse_shaarli_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]: """Parse Shaarli-specific RSS XML-format files into links""" rss_file.seek(0) From 15efb2d5ed1163fb5f6388646fb167efa7dd1afa Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 08:29:05 -0400 Subject: [PATCH 02/11] new generic_html parser for extracting hrefs --- archivebox.egg-info/SOURCES.txt | 1 + archivebox/index/__init__.py | 4 +- archivebox/main.py | 6 +-- archivebox/parsers/__init__.py | 81 +++++++++++++++++------------- archivebox/parsers/generic_html.py | 53 +++++++++++++++++++ 5 files changed, 106 insertions(+), 39 deletions(-) create mode 100644 archivebox/parsers/generic_html.py diff --git a/archivebox.egg-info/SOURCES.txt b/archivebox.egg-info/SOURCES.txt index 14e510a1..ee6a2fc5 100644 --- a/archivebox.egg-info/SOURCES.txt +++ b/archivebox.egg-info/SOURCES.txt @@ -70,6 +70,7 @@ archivebox/index/json.py archivebox/index/schema.py archivebox/index/sql.py archivebox/parsers/__init__.py +archivebox/parsers/generic_html.py archivebox/parsers/generic_json.py archivebox/parsers/generic_rss.py archivebox/parsers/generic_txt.py diff --git a/archivebox/index/__init__.py b/archivebox/index/__init__.py index b7c8ebff..784c879c 100644 --- a/archivebox/index/__init__.py +++ b/archivebox/index/__init__.py @@ -301,14 +301,14 @@ def load_main_index_meta(out_dir: str=OUTPUT_DIR) -> Optional[dict]: @enforce_types -def parse_links_from_source(source_path: str) -> Tuple[List[Link], List[Link]]: +def parse_links_from_source(source_path: str, root_url: Optional[str]=None) -> Tuple[List[Link], List[Link]]: from ..parsers import parse_links new_links: List[Link] = [] # parse and validate the import file - raw_links, parser_name = parse_links(source_path) + raw_links, parser_name = parse_links(source_path, root_url=root_url) new_links = validate_links(raw_links) if parser_name: diff --git a/archivebox/main.py b/archivebox/main.py index 2e17594b..b65c6e64 100644 --- a/archivebox/main.py +++ b/archivebox/main.py @@ -548,7 +548,7 @@ def add(urls: Union[str, List[str]], # save verbatim args to sources write_ahead_log = save_text_as_source('\n'.join(urls), filename='{ts}-import.txt', out_dir=out_dir) - new_links += parse_links_from_source(write_ahead_log) + new_links += parse_links_from_source(write_ahead_log, root_url=None) # If we're going one level deeper, download each link and look for more links new_links_depth = [] @@ -556,9 +556,9 @@ def add(urls: Union[str, List[str]], log_crawl_started(new_links) for new_link in new_links: downloaded_file = save_file_as_source(new_link.url, filename=f'{new_link.timestamp}-crawl-{new_link.domain}.txt', out_dir=out_dir) - new_links_depth += parse_links_from_source(downloaded_file) + new_links_depth += parse_links_from_source(downloaded_file, 
root_url=new_link.url) - imported_links = new_links + new_links_depth + imported_links = list({link.url: link for link in (new_links + new_links_depth)}.values()) all_links, new_links = dedupe_links(all_links, imported_links) write_main_index(links=all_links, out_dir=out_dir, finished=not new_links) diff --git a/archivebox/parsers/__init__.py b/archivebox/parsers/__init__.py index bde71c27..930e1ade 100644 --- a/archivebox/parsers/__init__.py +++ b/archivebox/parsers/__init__.py @@ -11,7 +11,7 @@ import re import os from io import StringIO -from typing import IO, Tuple, List +from typing import IO, Tuple, List, Optional from datetime import datetime from ..system import atomic_write @@ -38,26 +38,29 @@ from .medium_rss import parse_medium_rss_export from .netscape_html import parse_netscape_html_export from .generic_rss import parse_generic_rss_export from .generic_json import parse_generic_json_export +from .generic_html import parse_generic_html_export from .generic_txt import parse_generic_txt_export PARSERS = ( - # Specialized parsers - ('Pocket HTML', parse_pocket_html_export), - ('Pinboard RSS', parse_pinboard_rss_export), - ('Shaarli RSS', parse_shaarli_rss_export), - ('Medium RSS', parse_medium_rss_export), - - # General parsers - ('Netscape HTML', parse_netscape_html_export), - ('Generic RSS', parse_generic_rss_export), - ('Generic JSON', parse_generic_json_export), + # Specialized parsers + ('Pocket HTML', parse_pocket_html_export), + ('Pinboard RSS', parse_pinboard_rss_export), + ('Shaarli RSS', parse_shaarli_rss_export), + ('Medium RSS', parse_medium_rss_export), + + # General parsers + ('Netscape HTML', parse_netscape_html_export), + ('Generic RSS', parse_generic_rss_export), + ('Generic JSON', parse_generic_json_export), + ('Generic HTML', parse_generic_html_export), + + # Fallback parser + ('Plain Text', parse_generic_txt_export), +) - # Fallback parser - ('Plain Text', parse_generic_txt_export), - ) @enforce_types -def parse_links_memory(urls: List[str]): +def parse_links_memory(urls: List[str], root_url: Optional[str]=None): """ parse a list of URLS without touching the filesystem """ @@ -68,17 +71,16 @@ def parse_links_memory(urls: List[str]): file = StringIO() file.writelines(urls) file.name = "io_string" - output = _parse(file, timer) - - if output is not None: - return output - + links, parser = run_parser_functions(file, timer, root_url=root_url) timer.end() - return [], 'Failed to parse' + + if parser is None: + return [], 'Failed to parse' + return links, parser @enforce_types -def parse_links(source_file: str) -> Tuple[List[Link], str]: +def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Link], str]: """parse a list of URLs with their metadata from an RSS feed, bookmarks export, or text file """ @@ -87,28 +89,39 @@ def parse_links(source_file: str) -> Tuple[List[Link], str]: timer = TimedProgress(TIMEOUT * 4) with open(source_file, 'r', encoding='utf-8') as file: - output = _parse(file, timer) - - if output is not None: - return output + links, parser = run_parser_functions(file, timer, root_url=root_url) timer.end() - return [], 'Failed to parse' + if parser is None: + return [], 'Failed to parse' + return links, parser + + +def run_parser_functions(to_parse: IO[str], timer, root_url: Optional[str]=None) -> Tuple[List[Link], Optional[str]]: + most_links: List[Link] = [] + best_parser_name = None -def _parse(to_parse: IO[str], timer) -> Tuple[List[Link], str]: for parser_name, parser_func in PARSERS: try: - links = 
list(parser_func(to_parse)) - if links: - timer.end() - return links, parser_name - except Exception as err: # noqa - pass + parsed_links = list(parser_func(to_parse, root_url=root_url)) + if not parsed_links: + raise Exception('no links found') + + # print(f'[√] Parser {parser_name} succeeded: {len(parsed_links)} links parsed') + if len(parsed_links) > len(most_links): + most_links = parsed_links + best_parser_name = parser_name + + except Exception as err: # noqa # Parsers are tried one by one down the list, and the first one # that succeeds is used. To see why a certain parser was not used # due to error or format incompatibility, uncomment this line: + # print('[!] Parser {} failed: {} {}'.format(parser_name, err.__class__.__name__, err)) # raise + pass + timer.end() + return most_links, best_parser_name @enforce_types diff --git a/archivebox/parsers/generic_html.py b/archivebox/parsers/generic_html.py new file mode 100644 index 00000000..4c632f04 --- /dev/null +++ b/archivebox/parsers/generic_html.py @@ -0,0 +1,53 @@ +__package__ = 'archivebox.parsers' + + +import re + +from typing import IO, Iterable, Optional +from datetime import datetime + +from ..index.schema import Link +from ..util import ( + htmldecode, + enforce_types, + URL_REGEX, +) +from html.parser import HTMLParser +from urllib.parse import urljoin + + +class HrefParser(HTMLParser): + def __init__(self): + super().__init__() + self.urls = [] + + def handle_starttag(self, tag, attrs): + if tag == "a": + for attr, value in attrs: + if attr == "href": + self.urls.append(value) + + +@enforce_types +def parse_generic_html_export(html_file: IO[str], root_url: Optional[str]=None, **_kwargs) -> Iterable[Link]: + """Parse Pocket-format bookmarks export files (produced by getpocket.com/export/)""" + + html_file.seek(0) + for line in html_file: + parser = HrefParser() + # example line + #
<li><a href="...">example title</a></li>
  • + parser.feed(line) + for url in parser.urls: + if root_url: + # resolve relative urls /home.html -> https://example.com/home.html + url = urljoin(root_url, url) + + for archivable_url in re.findall(URL_REGEX, url): + yield Link( + url=htmldecode(archivable_url), + timestamp=str(datetime.now().timestamp()), + title=None, + tags=None, + sources=[html_file.name], + ) From b0c0a676f8255218b66cb6d7553f5bf97a2fa9ed Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 08:29:46 -0400 Subject: [PATCH 03/11] re-enable readability and singlefile by default now that its less noisy --- archivebox/config/__init__.py | 4 ++-- archivebox/extractors/__init__.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/archivebox/config/__init__.py b/archivebox/config/__init__.py index 74cbaa88..066be01f 100644 --- a/archivebox/config/__init__.py +++ b/archivebox/config/__init__.py @@ -108,8 +108,8 @@ CONFIG_DEFAULTS: Dict[str, ConfigDefaultDict] = { 'DEPENDENCY_CONFIG': { 'USE_CURL': {'type': bool, 'default': True}, 'USE_WGET': {'type': bool, 'default': True}, - 'USE_SINGLEFILE': {'type': bool, 'default': False}, - 'USE_READABILITY': {'type': bool, 'default': False}, + 'USE_SINGLEFILE': {'type': bool, 'default': True}, + 'USE_READABILITY': {'type': bool, 'default': True}, 'USE_GIT': {'type': bool, 'default': True}, 'USE_CHROME': {'type': bool, 'default': True}, 'USE_YOUTUBEDL': {'type': bool, 'default': True}, diff --git a/archivebox/extractors/__init__.py b/archivebox/extractors/__init__.py index a341083a..b730aa54 100644 --- a/archivebox/extractors/__init__.py +++ b/archivebox/extractors/__init__.py @@ -12,6 +12,7 @@ from ..index import ( patch_main_index, ) from ..util import enforce_types +from ..config import ANSI from ..logging_util import ( log_archiving_started, log_archiving_paused, From f18d92570e4d4876098a2761c0a5dcfb9c6eb198 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 08:30:09 -0400 Subject: [PATCH 04/11] wip attempt to fix timestamp unique constraint errors --- archivebox/index/__init__.py | 37 ++++++++++++++++++++---------------- archivebox/index/sql.py | 4 ++++ 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/archivebox/index/__init__.py b/archivebox/index/__init__.py index 784c879c..b0695064 100644 --- a/archivebox/index/__init__.py +++ b/archivebox/index/__init__.py @@ -129,7 +129,7 @@ def validate_links(links: Iterable[Link]) -> List[Link]: try: links = archivable_links(links) # remove chrome://, about:, mailto: etc. 
links = sorted_links(links) # deterministically sort the links based on timstamp, url - links = uniquefied_links(links) # merge/dedupe duplicate timestamps & urls + links = fix_duplicate_links(links) # merge/dedupe duplicate timestamps & urls finally: timer.end() @@ -144,34 +144,39 @@ def archivable_links(links: Iterable[Link]) -> Iterable[Link]: urlparse(link.url) except ValueError: continue - scheme_is_valid = scheme(link.url) in ('http', 'https', 'ftp') - not_blacklisted = (not URL_BLACKLIST_PTN.match(link.url)) if URL_BLACKLIST_PTN else True - if scheme_is_valid and not_blacklisted: - yield link + if scheme(link.url) not in ('http', 'https', 'ftp'): + continue + if URL_BLACKLIST_PTN and URL_BLACKLIST_PTN.search(link.url): + continue + + yield link @enforce_types -def uniquefied_links(sorted_links: Iterable[Link]) -> Iterable[Link]: +def fix_duplicate_links(sorted_links: Iterable[Link]) -> Iterable[Link]: """ ensures that all non-duplicate links have monotonically increasing timestamps """ + from core.models import Snapshot unique_urls: OrderedDict[str, Link] = OrderedDict() for link in sorted_links: - if link.base_url in unique_urls: + if link.url in unique_urls: # merge with any other links that share the same url - link = merge_links(unique_urls[link.base_url], link) - unique_urls[link.base_url] = link + link = merge_links(unique_urls[link.url], link) + unique_urls[link.url] = link - unique_timestamps: OrderedDict[str, Link] = OrderedDict() - for link in unique_urls.values(): - new_link = link.overwrite( - timestamp=lowest_uniq_timestamp(unique_timestamps, link.timestamp), - ) - unique_timestamps[new_link.timestamp] = new_link + # unique_timestamps: OrderedDict[str, Link] = OrderedDict() + # for link in unique_urls.values(): + # closest_non_duplicate_ts = lowest_uniq_timestamp(unique_timestamps, link.timestamp) + # if closest_non_duplicate_ts != link.timestamp: + # link = link.overwrite(timestamp=closest_non_duplicate_ts) + # Snapshot.objects.filter(url=link.url).update(timestamp=link.timestamp) + # unique_timestamps[link.timestamp] = link - return unique_timestamps.values() + # return unique_timestamps.values() + return unique_urls.values() @enforce_types diff --git a/archivebox/index/sql.py b/archivebox/index/sql.py index 60db8dc6..183aeef8 100644 --- a/archivebox/index/sql.py +++ b/archivebox/index/sql.py @@ -39,6 +39,10 @@ def write_sql_main_index(links: List[Link], out_dir: str=OUTPUT_DIR) -> None: with transaction.atomic(): for link in links: info = {k: v for k, v in link._asdict().items() if k in Snapshot.keys} + try: + info['timestamp'] = Snapshot.objects.get(url=link.url).timestamp + except Snapshot.DoesNotExist: + pass Snapshot.objects.update_or_create(url=link.url, defaults=info) @enforce_types From c9b3bab84d41aa8e789436130c8cf1f3df06e4b8 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 08:49:26 -0400 Subject: [PATCH 05/11] fix pull title not working --- archivebox/core/admin.py | 2 +- archivebox/extractors/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/archivebox/core/admin.py b/archivebox/core/admin.py index 941cedab..97bce1a5 100644 --- a/archivebox/core/admin.py +++ b/archivebox/core/admin.py @@ -33,7 +33,7 @@ def update_titles(modeladmin, request, queryset): archive_links([ snapshot.as_link() for snapshot in queryset - ], overwrite=True, methods=('title',), out_dir=OUTPUT_DIR) + ], overwrite=True, methods=('title','favicon'), out_dir=OUTPUT_DIR) update_titles.short_description = "Pull title" def 
overwrite_snapshots(modeladmin, request, queryset): diff --git a/archivebox/extractors/__init__.py b/archivebox/extractors/__init__.py index b730aa54..ab80716a 100644 --- a/archivebox/extractors/__init__.py +++ b/archivebox/extractors/__init__.py @@ -63,10 +63,10 @@ def archive_link(link: Link, overwrite: bool=False, methods: Optional[Iterable[s ARCHIVE_METHODS = get_default_archive_methods() - if methods is not None: + if methods: ARCHIVE_METHODS = [ method for method in ARCHIVE_METHODS - if method[1] in methods + if method[0] in methods ] out_dir = out_dir or link.link_dir From 6087e30d38d060599325312f0e009eaf3e282324 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 09:17:01 -0400 Subject: [PATCH 06/11] dont allow ui editing of db fields for now --- archivebox/core/admin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/archivebox/core/admin.py b/archivebox/core/admin.py index 97bce1a5..c9588c45 100644 --- a/archivebox/core/admin.py +++ b/archivebox/core/admin.py @@ -58,9 +58,9 @@ delete_snapshots.short_description = "Delete" class SnapshotAdmin(admin.ModelAdmin): list_display = ('added', 'title_str', 'url_str', 'files', 'size') sort_fields = ('title_str', 'url_str', 'added') - readonly_fields = ('id', 'url', 'timestamp', 'num_outputs', 'is_archived', 'url_hash', 'added', 'updated') + readonly_fields = ('id', 'url', 'timestamp', 'title', 'tags', 'num_outputs', 'is_archived', 'url_hash', 'added', 'updated') search_fields = ('url', 'timestamp', 'title', 'tags') - fields = ('title', 'tags', *readonly_fields) + fields = (*readonly_fields,) list_filter = ('added', 'updated', 'tags') ordering = ['-added'] actions = [delete_snapshots, overwrite_snapshots, update_snapshots, update_titles, verify_snapshots] From 273588c75a8e8c30e6ff1eefd9d04dc486e7001c Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 09:17:21 -0400 Subject: [PATCH 07/11] change main link to point to link details index to reduce confusion --- archivebox/core/admin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/archivebox/core/admin.py b/archivebox/core/admin.py index c9588c45..ed31c6a0 100644 --- a/archivebox/core/admin.py +++ b/archivebox/core/admin.py @@ -82,12 +82,12 @@ class SnapshotAdmin(admin.ModelAdmin): '' '' '' - '' + '' '{}' '', obj.archive_path, obj.archive_path, canon['favicon_path'], - obj.archive_path, canon['wget_path'] or '', + obj.archive_path, 'fetched' if obj.latest_title or obj.title else 'pending', urldecode(htmldecode(obj.latest_title or obj.title or ''))[:128] or 'Pending...' 
) + mark_safe(f'{tags}') From 430d51756b861e93540dcc837c591e35386ef1c5 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 09:17:37 -0400 Subject: [PATCH 08/11] show archive dir size in link details page --- archivebox/index/html.py | 2 ++ archivebox/themes/legacy/link_details.html | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/archivebox/index/html.py b/archivebox/index/html.py index 4c6ae8bb..9ce14988 100644 --- a/archivebox/index/html.py +++ b/archivebox/index/html.py @@ -8,6 +8,7 @@ from typing import List, Optional, Iterator, Mapping from .schema import Link from ..system import atomic_write, copy_and_overwrite +from ..logging_util import printable_filesize from ..util import ( enforce_types, ts_to_date, @@ -140,6 +141,7 @@ def link_details_template(link: Link) -> str: ) or 'about:blank', 'extension': link.extension or 'html', 'tags': link.tags or 'untagged', + 'size': printable_filesize(link.archive_size) if link.archive_size else 'pending', 'status': 'archived' if link.is_archived else 'not yet archived', 'status_color': 'success' if link.is_archived else 'danger', 'oldest_archive_date': ts_to_date(link.oldest_archive_date), diff --git a/archivebox/themes/legacy/link_details.html b/archivebox/themes/legacy/link_details.html index 1dabae2d..f3f54e47 100644 --- a/archivebox/themes/legacy/link_details.html +++ b/archivebox/themes/legacy/link_details.html @@ -298,6 +298,10 @@
                             Errors
                             ❌ $num_failures
+                            Size
+                            $size
    From e29bfba0531845efd135028002dfae35e92ebb24 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 09:17:56 -0400 Subject: [PATCH 09/11] change iframe panel size and shape on details index --- archivebox/themes/legacy/link_details.html | 133 +++++++++++---------- 1 file changed, 67 insertions(+), 66 deletions(-) diff --git a/archivebox/themes/legacy/link_details.html b/archivebox/themes/legacy/link_details.html index f3f54e47..cd7252ac 100644 --- a/archivebox/themes/legacy/link_details.html +++ b/archivebox/themes/legacy/link_details.html @@ -316,99 +316,100 @@
    [markup stripped in this copy of the hunk — it resizes, reorders, and retitles the iframe preview cards:
     old panels "Local Archive" (archive/$domain), "HTML" (archive/output.html), "SingleFile" (archive/singlefile.html),
     "Readability" (archive/readability/...), "PDF" (archive/output.pdf), "Screenshot" (archive/screenshot.png),
     "Archive.Org" (web.archive.org/web/...), and "Original" ($domain) become
     "Wget > WARC" (archive/$domain), "Chrome > SingleFile" (archive/singlefile.html), "Archive.Org" (web.archive.org/web/...),
     "Original" ($domain), "Chrome > PDF" (archive/output.pdf), "Chrome > Screenshot" (archive/screenshot.png),
     "Chrome > HTML" (archive/output.html), and "Readability" (archive/readability/...)]
    From 61ab952dab4c74fe29aee267ed8ea540fb0fe94f Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 09:20:05 -0400 Subject: [PATCH 10/11] fix parser docstring --- archivebox/parsers/generic_html.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/archivebox/parsers/generic_html.py b/archivebox/parsers/generic_html.py index 4c632f04..74b3d1fc 100644 --- a/archivebox/parsers/generic_html.py +++ b/archivebox/parsers/generic_html.py @@ -30,7 +30,7 @@ class HrefParser(HTMLParser): @enforce_types def parse_generic_html_export(html_file: IO[str], root_url: Optional[str]=None, **_kwargs) -> Iterable[Link]: - """Parse Pocket-format bookmarks export files (produced by getpocket.com/export/)""" + """Parse Generic HTML for href tags and use only the url (support for title coming later)""" html_file.seek(0) for line in html_file: From e87f1d57a396af555cc9fed98d3a0088173bdba5 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 18 Aug 2020 09:22:12 -0400 Subject: [PATCH 11/11] fix linters --- archivebox/extractors/__init__.py | 1 - archivebox/index/__init__.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/archivebox/extractors/__init__.py b/archivebox/extractors/__init__.py index ab80716a..23d5cfd0 100644 --- a/archivebox/extractors/__init__.py +++ b/archivebox/extractors/__init__.py @@ -12,7 +12,6 @@ from ..index import ( patch_main_index, ) from ..util import enforce_types -from ..config import ANSI from ..logging_util import ( log_archiving_started, log_archiving_paused, diff --git a/archivebox/index/__init__.py b/archivebox/index/__init__.py index b0695064..99894e16 100644 --- a/archivebox/index/__init__.py +++ b/archivebox/index/__init__.py @@ -157,7 +157,7 @@ def fix_duplicate_links(sorted_links: Iterable[Link]) -> Iterable[Link]: """ ensures that all non-duplicate links have monotonically increasing timestamps """ - from core.models import Snapshot + # from core.models import Snapshot unique_urls: OrderedDict[str, Link] = OrderedDict()
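The new generic_html parser added in PATCH 02 boils down to two stdlib pieces: an HTMLParser subclass that collects every <a href> value, and urljoin() to resolve relative links against the page the import was crawled from. A minimal standalone sketch of that approach (HrefParser matches the patch; the extract_hrefs wrapper and sample input are illustrative only):

    from html.parser import HTMLParser
    from urllib.parse import urljoin

    class HrefParser(HTMLParser):
        """Collect the href attribute of every <a> tag encountered."""
        def __init__(self):
            super().__init__()
            self.urls = []

        def handle_starttag(self, tag, attrs):
            if tag == "a":
                for attr, value in attrs:
                    if attr == "href":
                        self.urls.append(value)

    def extract_hrefs(html: str, root_url: str = None):
        # illustrative wrapper; the real parser feeds the file line by line
        parser = HrefParser()
        parser.feed(html)
        for url in parser.urls:
            # resolve relative urls: /home.html -> https://example.com/home.html
            yield urljoin(root_url, url) if root_url else url

    print(list(extract_hrefs('<li><a href="/home.html">home</a></li>', 'https://example.com')))
    # ['https://example.com/home.html']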
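PATCH 02 also dedupes the crawl results in add() with list({link.url: link for link in (new_links + new_links_depth)}.values()). That works because dict keys are unique and insertion-ordered, so the comprehension keeps exactly one Link per URL, with the last-parsed copy winning. A toy demonstration (the namedtuple stands in for the real archivebox.index.schema.Link):

    from collections import namedtuple

    Link = namedtuple('Link', 'url timestamp')

    new_links = [Link('https://example.com', '1'), Link('https://example.org', '2')]
    new_links_depth = [Link('https://example.com', '3')]  # same URL found again while crawling

    # one Link per URL survives; the duplicate from the deeper crawl replaces the earlier copy
    imported_links = list({link.url: link for link in (new_links + new_links_depth)}.values())
    print(imported_links)
    # [Link(url='https://example.com', timestamp='3'), Link(url='https://example.org', timestamp='2')]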
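PATCH 01 gives every parser a **_kwargs catch-all so the dispatcher can pass the same keyword arguments (currently just root_url) to all of them, and PATCH 02's run_parser_functions keeps whichever parser extracts the most links rather than the first one that returns anything. A condensed sketch of that dispatch pattern, simplified by dropping the progress timer and representing links as plain URL strings:

    import io
    from typing import IO, Iterable, List, Optional, Tuple

    def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[str]:
        # **_kwargs swallows root_url (and any future metadata) this parser doesn't need
        text_file.seek(0)
        return [line.strip() for line in text_file if line.strip().startswith('http')]

    PARSERS = (
        ('Plain Text', parse_generic_txt_export),
    )

    def run_parser_functions(to_parse: IO[str], root_url: Optional[str] = None) -> Tuple[List[str], Optional[str]]:
        most_links: List[str] = []
        best_parser_name = None
        for parser_name, parser_func in PARSERS:
            try:
                parsed_links = list(parser_func(to_parse, root_url=root_url))
                if not parsed_links:
                    raise Exception('no links found')
                # keep whichever parser extracted the most links, not just the first success
                if len(parsed_links) > len(most_links):
                    most_links = parsed_links
                    best_parser_name = parser_name
            except Exception:
                pass
        return most_links, best_parser_name

    print(run_parser_functions(io.StringIO('https://example.com\n'), root_url=None))
    # (['https://example.com'], 'Plain Text')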
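PATCH 08 renders the archive directory size in link_details.html via printable_filesize(link.archive_size). The real helper lives in archivebox.logging_util; the stand-in below only illustrates the kind of conversion it performs (exact formatting may differ):

    def printable_filesize(num_bytes: float) -> str:
        # step through units until the value fits under 1024 of that unit
        for unit in ('bytes', 'KB', 'MB', 'GB'):
            if abs(num_bytes) < 1024.0:
                return '%3.1f %s' % (num_bytes, unit)
            num_bytes /= 1024.0
        return '%3.1f %s' % (num_bytes, 'TB')

    print(printable_filesize(3_456_789))  # 3.3 MB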