__package__ = 'archivebox.parsers'

from typing import IO, Iterable
from datetime import datetime

from ..index.schema import Link
from ..util import (
    htmldecode,
    enforce_types,
    str_between,
)


@enforce_types
def parse_wallabag_atom_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse Wallabag Atom export files into Link objects.

    Splits the raw Atom XML on <entry> boundaries and scrapes each entry
    with plain string matching (no XML parser), yielding one Link per
    entry carrying its URL, published timestamp, title, and tags.

    Args:
        rss_file: an open text handle on the Wallabag Atom export;
            rewound before reading, and its ``.name`` is recorded as the
            source of every yielded Link.
    """
    rss_file.seek(0)
    # Everything before the first <entry> is feed-level metadata; drop it.
    entries = rss_file.read().split('<entry>')[1:]
    for entry in entries:
        # example entry:
        # <entry>
        #   <title><![CDATA[Orient Ray vs Mako: Is There Much Difference? - iknowwatches.com]]></title>
        #   <link rel="alternate" type="text/html"
        #         href="http://wallabag.drycat.fr/view/14041"/>
        #   <link rel="via">https://iknowwatches.com/orient-ray-vs-mako/</link>
        #   <id>wallabag:wallabag.drycat.fr:milosh:entry:14041</id>
        #   <updated>2020-10-18T09:14:02+02:00</updated>
        #   <published>2020-10-18T09:13:56+02:00</published>
        #   <category term="montres" label="montres" />
        #   <content type="html" xml:lang="en">...</content>
        # </entry>

        trailing_removed = entry.split('</entry>', 1)[0]
        leading_removed = trailing_removed.strip()
        rows = leading_removed.split('\n')

        def get_row(key: str) -> str:
            # First line of this entry whose tag opens with <key...
            # (raises IndexError when the tag is absent).
            return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]

        title = str_between(get_row('title'), '<![CDATA[', ']]>').strip()
        # The original page URL lives in the rel="via" link, not rel="alternate"
        # (which points back at the Wallabag instance itself).
        url = str_between(get_row('link rel="via"'), '<link rel="via">', '</link>')
        ts_str = str_between(get_row('published'), '<published>', '</published>')
        time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")
        try:
            tags = str_between(get_row('category'), 'label="', '" />')
        except IndexError:
            # Entry has no <category> tag — tags are optional in Wallabag exports.
            tags = None

        yield Link(
            url=htmldecode(url),
            timestamp=str(time.timestamp()),
            title=htmldecode(title) or None,
            tags=tags or '',
            sources=[rss_file.name],
        )