# ArchiveBox/archivebox/extractors/title.py
__package__ = 'archivebox.extractors'
import re
from html.parser import HTMLParser
from pathlib import Path
from typing import Optional
from ..index.schema import Link, ArchiveResult, ArchiveOutput, ArchiveError
from ..util import (
enforce_types,
download_url,
htmldecode,
dedupe,
)
from ..config import (
TIMEOUT,
CHECK_SSL_VALIDITY,
SAVE_TITLE,
CURL_BINARY,
CURL_ARGS,
CURL_EXTRA_ARGS,
CURL_VERSION,
CURL_USER_AGENT,
)
from ..logging_util import TimedProgress
# Loose fallback pattern used when the strict HTMLParser finds no title:
# captures the raw text immediately following the first <title ...> tag.
HTML_TITLE_REGEX = re.compile(
    r'<title.*?>'                  # start matching text after <title> tag
    r'([^<>]+)',                   # get everything up to these symbols
    re.IGNORECASE | re.MULTILINE | re.DOTALL | re.UNICODE,
)
class TitleParser(HTMLParser):
    """Extract a page title from an HTML stream.

    Prefers the text of the first non-empty <title> tag; falls back to the
    <meta property="og:title" content="..."> value when no <title> text was seen.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.title_tag = ""            # accumulated text from the first <title>
        self.title_og = ""             # og:title meta content, if any
        self.inside_title_tag = False  # True while between <title> and </title>

    @property
    def title(self):
        """Best available title string, or None if neither source matched."""
        return self.title_tag or self.title_og or None

    def handle_starttag(self, tag, attrs):
        tag_name = tag.lower()
        if tag_name == "title":
            # only start collecting if we haven't captured a title yet
            if not self.title_tag:
                self.inside_title_tag = True
        elif tag_name == "meta" and not self.title_og:
            attr_map = dict(attrs)
            content = attr_map.get("content")
            if attr_map.get("property") == "og:title" and content:
                self.title_og = content

    def handle_data(self, data):
        if self.inside_title_tag and data:
            self.title_tag += data.strip()

    def handle_endtag(self, tag):
        if tag.lower() == "title":
            self.inside_title_tag = False
@enforce_types
def get_html(link: Link, path: Path, timeout: int=TIMEOUT) -> str:
    """Return the page HTML for a link, reusing an archived copy when possible.

    Tries the chrome DOM dump, then singlefile, then wget output; when no
    readable local snapshot exists, downloads the URL again.
    """
    canonical = link.canonical_outputs()
    abs_path = path.absolute()

    # prefer chrome-generated DOM dump to singlefile as singlefile output often
    # includes HUGE url(data:image/...base64) strings that crash parsers
    candidates = (
        canonical["dom_path"],
        canonical["singlefile_path"],
        canonical["wget_path"],
    )

    for candidate in candidates:
        try:
            # TypeError covers a None/unset candidate path;
            # UnicodeDecodeError covers non-text snapshot contents
            return (abs_path / candidate).read_text(encoding="utf-8")
        except (FileNotFoundError, TypeError, UnicodeDecodeError):
            continue

    # no usable local copy was found: fetch the page over the network
    return download_url(link.url, timeout=timeout)
@enforce_types
def should_save_title(link: Link, out_dir: Optional[str]=None, overwrite: Optional[bool]=False) -> bool:
    """Decide whether the title extractor should run for this link."""
    # a "title" that still starts with 'http' is just the URL placeholder,
    # so it does not count as a real, already-saved title
    has_real_title = bool(link.title) and not link.title.lower().startswith('http')
    if has_real_title and not overwrite:
        return False

    return SAVE_TITLE
def extract_title_with_regex(html):
    """Loosely pull the first <title> contents out of raw HTML, or None.

    Used as a fallback when the strict HTMLParser chokes on malformed markup.
    """
    match = HTML_TITLE_REGEX.search(html)
    if not match:
        return None
    # decode HTML entities (e.g. &amp;) so the stored title is plain text
    return htmldecode(match.group(1).strip())
@enforce_types
def save_title(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult:
    """try to guess the page's title from its content

    Reads the archived HTML (or re-downloads the page) and extracts a title:
    first with the strict TitleParser, then with a lenient regex fallback.
    If the extracted title is at least as good as the one already stored,
    the matching Snapshot row is updated in the database.

    Returns an ArchiveResult recording the curl command, output/error, and timing.
    """
    from core.models import Snapshot

    output: ArchiveOutput = None
    # later options take precedence
    options = [
        *CURL_ARGS,
        *CURL_EXTRA_ARGS,
        '--max-time', str(timeout),
        *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []),
        *([] if CHECK_SSL_VALIDITY else ['--insecure']),
    ]
    cmd = [
        CURL_BINARY,
        *dedupe(options),
        link.url,
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        html = get_html(link, out_dir, timeout=timeout)
        try:
            # try using relatively strict html parser first
            parser = TitleParser()
            parser.feed(html)
            output = parser.title
            if output is None:
                # FIX: was a bare `raise` with no active exception, which only
                # worked by accident (the resulting RuntimeError happened to be
                # caught below); raise an explicit, meaningful error instead
                raise ArchiveError('Strict HTML parser found no title')
        except Exception:
            # fallback to regex that can handle broken/malformed html
            output = extract_title_with_regex(html)

        # if title is better than the one in the db, update db with new title
        if isinstance(output, str) and output:
            if not link.title or len(output) >= len(link.title):
                Snapshot.objects.filter(url=link.url,
                                        timestamp=link.timestamp)\
                                .update(title=output)
        else:
            # if no content was returned, dont save a title (because it might be a temporary error)
            if not html:
                raise ArchiveError('Unable to detect page title')
            # output = html[:128]       # use first bit of content as the title
            output = link.base_url      # use the filename as the title (better UX)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return ArchiveResult(
        cmd=cmd,
        pwd=str(out_dir),
        cmd_version=CURL_VERSION,
        output=output,
        status=status,
        **timer.stats,
    )