bulk-downloader-for-reddit/bulkredditdownloader/site_downloaders/redgifs.py

#!/usr/bin/env python3
import json
import pathlib
import urllib.request

from bs4 import BeautifulSoup

from bulkredditdownloader.errors import NotADownloadableLinkError
from bulkredditdownloader.site_downloaders.gif_delivery_network import GifDeliveryNetwork


class Redgifs(GifDeliveryNetwork):
    def __init__(self, directory: pathlib.Path, post: dict):
        super().__init__(directory, post)
        self.download()

    def download(self):
        super().download()

    @staticmethod
    def _get_link(url: str) -> str:
        """Extract the direct link to the video from the page source and return it."""
        # Already a direct link to the media file
        if '.webm' in url or '.mp4' in url or '.gif' in url:
            return url

        if url.endswith('/'):
            url = url[:-1]

        # Fetch the watch page for this gif's ID, sending a browser User-Agent header
        request = urllib.request.Request("https://redgifs.com/watch/" + url.split('/')[-1])
        request.add_header(
            'User-Agent',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/67.0.3396.87 Safari/537.36 OPR/54.0.2952.64')

        page_source = urllib.request.urlopen(request).read().decode()
        soup = BeautifulSoup(page_source, "html.parser")

        # The direct URL is embedded in the page's JSON-LD metadata script tag
        attributes = {"data-react-helmet": "true", "type": "application/ld+json"}
        content = soup.find("script", attrs=attributes)

        if content is None:
            raise NotADownloadableLinkError("Could not read the page source")

        return json.loads(content.contents[0])["video"]["contentUrl"]
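

# A minimal usage sketch, not part of the original file: since _get_link is a
# staticmethod, it can be called directly to resolve a redgifs page URL to the
# direct mp4/webm URL. The watch-page URL below is a hypothetical placeholder,
# not a real post.
if __name__ == '__main__':
    # _get_link returns the "contentUrl" field from the page's JSON-LD metadata
    direct_url = Redgifs._get_link('https://redgifs.com/watch/exampleid')
    print(direct_url)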