bulk-downloader-for-reddit/bulkredditdownloader/downloaders/gfycat.py


import json
import os
import pathlib
import urllib.request

from bs4 import BeautifulSoup

from bulkredditdownloader.downloaders.base_downloader import BaseDownloader
from bulkredditdownloader.downloaders.gif_delivery_network import GifDeliveryNetwork
from bulkredditdownloader.errors import NotADownloadableLinkError
from bulkredditdownloader.utils import GLOBAL


class Gfycat(BaseDownloader):
    def __init__(self, directory: pathlib.Path, post: dict):
        super().__init__(directory, post)
        try:
            # Resolve the direct media URL from the submission's content URL.
            post['MEDIAURL'] = self.getLink(post['CONTENTURL'])
        except IndexError:
            raise NotADownloadableLinkError("Could not read the page source")

        post['EXTENSION'] = self.getExtension(post['MEDIAURL'])

        if not os.path.exists(directory):
            os.makedirs(directory)

        # Build the target filename from the configured template, plus a shorter
        # alternative name based only on the post id.
        filename = GLOBAL.config['filename'].format(**post) + post["EXTENSION"]
        short_filename = post['POSTID'] + post['EXTENSION']

        self.getFile(filename, short_filename, directory, post['MEDIAURL'])
    @staticmethod
    def getLink(url: str) -> str:
        """Extract the direct link to the video from the page source and return it."""
        # Already a direct media link; nothing to resolve.
        if '.webm' in url or '.mp4' in url or '.gif' in url:
            return url

        # Normalise to https://gfycat.com/<id> and fetch the page.
        if url.endswith('/'):
            url = url[:-1]
        url = "https://gfycat.com/" + url.split('/')[-1]

        page_source = urllib.request.urlopen(url).read().decode()
        soup = BeautifulSoup(page_source, "html.parser")

        # The direct media URL is embedded in a JSON-LD <script> block.
        attributes = {"data-react-helmet": "true", "type": "application/ld+json"}
        content = soup.find("script", attrs=attributes)

        # If that block is missing, fall back to the Gif Delivery Network resolver.
        if content is None:
            return GifDeliveryNetwork.getLink(url)

        return json.loads(content.contents[0])["video"]["contentUrl"]
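

# --- Usage sketch (not part of the original module) --------------------------
# The class is driven by a `post` dict that the rest of the project builds; the
# keys read here are CONTENTURL and POSTID, together with the
# GLOBAL.config['filename'] template. The snippet below only exercises the
# static link resolver, since that needs no project configuration; the URL is a
# hypothetical example and requires gfycat.com to be reachable.
if __name__ == "__main__":
    print(Gfycat.getLink("https://gfycat.com/someexampleclip"))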