bulk-downloader-for-reddit/bulkredditdownloader/site_downloaders/gfycat.py

42 lines · 1.5 KiB · Python

#!/usr/bin/env python3

import json
import re
from typing import Optional

from bs4 import BeautifulSoup
from praw.models import Submission

from bulkredditdownloader.exceptions import SiteDownloaderError
from bulkredditdownloader.resource import Resource
from bulkredditdownloader.site_authenticator import SiteAuthenticator
from bulkredditdownloader.site_downloaders.gif_delivery_network import GifDeliveryNetwork


class Gfycat(GifDeliveryNetwork):
    def __init__(self, post: Submission):
        super().__init__(post)

    def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
        return super().find_resources(authenticator)

    @staticmethod
    def _get_link(url: str) -> str:
        # Extract the gfycat ID (the last path segment, tolerating a trailing slash)
        # and rebuild a canonical gfycat.com URL.
        gfycat_id = re.match(r'.*/([^/]+?)/?$', url).group(1)
        url = 'https://gfycat.com/' + gfycat_id

        response = Gfycat.retrieve_url(url)
        # Some gfycat links redirect to gifdeliverynetwork; defer to that downloader.
        if 'gifdeliverynetwork' in response.url:
            return GifDeliveryNetwork._get_link(url)

        # The direct video URL is embedded in the page's ld+json metadata block.
        soup = BeautifulSoup(response.text, 'html.parser')
        content = soup.find('script', attrs={'data-react-helmet': 'true', 'type': 'application/ld+json'})

        try:
            out = json.loads(content.contents[0])['video']['contentUrl']
        except (IndexError, KeyError) as e:
            raise SiteDownloaderError(f'Failed to download Gfycat link {url}: {e}')
        except json.JSONDecodeError as e:
            raise SiteDownloaderError(f'Did not receive valid JSON data: {e}')
        return out
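
A minimal usage sketch (illustrative, not part of the file): resolving a Gfycat submission into downloadable resources via the public find_resources method. The credentials and submission URL are placeholders, and printing resource.url assumes the Resource class exposes the resolved link through a url attribute.

# Illustrative usage only; placeholder credentials and submission URL.
import praw

from bulkredditdownloader.site_downloaders.gfycat import Gfycat

reddit = praw.Reddit(client_id='<client-id>', client_secret='<client-secret>', user_agent='bdfr-example')
submission = reddit.submission(url='https://www.reddit.com/r/example/comments/abc123/')

downloader = Gfycat(submission)
for resource in downloader.find_resources():
    print(resource.url)  # assumed: Resource exposes the direct gfycat video URL as `url`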