# bulk-downloader-for-reddit/bdfr/site_downloaders/gfycat.py
# (mirror snapshot; web-viewer chrome removed)
#!/usr/bin/env python3
import json
import re
from typing import Optional
from bs4 import BeautifulSoup
from praw.models import Submission
from bdfr.exceptions import SiteDownloaderError
from bdfr.resource import Resource
from bdfr.site_authenticator import SiteAuthenticator
from bdfr.site_downloaders.redgifs import Redgifs
class Gfycat(Redgifs):
    """Site downloader for gfycat.com links.

    Gfycat pages embed video metadata in an ``application/ld+json`` script
    tag, from which the direct content URL is extracted.  Old gfycat links
    may redirect to redgifs/gifdeliverynetwork; in that case resolution is
    delegated to the Redgifs downloader.
    """

    def __init__(self, post: Submission):
        super().__init__(post)

    def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
        """Return the downloadable resources for this submission (delegates to Redgifs)."""
        return super().find_resources(authenticator)

    @staticmethod
    def _get_link(url: str) -> set[str]:
        """Resolve a gfycat page URL to the set of direct media URLs.

        Raises:
            SiteDownloaderError: if no ID can be extracted from the URL or
                the page does not contain usable video metadata.
        """
        # Last path segment (optionally followed by a trailing slash) is the gfycat ID.
        match = re.match(r'.*/(.*?)/?$', url)
        if not match:
            # Fail with the project's error type instead of an AttributeError
            # from calling .group() on None.
            raise SiteDownloaderError(f'Could not extract Gfycat ID from {url}')
        gfycat_id = match.group(1)
        url = 'https://gfycat.com/' + gfycat_id

        response = Gfycat.retrieve_url(url)
        if re.search(r'(redgifs|gifdeliverynetwork)', response.url):
            url = url.lower()  # Fixes error with old gfycat/redgifs links
            return Redgifs._get_link(url)

        soup = BeautifulSoup(response.text, 'html.parser')
        content = soup.find('script', attrs={'data-react-helmet': 'true', 'type': 'application/ld+json'})

        try:
            out = json.loads(content.contents[0])['video']['contentUrl']
        except (IndexError, KeyError, AttributeError) as e:
            # Chain the cause so the original traceback is preserved.
            raise SiteDownloaderError(f'Failed to download Gfycat link {url}: {e}') from e
        except json.JSONDecodeError as e:
            raise SiteDownloaderError(f'Did not receive valid JSON data: {e}') from e
        return {out}