bulk-downloader-for-reddit/bulkredditdownloader/site_downloaders/gfycat.py

#!/usr/bin/env python3
import json
import re
from typing import Optional

import requests
from bs4 import BeautifulSoup
from praw.models import Submission

from bulkredditdownloader.resource import Resource
from bulkredditdownloader.site_authenticator import SiteAuthenticator
from bulkredditdownloader.site_downloaders.gif_delivery_network import GifDeliveryNetwork


class Gfycat(GifDeliveryNetwork):
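    """Resolve gfycat.com submissions to direct media URLs.

    Inherits the resource-collection flow from GifDeliveryNetwork and only
    overrides the link-resolution step, which scrapes the JSON-LD metadata
    embedded in the gfycat page.
    """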
    def __init__(self, post: Submission):
        super().__init__(post)

    def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
        return super().find_resources(authenticator)

    @staticmethod
    def _get_link(url: str) -> str:
        # If the URL already ends in a media extension, return it unchanged
        if re.search(r'\.(webm|mp4|gif)$', url):
            return url

        # Otherwise, pull the gfycat ID out of the URL and rebuild the canonical page URL
        gfycat_id = re.match(r'.*/(.*?)/?$', url).group(1)
        url = 'https://gfycat.com/' + gfycat_id

        page_source = requests.get(url).text

        # The direct video link lives in the JSON-LD metadata embedded in the page
        soup = BeautifulSoup(page_source, 'html.parser')
        content = soup.find('script', attrs={'data-react-helmet': 'true', 'type': 'application/ld+json'})

        out = json.loads(content.contents[0]).get('video').get('contentUrl')
        return out
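

# Illustrative usage sketch (not part of the original module): shows how a
# gfycat link could be resolved to its direct media URL with the static
# helper above. The gfycat ID below is hypothetical, and the request will
# fail if gfycat.com is unreachable.
if __name__ == '__main__':
    example_url = 'https://gfycat.com/ExampleGfycatId'  # hypothetical ID
    try:
        print(Gfycat._get_link(example_url))
    except (AttributeError, requests.RequestException) as error:
        print(f'Could not resolve {example_url}: {error}')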