diff --git a/bulkredditdownloader/site_downloaders/gfycat.py b/bulkredditdownloader/site_downloaders/gfycat.py
index 3b6c48e..af1f45b 100644
--- a/bulkredditdownloader/site_downloaders/gfycat.py
+++ b/bulkredditdownloader/site_downloaders/gfycat.py
@@ -2,14 +2,14 @@

 import json
 import re
-import urllib.request
 from typing import Optional

+import requests
 from bs4 import BeautifulSoup
 from praw.models import Submission

-from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.resource import Resource
+from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.site_downloaders.gif_delivery_network import GifDeliveryNetwork
@@ -31,7 +31,7 @@ class Gfycat(GifDeliveryNetwork):

         url = "https://gfycat.com/" + url.split('/')[-1]

-        page_source = (urllib.request.urlopen(url).read().decode())
+        page_source = requests.get(url).text

         soup = BeautifulSoup(page_source, "html.parser")
         attributes = {"data-react-helmet": "true", "type": "application/ld+json"}
diff --git a/bulkredditdownloader/site_downloaders/gif_delivery_network.py b/bulkredditdownloader/site_downloaders/gif_delivery_network.py
index 03c291d..eab4ac0 100644
--- a/bulkredditdownloader/site_downloaders/gif_delivery_network.py
+++ b/bulkredditdownloader/site_downloaders/gif_delivery_network.py
@@ -1,14 +1,14 @@
 #!/usr/bin/env python3

-import urllib.request
 from typing import Optional

+import requests
 from bs4 import BeautifulSoup
 from praw.models import Submission

-from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.errors import NotADownloadableLinkError
 from bulkredditdownloader.resource import Resource
+from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.site_downloaders.base_downloader import BaseDownloader
@@ -34,7 +34,7 @@ class GifDeliveryNetwork(BaseDownloader):
             url = url[:-1]

         url = "https://www.gifdeliverynetwork.com/" + url.split('/')[-1]
-        page_source = (urllib.request.urlopen(url).read().decode())
+        page_source = requests.get(url).text

         soup = BeautifulSoup(page_source, "html.parser")
         attributes = {"id": "mp4Source", "type": "video/mp4"}
diff --git a/bulkredditdownloader/site_downloaders/redgifs.py b/bulkredditdownloader/site_downloaders/redgifs.py
index 5517be2..5953fa7 100644
--- a/bulkredditdownloader/site_downloaders/redgifs.py
+++ b/bulkredditdownloader/site_downloaders/redgifs.py
@@ -1,15 +1,15 @@
 #!/usr/bin/env python3

 import json
-import urllib.request
 from typing import Optional

+import requests
 from bs4 import BeautifulSoup
 from praw.models import Submission

-from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.errors import NotADownloadableLinkError
 from bulkredditdownloader.resource import Resource
+from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.site_downloaders.gif_delivery_network import GifDeliveryNetwork
@@ -29,15 +29,15 @@
         if url[-1:] == '/':
             url = url[:-1]

-        url = urllib.request.Request(
-            "https://redgifs.com/watch/" + url.split('/')[-1])
+        url = "https://redgifs.com/watch/" + url.split('/')[-1]

-        url.add_header(
-            'User-Agent',
-            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
-            ' Chrome/67.0.3396.87 Safari/537.36 OPR/54.0.2952.64')
+        headers = {
+            'User-Agent':
+                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
+                ' Chrome/67.0.3396.87 Safari/537.36 OPR/54.0.2952.64'
+        }

-        page_source = (urllib.request.urlopen(url).read().decode())
+        page_source = requests.get(url, headers=headers).text

         soup = BeautifulSoup(page_source, "html.parser")
         attributes = {"data-react-helmet": "true", "type": "application/ld+json"}
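
A note on the redgifs.py hunk: `requests.get()` expects `headers` as a dict mapping header names to values, whereas urllib's `Request.add_header()` took the name and value as two separate arguments, so the User-Agent pair has to be rebuilt as a dict entry rather than a list. A minimal standalone sketch of the migration pattern shared by all three hunks (the helper name `fetch_page_source` and the example URL are illustrative, not part of the codebase):

```python
#!/usr/bin/env python3
# Sketch of the urllib -> requests pattern applied in these hunks.
# `fetch_page_source` is an illustrative name, not a function in the repo.

import requests


def fetch_page_source(url: str) -> str:
    # requests takes headers as a dict; with urllib this was
    # Request(url) followed by add_header('User-Agent', '...').
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
            ' Chrome/67.0.3396.87 Safari/537.36 OPR/54.0.2952.64'
    }
    response = requests.get(url, headers=headers)
    response.raise_for_status()  # fail loudly on HTTP errors
    # .text is the decoded body, replacing urlopen(url).read().decode()
    return response.text


if __name__ == '__main__':
    # Illustrative usage only; any gifdeliverynetwork-style watch URL works.
    print(fetch_page_source('https://www.gifdeliverynetwork.com/example')[:200])
```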