Replace urllib with requests

Author: Serene-Arc, 2021-02-26 19:38:29 +10:00 (committed by Ali Parlakci)
Commit: a55f35c025, parent: 37a91aa4df
3 changed files with 13 additions and 13 deletions
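The change is mechanical across all three downloaders: a urllib.request.urlopen(url).read().decode() call becomes requests.get(url).text. A minimal before/after sketch of the pattern (the URL is a placeholder, not taken from the diff):

    import urllib.request

    import requests

    url = "https://example.com/page"  # placeholder, not from the diff

    # Before: read raw bytes and decode them by hand; .decode() with no
    # argument assumes UTF-8 regardless of what the server declared.
    page_source_old = urllib.request.urlopen(url).read().decode()

    # After: requests decodes the body using the charset declared in the
    # Content-Type response header, falling back to a guess.
    page_source_new = requests.get(url).text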

bulkredditdownloader/site_downloaders/gfycat.py

@@ -2,14 +2,14 @@
 import json
 import re
-import urllib.request
 from typing import Optional
 
+import requests
 from bs4 import BeautifulSoup
 from praw.models import Submission
 
-from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.resource import Resource
+from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.site_downloaders.gif_delivery_network import GifDeliveryNetwork
@@ -31,7 +31,7 @@ class Gfycat(GifDeliveryNetwork):
         url = "https://gfycat.com/" + url.split('/')[-1]
 
-        page_source = (urllib.request.urlopen(url).read().decode())
+        page_source = requests.get(url).text
 
         soup = BeautifulSoup(page_source, "html.parser")
         attributes = {"data-react-helmet": "true", "type": "application/ld+json"}

bulkredditdownloader/site_downloaders/gif_delivery_network.py

@@ -1,14 +1,14 @@
 #!/usr/bin/env python3
-import urllib.request
 from typing import Optional
 
+import requests
 from bs4 import BeautifulSoup
 from praw.models import Submission
 
-from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.errors import NotADownloadableLinkError
 from bulkredditdownloader.resource import Resource
+from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.site_downloaders.base_downloader import BaseDownloader
@@ -34,7 +34,7 @@ class GifDeliveryNetwork(BaseDownloader):
             url = url[:-1]
 
         url = "https://www.gifdeliverynetwork.com/" + url.split('/')[-1]
 
-        page_source = (urllib.request.urlopen(url).read().decode())
+        page_source = requests.get(url).text
 
         soup = BeautifulSoup(page_source, "html.parser")
         attributes = {"id": "mp4Source", "type": "video/mp4"}

bulkredditdownloader/site_downloaders/redgifs.py

@@ -1,15 +1,15 @@
 #!/usr/bin/env python3
 import json
-import urllib.request
 from typing import Optional
 
+import requests
 from bs4 import BeautifulSoup
 from praw.models import Submission
 
-from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.errors import NotADownloadableLinkError
 from bulkredditdownloader.resource import Resource
+from bulkredditdownloader.site_authenticator import SiteAuthenticator
 from bulkredditdownloader.site_downloaders.gif_delivery_network import GifDeliveryNetwork
@@ -29,15 +29,15 @@ class Redgifs(GifDeliveryNetwork):
         if url[-1:] == '/':
             url = url[:-1]
 
-        url = urllib.request.Request(
-            "https://redgifs.com/watch/" + url.split('/')[-1])
-        url.add_header(
-            'User-Agent',
-            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
-            ' Chrome/67.0.3396.87 Safari/537.36 OPR/54.0.2952.64')
+        url = "https://redgifs.com/watch/" + url.split('/')[-1]
+        headers = {
+            'User-Agent':
+                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
+                ' Chrome/67.0.3396.87 Safari/537.36 OPR/54.0.2952.64'
+        }
 
-        page_source = (urllib.request.urlopen(url).read().decode())
+        page_source = requests.get(url, headers=headers).text
 
         soup = BeautifulSoup(page_source, "html.parser")
         attributes = {"data-react-helmet": "true", "type": "application/ld+json"}