Compare commits
330 commits
Short SHA1s of the commits in this comparison:

```text
8c293a4684 0846aed44a 0e23dcb8ad a01b18a0f2 63b0607f58
cf5f7bfd16 e96b167b71 105ceaf386 c9863f094a 804d0eb661
5fbe64dc71 8b3b5a73e8 8e60a12517 0f94c98733 7e93101617
8f17bcf43b 9b23082a78 d18c30b481 f4059d5c72 59c25d21df
579c5ab8eb 468b5a33ae 37dd7f11a8 74fb9e0ad7 e2a14b0e14
2589e29304 241021fa39 12311029e4 77a01e1627 b64f508025
8c57dc2283 4f07e92c5e f40ac35f4a bdce6101ae 271da4e6f3
e0e780a272 aced164560 8fffa20795 3fdaf35306 9a6e42fb9c
954df88c86 b7aae727e5 b8442dbb15 b6edc36753 c4bece2f58
874c7e3117 2bafb1b99b 0bb94040d6 13887ca7e1 7cd1a70f59
fe9cc7f29f 816b7e2726 e92929ef07 c63a8842d9 7fef403757
2aea7d0d48 57ac0130a6 00c4307694 da74096cde b3e4777206
8d43cdeef7 db11e57111 83f45e7f60 5d3a539eda 2e2dfe671b
603e7de04d 66049a4021 7cf096012e af6222e06c 4e131640ad
8c01a9e7a0 d0da9be376 e32d322dbd 58e1d1a8f9 4ba5df6b37
b4fccd7ef8 15a9d25a9d ac91c9089c 3aa740e979 1bc20f238e
628739d0b8 7e3b11caf8 8af00b20bc 76b441cd62 d4bfe8fa19
614c19be10 8cfc314038 47e49a2e98 8feb6517f1 bfd2d31b7b
921b2d0888 5427ceb29a ee095d4814 9c3c5436b5 82230a97bc
c4f636c388 002a2dac43 1dc5ead4f6 60ce138a52 0873a4a2b2
96cd7d7147 51b09a77ed 3136a6488c 2524070bd0 0a3b3d7b7c
b30ced9be9 3278e67197 45429be27c d056647a53 69fa1f3f09
b438f81a43 324242a9bc e18014cc8a ef7fcce1cc 831f49daa6
7b7167643f 175513fbb7 5cb3c2c635 fecb65c53a ad12fc1b7a
48c96beba2 2d2ed58b34 9ee13aea23 87104e7e6a 21bf90f521
42416db8b9 4143c53ff1 9ba62f8c97 1385545e26 5341d6f12c
49727aea6e 0a586425d0 54a800d357 25fdd28037 f3c7d796aa
77711c243a 3c7f85725e dfc21295e3 5300758b3b cfd4bad1ef
0e90a2e900 47a8736f77 df30a3a3ac dc5a9ef497 3d0ac9e483
325883e441 14e98f014b b536a486b6 02b6e66941 e7629d7004
0ce2585f7f b7d21161fb d4664d784f 3b5f8bca67 c834314086
7bb2a9adbb 57e59db458 e57932aedf ca33dee265 9c067ad74f
3906386838 7fef6c4023 398f7b293a cd05bc388e 1dff7500e7
06816098dc f4598c4bec c4a9da06f6 5c343ef790 106d7596b1
7bd957aafa d4f7deaa68 2f2b5b749c 95749584ec 0a9ecac410
e0a36f4eab 35645da241 5dbb4d00d4 0767da14c2 d60b4e7fdd
cd6bcd82ef 4b160c2611 44e4c16b76 55c95495b2 b47b90f233
9d63125724 2bbf1b644e f22a8aec4d 5d76fcd5aa 7eb2ab6d7d
9545407896 89653c4bad 4fc0d5dc1d 607d963450 1f1e7dc63d
7ae318fb20 27ca92ef15 af3f98f59c 23e20e6ddc cb3415c62f
5f443fddff 0731de788d 395bf9180a ef82387f84 798ed728f5
8ab13b4480 7100291ed9 59e57cee84 36e32d4bff febad9c06c
1157c31be1 86e451d49e 9277903308 7d4eb47643 4f876eecbc
7315afeafd 3fd5bad407 8c59329ffa 2d365b612b 7d4916919d
decb13b5db efea01e56f eb8f9d5876 e4a44f1e25 ad172841e2
e068c9ce56 53d7ce2e5d 9f3dcece4d 2bdeaf2660 12982c00cd
1abb7768c3 f49a1d7a2d a599169399 e8d767050f 90a2eac90d
a620ae91a1 919abb09ef a6940987f4 12104d54f1 aede4d559a
f57590cfa0 2e68850d0f ac8855bc14 bfd481739b 81c49de911
c410682cc8 1ad2b68e03 274407537e d64acc25f5 dbd0c6cd42
4917fae797 5775c0ab9f 484bde9b13 4e050c50d6 90b680935e
68e367453b b921d03705 2c93537aea 5a3ff887c4 806bd76f87
a2aa739c37 81b7fe853b 5f779c734e 06988c40b3 160ee372b9
7645319510 71f84420cb 6b7e551934 6e0c642652 5adb9f9545
9deef63fdd 85b216551f 0177b434c2 57b3bb3134 49d16267a2
3811ec37fb 8ec45a9302 ac3a8e913d 850faffc29 e4fcacfd4f
e564870cd6 af0a545c16 a487320e81 36ff95de6b 5288b79d1b
9f354e9e52 92dca3bd0e 5333705440 1530456cf7 9ccc9e6863
8718295ee5 cc80acd6b5 f0aebdf5f1 f670b347ae d0d72c8229
0eeb4b46dc 2b50ee0724 dd8d74ee25 4a86482756 8925643331
2dd446a402 6dd17c8762 f19171a1b4 fc279705c1 b4dd89cddc
17939fe47c 53562f4873 8c3af7029e bd802df38c f05e909008
4be0f5ec19 801784c46d d25f3fe008 e493ab048a 8104ce3a8d
f716d982b0 03d0aec4f6 4d3f0f9862 c6c6002ab2 9b23f273fc
eeb2054606 327cce5581 2d6e25d1ac 01923fda0e e004ccd148
80baab8de7 668fe80127 33312687ac 4fd903cbe4 99fe3312a4
```
.gitattributes (vendored, new file, 2 additions)

```diff
@@ -0,0 +1,2 @@
+# Declare files that will always have CRLF line endings on checkout.
+*.ps1 text eol=crlf
```
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 14 changes)

````diff
@@ -12,18 +12,22 @@ assignees: ''
 - [ ] I have read the [Opening an issue](https://github.com/aliparlakci/bulk-downloader-for-reddit/blob/master/docs/CONTRIBUTING.md#opening-an-issue)
 
 ## Description
 
 A clear and concise description of what the bug is.
 
 ## Command
-```
+
+```text
 Paste here the command(s) that causes the bug
 ```
 
-## Environment (please complete the following information):
+## Environment (please complete the following information)
+
 - OS: [e.g. Windows 10]
 - Python version: [e.g. 3.9.4]
 
 ## Logs
-```
+
+```text
 Paste the log output here.
 ```
````
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 1 change)

```diff
@@ -12,4 +12,5 @@ assignees: ''
 - [ ] I have read the [Opening an issue](../../README.md#configuration)
 
 ## Description
+
 Clearly state the current situation and issues you experience. Then, explain how this feature would solve these issues and make life easier. Also, explain the feature with as many detail as possible.
```
```diff
@@ -12,7 +12,9 @@ assignees: ''
 - [ ] I have read the [Opening an issue](../../README.md#configuration)
 
 ## Site
+
 Provide a URL to domain of the site.
 
 ## Example posts
+
 Provide example reddit posts with the domain.
```
.github/workflows/formatting_check.yml (vendored, new file, 13 additions)

```diff
@@ -0,0 +1,13 @@
+name: formatting_check
+run-name: Check code formatting
+on: [push, pull_request]
+jobs:
+  formatting_check:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install dependencies
+        run: sudo gem install mdl
+      - uses: actions/checkout@v3
+      - uses: paolorechia/pox@v1.0.1
+        with:
+          tox_env: "format_check"
```
.github/workflows/protect_master.yml (vendored, new file, 13 additions)

```diff
@@ -0,0 +1,13 @@
+name: Protect master branch
+
+on:
+  pull_request:
+    branches:
+      - master
+jobs:
+  merge_check:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check if the pull request is mergeable to master
+        run: |
+          if [[ "$GITHUB_HEAD_REF" == 'development' && "$GITHUB_REPOSITORY" == 'aliparlakci/bulk-downloader-for-reddit' ]]; then exit 0; else exit 1; fi;
```
.github/workflows/publish.yml (vendored, 12 changes)

```diff
@@ -11,25 +11,25 @@ jobs:
   deploy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: '3.9'
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install setuptools wheel twine
+          pip install build setuptools wheel twine
       - name: Build and publish
         env:
           TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
          TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
         run: |
-          python setup.py sdist bdist_wheel
+          python -m build
           twine upload dist/*
 
-      - name: Upload coverage report
-        uses: actions/upload-artifact@v2
+      - name: Upload dist folder
+        uses: actions/upload-artifact@v3
         with:
           name: dist
           path: dist/
```
.github/workflows/test.yml (vendored, 20 changes)

```diff
@@ -3,8 +3,16 @@ name: Python Test
 on:
   push:
     branches: [ master, development ]
+    paths-ignore:
+      - "**.md"
+      - ".markdown_style.rb"
+      - ".mdlrc"
   pull_request:
     branches: [ master, development ]
+    paths-ignore:
+      - "**.md"
+      - ".markdown_style.rb"
+      - ".mdlrc"
 
 jobs:
   test:
@@ -19,16 +27,16 @@
           python-version: 3.9
           ext: .ps1
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
 
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip flake8 pytest pytest-cov
-          pip install -r requirements.txt
+          python -m pip install --upgrade pip Flake8-pyproject pytest pytest-cov
+          pip install .
 
       - name: Make configuration for tests
         env:
@@ -38,14 +46,14 @@
 
       - name: Lint with flake8
         run: |
-          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+          flake8 . --select=E9,F63,F7,F82
 
       - name: Test with pytest
         run: |
           pytest -m 'not slow' --verbose --cov=./bdfr/ --cov-report term:skip-covered --cov-report html
 
       - name: Upload coverage report
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: coverage_report
           path: htmlcov/
```
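The test job selects tests with `pytest -m 'not slow'`, which relies on pytest's marker convention. A minimal sketch of that convention, with hypothetical test names rather than anything from the BDFR test suite:

```python
import pytest


@pytest.mark.slow
def test_full_subreddit_scrape():
    # Marked slow: skipped by `pytest -m 'not slow'`, run by a plain `pytest`.
    ...


def test_parse_file_scheme():
    # Unmarked: collected in both cases.
    ...
```

Registering the `slow` marker (for example under `[tool.pytest.ini_options]` in `pyproject.toml`) keeps pytest from warning about an unknown mark.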
.markdown_style.rb (new file, 4 additions)

```diff
@@ -0,0 +1,4 @@
+all
+exclude_tag :line_length
+rule 'MD007', :indent => 4
+rule 'MD029', :style => 'ordered'
```
.pre-commit-config.yaml (new file, 25 additions)

```diff
@@ -0,0 +1,25 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+
+repos:
+  - repo: https://github.com/psf/black
+    rev: 22.12.0
+    hooks:
+      - id: black
+
+  - repo: https://github.com/pycqa/isort
+    rev: 5.11.4
+    hooks:
+      - id: isort
+        name: isort (python)
+
+  - repo: https://github.com/pycqa/flake8
+    rev: 6.0.0
+    hooks:
+      - id: flake8
+        additional_dependencies: [Flake8-pyproject]
+
+  - repo: https://github.com/markdownlint/markdownlint
+    rev: v0.12.0
+    hooks:
+      - id: markdownlint
```
README.md (405 changes)

````diff
@@ -1,26 +1,50 @@
 # Bulk Downloader for Reddit
-[![PyPI version](https://img.shields.io/pypi/v/bdfr.svg)](https://pypi.python.org/pypi/bdfr)
-[![PyPI downloads](https://img.shields.io/pypi/dm/bdfr)](https://pypi.python.org/pypi/bdfr)
+
+[![PyPI Status](https://img.shields.io/pypi/status/bdfr?logo=PyPI)](https://pypi.python.org/pypi/bdfr)
+[![PyPI version](https://img.shields.io/pypi/v/bdfr.svg?logo=PyPI)](https://pypi.python.org/pypi/bdfr)
+[![PyPI downloads](https://img.shields.io/pypi/dm/bdfr?logo=PyPI)](https://pypi.python.org/pypi/bdfr)
+[![AUR version](https://img.shields.io/aur/version/python-bdfr?logo=Arch%20Linux)](https://aur.archlinux.org/packages/python-bdfr)
+[![Python Test](https://github.com/aliparlakci/bulk-downloader-for-reddit/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/aliparlakci/bulk-downloader-for-reddit/actions/workflows/test.yml)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg?logo=Python)](https://github.com/psf/black)
+[![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit)](https://github.com/pre-commit/pre-commit)
 
 This is a tool to download submissions or submission data from Reddit. It can be used to archive data or even crawl Reddit to gather research data. The BDFR is flexible and can be used in scripts if needed through an extensive command-line interface. [List of currently supported sources](#list-of-currently-supported-sources)
 
 If you wish to open an issue, please read [the guide on opening issues](docs/CONTRIBUTING.md#opening-an-issue) to ensure that your issue is clear and contains everything it needs to for the developers to investigate.
 
 Included in this README are a few example Bash tricks to get certain behaviour. For that, see [Common Command Tricks](#common-command-tricks).
 
 ## Installation
-*Bulk Downloader for Reddit* needs Python version 3.9 or above. Please update Python before installation to meet the requirement. Then, you can install it as such:
+
+*Bulk Downloader for Reddit* needs Python version 3.9 or above. Please update Python before installation to meet the requirement.
+
+Then, you can install it via pip with:
 
 ```bash
 python3 -m pip install bdfr --upgrade
 ```
-**To update BDFR**, run the above command again after the installation.
+
+or via [pipx](https://pypa.github.io/pipx) with:
+
+```bash
+python3 -m pipx install bdfr
+```
+
+**To update BDFR**, run the above command again for pip or `pipx upgrade bdfr` for pipx installations.
+
+**To check your version of BDFR**, run `bdfr --version`
+
+**To install shell completions**, run `bdfr completions`
+
 ### AUR Package
+
 If on Arch Linux or derivative operating systems such as Manjaro, the BDFR can be installed through the AUR.
 
-- Latest Release: https://aur.archlinux.org/packages/python-bdfr/
-- Latest Development Build: https://aur.archlinux.org/packages/python-bdfr-git/
+- Latest Release: <https://aur.archlinux.org/packages/python-bdfr>
+- Latest Development Build: <https://aur.archlinux.org/packages/python-bdfr-git>
 
 ### Source code
+
 If you want to use the source code or make contributions, refer to [CONTRIBUTING](docs/CONTRIBUTING.md#preparing-the-environment-for-development)
 
 ## Usage
````
````diff
@@ -34,180 +58,254 @@ Note that the `clone` command is not a true, failthful clone of Reddit. It simpl
 After installation, run the program from any directory as shown below:
 
 ```bash
-python3 -m bdfr download
+bdfr download
 ```
 
 ```bash
-python3 -m bdfr archive
+bdfr archive
 ```
 
 ```bash
-python3 -m bdfr clone
+bdfr clone
 ```
 
 However, these commands are not enough. You should chain parameters in [Options](#options) according to your use case. Don't forget that some parameters can be provided multiple times. Some quick reference commands are:
 
 ```bash
-python3 -m bdfr download ./path/to/output --subreddit Python -L 10
+bdfr download ./path/to/output --subreddit Python -L 10
 ```
 
 ```bash
-python3 -m bdfr download ./path/to/output --user me --saved --authenticate -L 25 --file-scheme '{POSTID}'
+bdfr download ./path/to/output --user reddituser --submitted -L 100
 ```
 
 ```bash
-python3 -m bdfr download ./path/to/output --subreddit 'Python, all, mindustry' -L 10 --make-hard-links
+bdfr download ./path/to/output --user me --saved --authenticate -L 25 --file-scheme '{POSTID}'
 ```
 
 ```bash
-python3 -m bdfr archive ./path/to/output --subreddit all --format yaml -L 500 --folder-scheme ''
+bdfr download ./path/to/output --subreddit 'Python, all, mindustry' -L 10 --make-hard-links
 ```
 
+```bash
+bdfr archive ./path/to/output --user reddituser --submitted --all-comments --comment-context
+```
+
+```bash
+bdfr archive ./path/to/output --subreddit all --format yaml -L 500 --folder-scheme ''
+```
+
+Alternatively, you can pass options through a YAML file.
+
+```bash
+bdfr download ./path/to/output --opts my_opts.yaml
+```
+
+For example, running it with the following file
+
+```yaml
+skip: [mp4, avi]
+file_scheme: "{UPVOTES}_{REDDITOR}_{POSTID}_{DATE}"
+limit: 10
+sort: top
+subreddit:
+    - EarthPorn
+    - CityPorn
+```
+
+would be equilavent to (take note that in YAML there is `file_scheme` instead of `file-scheme`):
+
+```bash
+bdfr download ./path/to/output --skip mp4 --skip avi --file-scheme "{UPVOTES}_{REDDITOR}_{POSTID}_{DATE}" -L 10 -S top --subreddit EarthPorn --subreddit CityPorn
+```
+
+Any option that can be specified multiple times should be formatted like subreddit is above.
+
+In case when the same option is specified both in the YAML file and in as a command line argument, the command line argument takes priority
+
 ## Options
 
 The following options are common between both the `archive` and `download` commands of the BDFR.
 
 - `directory`
-  - This is the directory to which the BDFR will download and place all files
+    - This is the directory to which the BDFR will download and place all files
 - `--authenticate`
-  - This flag will make the BDFR attempt to use an authenticated Reddit session
-  - See [Authentication](#authentication-and-security) for more details
+    - This flag will make the BDFR attempt to use an authenticated Reddit session
+    - See [Authentication](#authentication-and-security) for more details
 - `--config`
-  - If the path to a configuration file is supplied with this option, the BDFR will use the specified config
-  - See [Configuration Files](#configuration) for more details
+    - If the path to a configuration file is supplied with this option, the BDFR will use the specified config
+    - See [Configuration Files](#configuration) for more details
+- `--opts`
+    - Load options from a YAML file.
+    - Has higher prority than the global config file but lower than command-line arguments.
+    - See [opts_example.yaml](./opts_example.yaml) for an example file.
 - `--disable-module`
-  - Can be specified multiple times
-  - Disables certain modules from being used
-  - See [Disabling Modules](#disabling-modules) for more information and a list of module names
+    - Can be specified multiple times
+    - Disables certain modules from being used
+    - See [Disabling Modules](#disabling-modules) for more information and a list of module names
+- `--filename-restriction-scheme`
+    - Can be: `windows`, `linux`
+    - Turns off the OS detection and specifies which system to use when making filenames
+    - See [Filesystem Restrictions](#filesystem-restrictions)
+- `--ignore-user`
+    - This will add a user to ignore
+    - Can be specified multiple times
 - `--include-id-file`
-  - This will add any submission with the IDs in the files provided
-  - Can be specified multiple times
-  - Format is one ID per line
+    - This will add any submission with the IDs in the files provided
+    - Can be specified multiple times
+    - Format is one ID per line
 - `--log`
-  - This allows one to specify the location of the logfile
-  - This must be done when running multiple instances of the BDFR, see [Multiple Instances](#multiple-instances) below
+    - This allows one to specify the location of the logfile
+    - This must be done when running multiple instances of the BDFR, see [Multiple Instances](#multiple-instances) below
 - `--saved`
-  - This option will make the BDFR use the supplied user's saved posts list as a download source
-  - This requires an authenticated Reddit instance, using the `--authenticate` flag, as well as `--user` set to `me`
+    - This option will make the BDFR use the supplied user's saved posts list as a download source
+    - This requires an authenticated Reddit instance, using the `--authenticate` flag, as well as `--user` set to `me`
 - `--search`
-  - This will apply the specified search term to specific lists when scraping submissions
-  - A search term can only be applied to subreddits and multireddits, supplied with the `- s` and `-m` flags respectively
+    - This will apply the input search term to specific lists when scraping submissions
+    - A search term can only be applied when using the `--subreddit` and `--multireddit` flags
 - `--submitted`
-  - This will use a user's submissions as a source
-  - A user must be specified with `--user`
+    - This will use a user's submissions as a source
+    - A user must be specified with `--user`
 - `--upvoted`
-  - This will use a user's upvoted posts as a source of posts to scrape
-  - This requires an authenticated Reddit instance, using the `--authenticate` flag, as well as `--user` set to `me`
+    - This will use a user's upvoted posts as a source of posts to scrape
+    - This requires an authenticated Reddit instance, using the `--authenticate` flag, as well as `--user` set to `me`
 - `-L, --limit`
-  - This is the limit on the number of submissions retrieve
-  - Default is max possible
-  - Note that this limit applies to **each source individually** e.g. if a `--limit` of 10 and three subreddits are provided, then 30 total submissions will be scraped
-  - If it is not supplied, then the BDFR will default to the maximum allowed by Reddit, roughly 1000 posts. **We cannot bypass this.**
+    - This is the limit on the number of submissions retrieve
+    - Default is max possible
+    - Note that this limit applies to **each source individually** e.g. if a `--limit` of 10 and three subreddits are provided, then 30 total submissions will be scraped
+    - If it is not supplied, then the BDFR will default to the maximum allowed by Reddit, roughly 1000 posts. **We cannot bypass this.**
 - `-S, --sort`
-  - This is the sort type for each applicable submission source supplied to the BDFR
-  - This option does not apply to upvoted or saved posts when scraping from these sources
-  - The following options are available:
-    - `controversial`
-    - `hot` (default)
-    - `new`
-    - `relevance` (only available when using `--search`)
-    - `rising`
-    - `top`
+    - This is the sort type for each applicable submission source supplied to the BDFR
+    - This option does not apply to upvoted or saved posts when scraping from these sources
+    - The following options are available:
+        - `controversial`
+        - `hot` (default)
+        - `new`
+        - `relevance` (only available when using `--search`)
+        - `rising`
+        - `top`
 - `-l, --link`
-  - This is a direct link to a submission to download, either as a URL or an ID
-  - Can be specified multiple times
+    - This is a direct link to a submission to download, either as a URL or an ID
+    - Can be specified multiple times
 - `-m, --multireddit`
-  - This is the name of a multireddit to add as a source
-  - Can be specified multiple times
-  - This can be done by using `-m` multiple times
-  - Multireddits can also be used to provide CSV multireddits e.g. `-m 'chess, favourites'`
-  - The specified multireddits must all belong to the user specified with the `--user` option
+    - This is the name of a multireddit to add as a source
+    - Can be specified multiple times
+        - This can be done by using `-m` multiple times
+        - Multireddits can also be used to provide CSV multireddits e.g. `-m 'chess, favourites'`
+    - The specified multireddits must all belong to the user specified with the `--user` option
 - `-s, --subreddit`
-  - This adds a subreddit as a source
-  - Can be used mutliple times
-  - This can be done by using `-s` multiple times
-  - Subreddits can also be used to provide CSV subreddits e.g. `-m 'all, python, mindustry'`
+    - This adds a subreddit as a source
+    - Can be used mutliple times
+        - This can be done by using `-s` multiple times
+        - Subreddits can also be used to provide CSV subreddits e.g. `-m 'all, python, mindustry'`
 - `-t, --time`
-  - This is the time filter that will be applied to all applicable sources
-  - This option does not apply to upvoted or saved posts when scraping from these sources
-  - The following options are available:
-    - `all` (default)
-    - `hour`
-    - `day`
-    - `week`
-    - `month`
-    - `year`
-- `--time-format`
-  - This specifies the format of the datetime string that replaces `{DATE}` in file and folder naming schemes
-  - See [Time Formatting Customisation](#time-formatting-customisation) for more details, and the formatting scheme
+    - This is the time filter that will be applied to all applicable sources
+    - This option does not apply to upvoted or saved posts when scraping from these sources
+    - This option only applies if sorting by top or controversial. See --sort for more detail.
+    - The following options are available:
+        - `all` (default)
+        - `hour`
+        - `day`
+        - `week`
+        - `month`
+        - `year`
+- `--time-format`
+    - This specifies the format of the datetime string that replaces `{DATE}` in file and folder naming schemes
+    - See [Time Formatting Customisation](#time-formatting-customisation) for more details, and the formatting scheme
 - `-u, --user`
-  - This specifies the user to scrape in concert with other options
-  - When using `--authenticate`, `--user me` can be used to refer to the authenticated user
-  - Can be specified multiple times for multiple users
-  - If downloading a multireddit, only one user can be specified
+    - This specifies the user to scrape in concert with other options
+    - When using `--authenticate`, `--user me` can be used to refer to the authenticated user
+    - Can be specified multiple times for multiple users
+    - If downloading a multireddit, only one user can be specified
 - `-v, --verbose`
-  - Increases the verbosity of the program
-  - Can be specified multiple times
+    - Increases the verbosity of the program
+    - Can be specified multiple times
 
 ### Downloader Options
 
 The following options apply only to the `download` command. This command downloads the files and resources linked to in the submission, or a text submission itself, to the disk in the specified directory.
 
 - `--make-hard-links`
-  - This flag will create hard links to an existing file when a duplicate is downloaded
-  - This will make the file appear in multiple directories while only taking the space of a single instance
+    - This flag will create hard links to an existing file when a duplicate is downloaded in the current run
+    - This will make the file appear in multiple directories while only taking the space of a single instance
 - `--max-wait-time`
-  - This option specifies the maximum wait time for downloading a resource
-  - The default is 120 seconds
-  - See [Rate Limiting](#rate-limiting) for details
+    - This option specifies the maximum wait time for downloading a resource
+    - The default is 120 seconds
+    - See [Rate Limiting](#rate-limiting) for details
 - `--no-dupes`
-  - This flag will not redownload files if they already exist somewhere in the root folder tree
-  - This is calculated by MD5 hash
+    - This flag will not redownload files if they were already downloaded in the current run
+    - This is calculated by MD5 hash
 - `--search-existing`
-  - This will make the BDFR compile the hashes for every file in `directory` and store them to remove duplicates if `--no-dupes` is also supplied
+    - This will make the BDFR compile the hashes for every file in `directory`
+    - The hashes are used to remove duplicates if `--no-dupes` is supplied or make hard links if `--make-hard-links` is supplied
 - `--file-scheme`
-  - Sets the scheme for files
-  - Default is `{REDDITOR}_{TITLE}_{POSTID}`
-  - See [Folder and File Name Schemes](#folder-and-file-name-schemes) for more details
+    - Sets the scheme for files
+    - Default is `{REDDITOR}_{TITLE}_{POSTID}`
+    - See [Folder and File Name Schemes](#folder-and-file-name-schemes) for more details
 - `--folder-scheme`
-  - Sets the scheme for folders
-  - Default is `{SUBREDDIT}`
-  - See [Folder and File Name Schemes](#folder-and-file-name-schemes) for more details
+    - Sets the scheme for folders
+    - Default is `{SUBREDDIT}`
+    - See [Folder and File Name Schemes](#folder-and-file-name-schemes) for more details
 - `--exclude-id`
-  - This will skip the download of any submission with the ID provided
-  - Can be specified multiple times
+    - This will skip the download of any submission with the ID provided
+    - Can be specified multiple times
 - `--exclude-id-file`
-  - This will skip the download of any submission with any of the IDs in the files provided
-  - Can be specified multiple times
-  - Format is one ID per line
+    - This will skip the download of any submission with any of the IDs in the files provided
+    - Can be specified multiple times
+    - Format is one ID per line
 - `--skip-domain`
-  - This adds domains to the download filter i.e. submissions coming from these domains will not be downloaded
-  - Can be specified multiple times
+    - This adds domains to the download filter i.e. submissions coming from these domains will not be downloaded
+    - Can be specified multiple times
+    - Domains must be supplied in the form `example.com` or `img.example.com`
 - `--skip`
-  - This adds file types to the download filter i.e. submissions with one of the supplied file extensions will not be downloaded
-  - Can be specified multiple times
+    - This adds file types to the download filter i.e. submissions with one of the supplied file extensions will not be downloaded
+    - Can be specified multiple times
 - `--skip-subreddit`
-  - This skips all submissions from the specified subreddit
-  - Can be specified multiple times
-  - Also accepts CSV subreddit names
+    - This skips all submissions from the specified subreddit
+    - Can be specified multiple times
+    - Also accepts CSV subreddit names
+- `--min-score`
+    - This skips all submissions which have fewer than specified upvotes
+- `--max-score`
+    - This skips all submissions which have more than specified upvotes
+- `--min-score-ratio`
+    - This skips all submissions which have lower than specified upvote ratio
+- `--max-score-ratio`
+    - This skips all submissions which have higher than specified upvote ratio
 
 ### Archiver Options
 
 The following options are for the `archive` command specifically.
 
 - `--all-comments`
-  - When combined with the `--user` option, this will download all the user's comments
+    - When combined with the `--user` option, this will download all the user's comments
 - `-f, --format`
-  - This specifies the format of the data file saved to disk
-  - The following formats are available:
-    - `json` (default)
-    - `xml`
-    - `yaml`
+    - This specifies the format of the data file saved to disk
+    - The following formats are available:
+        - `json` (default)
+        - `xml`
+        - `yaml`
 - `--comment-context`
-  - This option will, instead of downloading an individual comment, download the submission that comment is a part of
-  - May result in a longer run time as it retrieves much more data
+    - This option will, instead of downloading an individual comment, download the submission that comment is a part of
+    - May result in a longer run time as it retrieves much more data
 
 ### Cloner Options
 
 The `clone` command can take all the options listed above for both the `archive` and `download` commands since it performs the functions of both.
 
 ## Common Command Tricks
 
 A common use case is for subreddits/users to be loaded from a file. The BDFR supports this via YAML file options (`--opts my_opts.yaml`).
 
 Alternatively, you can use the command-line [xargs](https://en.wikipedia.org/wiki/Xargs) function.
 For a list of users `users.txt` (one user per line), type:
 
 ```bash
 cat users.txt | xargs -L 1 echo --user | xargs -L 50 bdfr download <ARGS>
 ```
 
 The part `-L 50` is to make sure that the character limit for a single line isn't exceeded, but may not be necessary. This can also be used to load subreddits from a file, simply exchange `--user` with `--subreddit` and so on.
 
 ## Authentication and Security
 
 The BDFR uses OAuth2 authentication to connect to Reddit if authentication is required. This means that it is a secure, token-based system for making requests. This also means that the BDFR only has access to specific parts of the account authenticated, by default only saved posts, upvoted posts, and the identity of the authenticated account. Note that authentication is not required unless accessing private things like upvoted posts, saved posts, and private multireddits.
````
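The option-precedence rule stated in the README changes above (command-line arguments beat `--opts` values, which beat the global config file) amounts to a layered dictionary merge. A toy sketch of the idea in Python, not BDFR's actual implementation:

```python
from typing import Optional

import yaml  # PyYAML


def effective_options(config: dict, opts_file: Optional[str], cli_args: dict) -> dict:
    """Later layers win: config file < --opts YAML < command-line arguments."""
    merged = dict(config)
    if opts_file:
        with open(opts_file) as handle:
            merged.update(yaml.safe_load(handle) or {})
    # Unset CLI options arrive as None, so only explicit values override.
    merged.update({key: value for key, value in cli_args.items() if value is not None})
    return merged
```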
```diff
@@ -228,18 +326,18 @@ For more details on the configuration file and the values therein, see [Configur
 
 The naming and folder schemes for the BDFR are both completely customisable. A number of different fields can be given which will be replaced with properties from a submission when downloading it. The scheme format takes the form of `{KEY}`, where `KEY` is a string from the below list.
 
 - `DATE`
 - `FLAIR`
 - `POSTID`
 - `REDDITOR`
 - `SUBREDDIT`
 - `TITLE`
 - `UPVOTES`
 
 Each of these can be enclosed in curly bracket, `{}`, and included in the name. For example, to just title every downloaded post with the unique submission ID, you can use `{POSTID}`. Static strings can also be included, such as `download_{POSTID}` which will not change from submission to submission. For example, the previous string will result in the following submission file names:
 
 - `download_aaaaaa.png`
 - `download_bbbbbb.png`
 
 At least one key *must* be included in the file scheme, otherwise an error will be thrown. The folder scheme however, can be null or a simple static string. In the former case, all files will be placed in the folder specified with the `directory` argument. If the folder scheme is a static string, then all submissions will be placed in a folder of that name. In both cases, there will be no separation between all submissions.
```
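The `{KEY}` substitution described in the hunk above maps directly onto Python string formatting. A toy illustration with made-up field values, not BDFR's actual renaming code:

```python
# Hypothetical submission fields; BDFR fills these from the Reddit API.
fields = {"REDDITOR": "example_user", "TITLE": "example_title", "POSTID": "abc123"}

file_scheme = "{REDDITOR}_{TITLE}_{POSTID}"  # the default --file-scheme
print(file_scheme.format(**fields))          # example_user_example_title_abc123

static_scheme = "download_{POSTID}"          # static text plus one key
print(static_scheme.format(**fields))        # download_abc123
```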
```diff
@@ -249,19 +347,19 @@ It is highly recommended that the file name scheme contain the parameter `{POSTI
 
 The configuration files are, by default, stored in the configuration directory for the user. This differs depending on the OS that the BDFR is being run on. For Windows, this will be:
 
 - `C:\Users\<User>\AppData\Local\BDFR\bdfr`
 
 If Python has been installed through the Windows Store, the folder will appear in a different place. Note that the hash included in the file path may change from installation to installation.
 
 - `C:\Users\<User>\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.9_qbz5n2kfra8p0\LocalCache\Local\BDFR\bdfr`
 
 On Mac OSX, this will be:
 
 - `~/Library/Application Support/bdfr`.
 
 Lastly, on a Linux system, this will be:
 
 - `~/.config/bdfr/`
 
 The logging output for each run of the BDFR will be saved to this directory in the file `log_output.txt`. If you need to submit a bug, it is this file that you will need to submit with the report.
```
```diff
@@ -269,16 +367,17 @@ The logging output for each run of the BDFR will be saved to this directory in t
 
 The `config.cfg` is the file that supplies the BDFR with the configuration to use. At the moment, the following keys **must** be included in the configuration file supplied.
 
 - `client_id`
 - `client_secret`
 - `scopes`
 
 The following keys are optional, and defaults will be used if they cannot be found.
 
 - `backup_log_count`
 - `max_wait_time`
 - `time_format`
 - `disabled_modules`
+- `filename-restriction-scheme`
 
 All of these should not be modified unless you know what you're doing, as the default values will enable the BDFR to function just fine. A configuration is included in the BDFR when it is installed, and this will be placed in the configuration directory as the default.
```
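A minimal sketch of reading the keys listed above with the standard library; the section name and file path here are assumptions for illustration, not taken from this diff:

```python
from configparser import ConfigParser

parser = ConfigParser()
parser.read("config.cfg")  # hypothetical path; see the directories listed above

section = parser["DEFAULT"]  # assumed section name
client_id = section.get("client_id")
client_secret = section.get("client_secret")
scopes = section.get("scopes")
# Optional key with the default the README states (120 seconds).
max_wait_time = section.getint("max_wait_time", fallback=120)
```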
```diff
@@ -297,12 +396,16 @@ The individual modules of the BDFR, used to download submissions from websites,
 Modules can be disabled through the command line interface for the BDFR or more permanently in the configuration file via the `disabled_modules` option. The list of downloaders that can be disabled are the following. Note that they are case-insensitive.
 
 - `Direct`
+- `DelayForReddit`
 - `Erome`
 - `Gallery` (Reddit Image Galleries)
 - `Gfycat`
 - `Imgur`
+- `PornHub`
 - `Redgifs`
 - `SelfPost` (Reddit Text Post)
+- `Vidble`
+- `VReddit` (Reddit Video Post)
 - `Youtube`
 - `YoutubeDlFallback`
```
```diff
@@ -320,23 +423,41 @@ The BDFR can be run in multiple instances with multiple configurations, either c
 
 Running these scenarios consecutively is done easily, like any single run. Configuration files that differ may be specified with the `--config` option to switch between tokens, for example. Otherwise, almost all configuration for data sources can be specified per-run through the command line.
 
-Running scenarious concurrently (at the same time) however, is more complicated. The BDFR will look to a single, static place to put the detailed log files, in a directory with the configuration file specified above. If there are multiple instances, or processes, of the BDFR running at the same time, they will all be trying to write to a single file. On Linux and other UNIX based operating systems, this will succeed, though there is a substantial risk that the logfile will be useless due to garbled and jumbled data. On Windows however, attempting this will raise an error that crashes the program as Windows forbids multiple processes from accessing the same file.
+Running scenarios concurrently (at the same time) however, is more complicated. The BDFR will look to a single, static place to put the detailed log files, in a directory with the configuration file specified above. If there are multiple instances, or processes, of the BDFR running at the same time, they will all be trying to write to a single file. On Linux and other UNIX based operating systems, this will succeed, though there is a substantial risk that the logfile will be useless due to garbled and jumbled data. On Windows however, attempting this will raise an error that crashes the program as Windows forbids multiple processes from accessing the same file.
 
 The way to fix this is to use the `--log` option to manually specify where the logfile is to be stored. If the given location is unique to each instance of the BDFR, then it will run fine.
 
+## Filesystem Restrictions
+
+Different filesystems have different restrictions for what files and directories can be named. Thesse are separated into two broad categories: Linux-based filesystems, which have very few restrictions; and Windows-based filesystems, which are much more restrictive in terms if forbidden characters and length of paths.
+
+During the normal course of operation, the BDFR detects what filesystem it is running on and formats any filenames and directories to conform to the rules that are expected of it. However, there are cases where this will fail. When running on a Linux-based machine, or another system where the home filesystem is permissive, and accessing a share or drive with a less permissive system, the BDFR will assume that the *home* filesystem's rules apply. For example, when downloading to a SAMBA share from Ubuntu, there will be errors as SAMBA is more restrictive than Ubuntu.
+
+The best option would be to always download to a filesystem that is as permission as possible, such as an NFS share or ext4 drive. However, when this is not possible, the BDFR allows for the restriction scheme to be manually specified at either the command-line or in the configuration file. At the command-line, this is done with `--filename-restriction-scheme windows`, or else an option by the same name in the configuration file.
+
 ## Manipulating Logfiles
 
 The logfiles that the BDFR outputs are consistent and quite detailed and in a format that is amenable to regex. To this end, a number of bash scripts have been [included here](./scripts). They show examples for how to extract successfully downloaded IDs, failed IDs, and more besides.
 
+## Unsaving posts
+
+Back in v1 there was an option to unsave posts from your account when downloading, but it was removed from the core BDFR on v2 as it is considered a read-only tool. However, for those missing this functionality, a script was created that uses the log files to achieve this. There is info on how to use this on the README.md file on the scripts subdirectory.
+
 ## List of currently supported sources
 
-- Direct links (links leading to a file)
-- Erome
-- Gfycat
-- Gif Delivery Network
-- Imgur
-- Reddit Galleries
-- Reddit Text Posts
-- Reddit Videos
-- Redgifs
-- YouTube
-- Streamable
+- Direct links (links leading to a file)
+- Delay for Reddit
+- Erome
+- Gfycat
+- Gif Delivery Network
+- Imgur
+- Reddit Galleries
+- Reddit Text Posts
+- Reddit Videos
+- Redgifs
+- Vidble
+- YouTube
+- Any source supported by [YT-DLP](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) should be compatable
 
 ## Contributing
```
```diff
@@ -0,0 +1,4 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+__version__ = "2.6.2"
```
bdfr/__main__.py (181 changes)

```diff
@@ -1,58 +1,71 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 
 import logging
 import sys
 
 import click
+import requests
 
+from bdfr import __version__
 from bdfr.archiver import Archiver
 from bdfr.cloner import RedditCloner
+from bdfr.completion import Completion
 from bdfr.configuration import Configuration
 from bdfr.downloader import RedditDownloader
 
 logger = logging.getLogger()
 
 _common_options = [
-    click.argument('directory', type=str),
-    click.option('--authenticate', is_flag=True, default=None),
-    click.option('--config', type=str, default=None),
-    click.option('--disable-module', multiple=True, default=None, type=str),
-    click.option('--include-id-file', multiple=True, default=None),
-    click.option('--log', type=str, default=None),
-    click.option('--saved', is_flag=True, default=None),
-    click.option('--search', default=None, type=str),
-    click.option('--submitted', is_flag=True, default=None),
-    click.option('--time-format', type=str, default=None),
-    click.option('--upvoted', is_flag=True, default=None),
-    click.option('-L', '--limit', default=None, type=int),
-    click.option('-l', '--link', multiple=True, default=None, type=str),
-    click.option('-m', '--multireddit', multiple=True, default=None, type=str),
-    click.option('-S', '--sort', type=click.Choice(('hot', 'top', 'new', 'controversial', 'rising', 'relevance')),
-                 default=None),
-    click.option('-s', '--subreddit', multiple=True, default=None, type=str),
-    click.option('-t', '--time', type=click.Choice(('all', 'hour', 'day', 'week', 'month', 'year')), default=None),
-    click.option('-u', '--user', type=str, multiple=True, default=None),
-    click.option('-v', '--verbose', default=None, count=True),
+    click.argument("directory", type=str),
+    click.option("--authenticate", is_flag=True, default=None),
+    click.option("--config", type=str, default=None),
+    click.option("--disable-module", multiple=True, default=None, type=str),
+    click.option("--exclude-id", default=None, multiple=True),
+    click.option("--exclude-id-file", default=None, multiple=True),
+    click.option("--file-scheme", default=None, type=str),
+    click.option("--filename-restriction-scheme", type=click.Choice(("linux", "windows")), default=None),
+    click.option("--folder-scheme", default=None, type=str),
+    click.option("--ignore-user", type=str, multiple=True, default=None),
+    click.option("--include-id-file", multiple=True, default=None),
+    click.option("--log", type=str, default=None),
+    click.option("--opts", type=str, default=None),
+    click.option("--saved", is_flag=True, default=None),
+    click.option("--search", default=None, type=str),
+    click.option("--submitted", is_flag=True, default=None),
+    click.option("--subscribed", is_flag=True, default=None),
+    click.option("--time-format", type=str, default=None),
+    click.option("--upvoted", is_flag=True, default=None),
+    click.option("-L", "--limit", default=None, type=int),
+    click.option("-l", "--link", multiple=True, default=None, type=str),
+    click.option("-m", "--multireddit", multiple=True, default=None, type=str),
+    click.option(
+        "-S", "--sort", type=click.Choice(("hot", "top", "new", "controversial", "rising", "relevance")), default=None
+    ),
+    click.option("-s", "--subreddit", multiple=True, default=None, type=str),
+    click.option("-t", "--time", type=click.Choice(("all", "hour", "day", "week", "month", "year")), default=None),
+    click.option("-u", "--user", type=str, multiple=True, default=None),
+    click.option("-v", "--verbose", default=None, count=True),
 ]
 
 _downloader_options = [
-    click.option('--file-scheme', default=None, type=str),
-    click.option('--folder-scheme', default=None, type=str),
-    click.option('--make-hard-links', is_flag=True, default=None),
-    click.option('--max-wait-time', type=int, default=None),
-    click.option('--no-dupes', is_flag=True, default=None),
-    click.option('--search-existing', is_flag=True, default=None),
-    click.option('--exclude-id', default=None, multiple=True),
-    click.option('--exclude-id-file', default=None, multiple=True),
-    click.option('--skip', default=None, multiple=True),
-    click.option('--skip-domain', default=None, multiple=True),
-    click.option('--skip-subreddit', default=None, multiple=True),
+    click.option("--make-hard-links", is_flag=True, default=None),
+    click.option("--max-wait-time", type=int, default=None),
+    click.option("--no-dupes", is_flag=True, default=None),
+    click.option("--search-existing", is_flag=True, default=None),
+    click.option("--skip", default=None, multiple=True),
+    click.option("--skip-domain", default=None, multiple=True),
+    click.option("--skip-subreddit", default=None, multiple=True),
+    click.option("--min-score", type=int, default=None),
+    click.option("--max-score", type=int, default=None),
+    click.option("--min-score-ratio", type=float, default=None),
+    click.option("--max-score-ratio", type=float, default=None),
 ]
 
 _archiver_options = [
-    click.option('--all-comments', is_flag=True, default=None),
-    click.option('--comment-context', is_flag=True, default=None),
-    click.option('-f', '--format', type=click.Choice(('xml', 'json', 'yaml')), default=None),
+    click.option("--all-comments", is_flag=True, default=None),
+    click.option("--comment-context", is_flag=True, default=None),
+    click.option("-f", "--format", type=click.Choice(("xml", "json", "yaml")), default=None),
 ]
```
```diff
@@ -61,70 +74,123 @@ def _add_options(opts: list):
         for opt in opts:
             func = opt(func)
         return func
 
     return wrap
 
 
+def _check_version(context, param, value):
+    if not value or context.resilient_parsing:
+        return
+    current = __version__
+    latest = requests.get("https://pypi.org/pypi/bdfr/json").json()["info"]["version"]
+    print(f"You are currently using v{current} the latest is v{latest}")
+    context.exit()
+
+
 @click.group()
+@click.help_option("-h", "--help")
+@click.option(
+    "--version",
+    is_flag=True,
+    is_eager=True,
+    expose_value=False,
+    callback=_check_version,
+    help="Check version and exit.",
+)
 def cli():
+    """BDFR is used to download and archive content from Reddit."""
     pass
 
 
-@cli.command('download')
+@cli.command("download")
 @_add_options(_common_options)
 @_add_options(_downloader_options)
+@click.help_option("-h", "--help")
 @click.pass_context
 def cli_download(context: click.Context, **_):
+    """Used to download content posted to Reddit."""
     config = Configuration()
     config.process_click_arguments(context)
-    setup_logging(config.verbose)
+    silence_module_loggers()
+    stream = make_console_logging_handler(config.verbose)
     try:
-        reddit_downloader = RedditDownloader(config)
+        reddit_downloader = RedditDownloader(config, [stream])
         reddit_downloader.download()
     except Exception:
-        logger.exception('Downloader exited unexpectedly')
+        logger.exception("Downloader exited unexpectedly")
         raise
     else:
-        logger.info('Program complete')
+        logger.info("Program complete")
 
 
-@cli.command('archive')
+@cli.command("archive")
 @_add_options(_common_options)
 @_add_options(_archiver_options)
+@click.help_option("-h", "--help")
 @click.pass_context
 def cli_archive(context: click.Context, **_):
+    """Used to archive post data from Reddit."""
     config = Configuration()
     config.process_click_arguments(context)
-    setup_logging(config.verbose)
+    silence_module_loggers()
+    stream = make_console_logging_handler(config.verbose)
     try:
-        reddit_archiver = Archiver(config)
+        reddit_archiver = Archiver(config, [stream])
         reddit_archiver.download()
     except Exception:
-        logger.exception('Archiver exited unexpectedly')
+        logger.exception("Archiver exited unexpectedly")
         raise
     else:
-        logger.info('Program complete')
+        logger.info("Program complete")
 
 
-@cli.command('clone')
+@cli.command("clone")
 @_add_options(_common_options)
 @_add_options(_archiver_options)
 @_add_options(_downloader_options)
+@click.help_option("-h", "--help")
 @click.pass_context
 def cli_clone(context: click.Context, **_):
+    """Combines archive and download commands."""
     config = Configuration()
     config.process_click_arguments(context)
-    setup_logging(config.verbose)
+    silence_module_loggers()
+    stream = make_console_logging_handler(config.verbose)
     try:
-        reddit_scraper = RedditCloner(config)
+        reddit_scraper = RedditCloner(config, [stream])
         reddit_scraper.download()
     except Exception:
-        logger.exception('Scraper exited unexpectedly')
+        logger.exception("Scraper exited unexpectedly")
         raise
     else:
-        logger.info('Program complete')
+        logger.info("Program complete")
 
 
-def setup_logging(verbosity: int):
+@cli.command("completion")
+@click.argument("shell", type=click.Choice(("all", "bash", "fish", "zsh"), case_sensitive=False), default="all")
+@click.help_option("-h", "--help")
+@click.option("-u", "--uninstall", is_flag=True, default=False, help="Uninstall completion")
+def cli_completion(shell: str, uninstall: bool):
+    """\b
+    Installs shell completions for BDFR.
+    Options: all, bash, fish, zsh
+    Default: all"""
+    shell = shell.lower()
+    if sys.platform == "win32":
+        print("Completions are not currently supported on Windows.")
+        return
+    if uninstall and click.confirm(f"Would you like to uninstall {shell} completions for BDFR"):
+        Completion(shell).uninstall()
+        return
+    if shell not in ("all", "bash", "fish", "zsh"):
+        print(f"{shell} is not a valid option.")
+        print("Options: all, bash, fish, zsh")
+        return
+    if click.confirm(f"Would you like to install {shell} completions for BDFR"):
+        Completion(shell).install()
+
+
+def make_console_logging_handler(verbosity: int) -> logging.StreamHandler:
     class StreamExceptionFilter(logging.Filter):
         def filter(self, record: logging.LogRecord) -> bool:
             result = not (record.levelno == logging.ERROR and record.exc_info)
```
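The `_add_options` helper in the hunk above composes a list of click decorators into a single decorator so the subcommands can share one option set. A self-contained sketch of the same pattern, with a made-up command and a trimmed option list rather than the full BDFR set:

```python
import click

# Shared options, declared once and applied to several commands.
_common_options = [
    click.option("-L", "--limit", type=int, default=None),
    click.option("-v", "--verbose", count=True),
]


def _add_options(opts: list):
    # Return a decorator that applies every option in `opts` to a command.
    def wrap(func):
        for opt in opts:
            func = opt(func)
        return func

    return wrap


@click.command("demo")
@_add_options(_common_options)
def demo(limit, verbose):
    click.echo(f"limit={limit}, verbose={verbose}")


if __name__ == "__main__":
    demo()
```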
```diff
@@ -134,20 +200,23 @@ def setup_logging(verbosity: int):
     stream = logging.StreamHandler(sys.stdout)
     stream.addFilter(StreamExceptionFilter())
 
-    formatter = logging.Formatter('[%(asctime)s - %(name)s - %(levelname)s] - %(message)s')
+    formatter = logging.Formatter("[%(asctime)s - %(name)s - %(levelname)s] - %(message)s")
     stream.setFormatter(formatter)
 
-    logger.addHandler(stream)
     if verbosity <= 0:
         stream.setLevel(logging.INFO)
     elif verbosity == 1:
         stream.setLevel(logging.DEBUG)
     else:
         stream.setLevel(9)
-    logging.getLogger('praw').setLevel(logging.CRITICAL)
-    logging.getLogger('prawcore').setLevel(logging.CRITICAL)
-    logging.getLogger('urllib3').setLevel(logging.CRITICAL)
+    return stream
 
 
-if __name__ == '__main__':
+def silence_module_loggers():
+    logging.getLogger("praw").setLevel(logging.CRITICAL)
+    logging.getLogger("prawcore").setLevel(logging.CRITICAL)
+    logging.getLogger("urllib3").setLevel(logging.CRITICAL)
+
+
+if __name__ == "__main__":
     cli()
```
|
||||
|
|
|
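Note on the __main__ changes above: the old setup_logging is split into make_console_logging_handler, which builds and returns the console handler, and silence_module_loggers; the handler is then passed into the connector rather than attached globally. A minimal sketch of the resulting call flow (illustrative only, not part of the diff):

    # Hedged sketch of the new logging wiring, assuming a prepared Configuration.
    silence_module_loggers()                            # quieten praw/prawcore/urllib3
    stream = make_console_logging_handler(verbosity=1)  # StreamHandler on stdout
    Archiver(config, [stream]).download()               # connector attaches the handler itself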
@@ -1,2 +1,2 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 from abc import ABC, abstractmethod
+from typing import Union

 from praw.models import Comment, Submission


 class BaseArchiveEntry(ABC):
-    def __init__(self, source: (Comment, Submission)):
+    def __init__(self, source: Union[Comment, Submission]):
         self.source = source
         self.post_details: dict = {}

@@ -18,21 +19,21 @@ class BaseArchiveEntry(ABC):
     @staticmethod
     def _convert_comment_to_dict(in_comment: Comment) -> dict:
         out_dict = {
-            'author': in_comment.author.name if in_comment.author else 'DELETED',
-            'id': in_comment.id,
-            'score': in_comment.score,
-            'subreddit': in_comment.subreddit.display_name,
-            'author_flair': in_comment.author_flair_text,
-            'submission': in_comment.submission.id,
-            'stickied': in_comment.stickied,
-            'body': in_comment.body,
-            'is_submitter': in_comment.is_submitter,
-            'distinguished': in_comment.distinguished,
-            'created_utc': in_comment.created_utc,
-            'parent_id': in_comment.parent_id,
-            'replies': [],
+            "author": in_comment.author.name if in_comment.author else "DELETED",
+            "id": in_comment.id,
+            "score": in_comment.score,
+            "subreddit": in_comment.subreddit.display_name,
+            "author_flair": in_comment.author_flair_text,
+            "submission": in_comment.submission.id,
+            "stickied": in_comment.stickied,
+            "body": in_comment.body,
+            "is_submitter": in_comment.is_submitter,
+            "distinguished": in_comment.distinguished,
+            "created_utc": in_comment.created_utc,
+            "parent_id": in_comment.parent_id,
+            "replies": [],
         }
-        in_comment.replies.replace_more(0)
+        in_comment.replies.replace_more(limit=None)
         for reply in in_comment.replies:
-            out_dict['replies'].append(BaseArchiveEntry._convert_comment_to_dict(reply))
+            out_dict["replies"].append(BaseArchiveEntry._convert_comment_to_dict(reply))
         return out_dict
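Note that replace_more(0) and replace_more(limit=None) are not equivalent in PRAW: limit=0 removes unresolved MoreComments placeholders without fetching them, while limit=None keeps fetching until the whole tree is expanded, so the new code archives complete comment trees at the cost of extra API calls. Illustrative comparison:

    # PRAW CommentForest.replace_more semantics:
    in_comment.replies.replace_more(limit=0)     # old: discard unfetched branches
    in_comment.replies.replace_more(limit=None)  # new: fetch every branch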
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import logging

@@ -17,5 +17,5 @@ class CommentArchiveEntry(BaseArchiveEntry):
     def compile(self) -> dict:
         self.source.refresh()
         self.post_details = self._convert_comment_to_dict(self.source)
-        self.post_details['submission_title'] = self.source.submission.title
+        self.post_details["submission_title"] = self.source.submission.title
         return self.post_details
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import logging

@@ -18,34 +18,34 @@ class SubmissionArchiveEntry(BaseArchiveEntry):
         comments = self._get_comments()
         self._get_post_details()
         out = self.post_details
-        out['comments'] = comments
+        out["comments"] = comments
         return out

     def _get_post_details(self):
         self.post_details = {
-            'title': self.source.title,
-            'name': self.source.name,
-            'url': self.source.url,
-            'selftext': self.source.selftext,
-            'score': self.source.score,
-            'upvote_ratio': self.source.upvote_ratio,
-            'permalink': self.source.permalink,
-            'id': self.source.id,
-            'author': self.source.author.name if self.source.author else 'DELETED',
-            'link_flair_text': self.source.link_flair_text,
-            'num_comments': self.source.num_comments,
-            'over_18': self.source.over_18,
-            'spoiler': self.source.spoiler,
-            'pinned': self.source.pinned,
-            'locked': self.source.locked,
-            'distinguished': self.source.distinguished,
-            'created_utc': self.source.created_utc,
+            "title": self.source.title,
+            "name": self.source.name,
+            "url": self.source.url,
+            "selftext": self.source.selftext,
+            "score": self.source.score,
+            "upvote_ratio": self.source.upvote_ratio,
+            "permalink": self.source.permalink,
+            "id": self.source.id,
+            "author": self.source.author.name if self.source.author else "DELETED",
+            "link_flair_text": self.source.link_flair_text,
+            "num_comments": self.source.num_comments,
+            "over_18": self.source.over_18,
+            "spoiler": self.source.spoiler,
+            "pinned": self.source.pinned,
+            "locked": self.source.locked,
+            "distinguished": self.source.distinguished,
+            "created_utc": self.source.created_utc,
         }

     def _get_comments(self) -> list[dict]:
-        logger.debug(f'Retrieving full comment tree for submission {self.source.id}')
+        logger.debug(f"Retrieving full comment tree for submission {self.source.id}")
         comments = []
-        self.source.comments.replace_more(0)
+        self.source.comments.replace_more(limit=None)
         for top_level_comment in self.source.comments:
             comments.append(self._convert_comment_to_dict(top_level_comment))
         return comments
@@ -1,13 +1,17 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import json
 import logging
 import re
-from typing import Iterator
+from collections.abc import Iterable, Iterator
 from pathlib import Path
+from time import sleep
+from typing import Union

 import dict2xml
 import praw.models
+import prawcore
 import yaml

 from bdfr.archive_entry.base_archive_entry import BaseArchiveEntry

@@ -22,21 +26,40 @@ logger = logging.getLogger(__name__)


 class Archiver(RedditConnector):
-    def __init__(self, args: Configuration):
-        super(Archiver, self).__init__(args)
+    def __init__(self, args: Configuration, logging_handlers: Iterable[logging.Handler] = ()):
+        super(Archiver, self).__init__(args, logging_handlers)

     def download(self):
         for generator in self.reddit_lists:
-            for submission in generator:
-                logger.debug(f'Attempting to archive submission {submission.id}')
-                self.write_entry(submission)
+            try:
+                for submission in generator:
+                    try:
+                        if (submission.author and submission.author.name in self.args.ignore_user) or (
+                            submission.author is None and "DELETED" in self.args.ignore_user
+                        ):
+                            logger.debug(
+                                f"Submission {submission.id} in {submission.subreddit.display_name} skipped due to"
+                                f" {submission.author.name if submission.author else 'DELETED'} being an ignored user"
+                            )
+                            continue
+                        if submission.id in self.excluded_submission_ids:
+                            logger.debug(f"Object {submission.id} in exclusion list, skipping")
+                            continue
+                        logger.debug(f"Attempting to archive submission {submission.id}")
+                        self.write_entry(submission)
+                    except prawcore.PrawcoreException as e:
+                        logger.error(f"Submission {submission.id} failed to be archived due to a PRAW exception: {e}")
+            except prawcore.PrawcoreException as e:
+                logger.error(f"The submission after {submission.id} failed to download due to a PRAW exception: {e}")
+                logger.debug("Waiting 60 seconds to continue")
+                sleep(60)

     def get_submissions_from_link(self) -> list[list[praw.models.Submission]]:
         supplied_submissions = []
         for sub_id in self.args.link:
             if len(sub_id) == 6:
                 supplied_submissions.append(self.reddit_instance.submission(id=sub_id))
-            elif re.match(r'^\w{7}$', sub_id):
+            elif re.match(r"^\w{7}$", sub_id):
                 supplied_submissions.append(self.reddit_instance.comment(id=sub_id))
             else:
                 supplied_submissions.append(self.reddit_instance.submission(url=sub_id))

@@ -47,54 +70,55 @@ class Archiver(RedditConnector):
         if self.args.user and self.args.all_comments:
             sort = self.determine_sort_function()
             for user in self.args.user:
-                logger.debug(f'Retrieving comments of user {user}')
+                logger.debug(f"Retrieving comments of user {user}")
                 results.append(sort(self.reddit_instance.redditor(user).comments, limit=self.args.limit))
         return results

     @staticmethod
-    def _pull_lever_entry_factory(praw_item: (praw.models.Submission, praw.models.Comment)) -> BaseArchiveEntry:
+    def _pull_lever_entry_factory(praw_item: Union[praw.models.Submission, praw.models.Comment]) -> BaseArchiveEntry:
         if isinstance(praw_item, praw.models.Submission):
             return SubmissionArchiveEntry(praw_item)
         elif isinstance(praw_item, praw.models.Comment):
             return CommentArchiveEntry(praw_item)
         else:
-            raise ArchiverError(f'Factory failed to classify item of type {type(praw_item).__name__}')
+            raise ArchiverError(f"Factory failed to classify item of type {type(praw_item).__name__}")

-    def write_entry(self, praw_item: (praw.models.Submission, praw.models.Comment)):
+    def write_entry(self, praw_item: Union[praw.models.Submission, praw.models.Comment]):
         if self.args.comment_context and isinstance(praw_item, praw.models.Comment):
-            logger.debug(f'Converting comment {praw_item.id} to submission {praw_item.submission.id}')
+            logger.debug(f"Converting comment {praw_item.id} to submission {praw_item.submission.id}")
             praw_item = praw_item.submission
         archive_entry = self._pull_lever_entry_factory(praw_item)
-        if self.args.format == 'json':
+        if self.args.format == "json":
             self._write_entry_json(archive_entry)
-        elif self.args.format == 'xml':
+        elif self.args.format == "xml":
             self._write_entry_xml(archive_entry)
-        elif self.args.format == 'yaml':
+        elif self.args.format == "yaml":
             self._write_entry_yaml(archive_entry)
         else:
-            raise ArchiverError(f'Unknown format {self.args.format} given')
-        logger.info(f'Record for entry item {praw_item.id} written to disk')
+            raise ArchiverError(f"Unknown format {self.args.format} given")
+        logger.info(f"Record for entry item {praw_item.id} written to disk")

     def _write_entry_json(self, entry: BaseArchiveEntry):
-        resource = Resource(entry.source, '', lambda: None, '.json')
+        resource = Resource(entry.source, "", lambda: None, ".json")
         content = json.dumps(entry.compile())
         self._write_content_to_disk(resource, content)

     def _write_entry_xml(self, entry: BaseArchiveEntry):
-        resource = Resource(entry.source, '', lambda: None, '.xml')
-        content = dict2xml.dict2xml(entry.compile(), wrap='root')
+        resource = Resource(entry.source, "", lambda: None, ".xml")
+        content = dict2xml.dict2xml(entry.compile(), wrap="root")
         self._write_content_to_disk(resource, content)

     def _write_entry_yaml(self, entry: BaseArchiveEntry):
-        resource = Resource(entry.source, '', lambda: None, '.yaml')
-        content = yaml.dump(entry.compile())
+        resource = Resource(entry.source, "", lambda: None, ".yaml")
+        content = yaml.safe_dump(entry.compile())
         self._write_content_to_disk(resource, content)

     def _write_content_to_disk(self, resource: Resource, content: str):
         file_path = self.file_name_formatter.format_path(resource, self.download_directory)
         file_path.parent.mkdir(exist_ok=True, parents=True)
-        with open(file_path, 'w', encoding="utf-8") as file:
+        with Path(file_path).open(mode="w", encoding="utf-8") as file:
             logger.debug(
-                f'Writing entry {resource.source_submission.id} to file in {resource.extension[1:].upper()}'
-                f' format at {file_path}')
+                f"Writing entry {resource.source_submission.id} to file in {resource.extension[1:].upper()}"
+                f" format at {file_path}"
+            )
             file.write(content)
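The switch from yaml.dump to yaml.safe_dump above restricts serialisation to plain Python types; a compiled entry containing a non-basic object would now raise yaml.representer.RepresenterError instead of being written with Python-specific tags. A small illustration (assumed values, not from the diff):

    import yaml
    yaml.safe_dump({"score": 42, "author": "example"})  # fine: plain types only
    # yaml.safe_dump({"when": object()})                # would raise RepresenterError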
@@ -1,7 +1,11 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import logging
+from collections.abc import Iterable
+from time import sleep
+
+import prawcore

 from bdfr.archiver import Archiver
 from bdfr.configuration import Configuration

@@ -11,11 +15,19 @@ logger = logging.getLogger(__name__)


 class RedditCloner(RedditDownloader, Archiver):
-    def __init__(self, args: Configuration):
-        super(RedditCloner, self).__init__(args)
+    def __init__(self, args: Configuration, logging_handlers: Iterable[logging.Handler] = ()):
+        super(RedditCloner, self).__init__(args, logging_handlers)

     def download(self):
         for generator in self.reddit_lists:
-            for submission in generator:
-                self._download_submission(submission)
-                self.write_entry(submission)
+            try:
+                for submission in generator:
+                    try:
+                        self._download_submission(submission)
+                        self.write_entry(submission)
+                    except prawcore.PrawcoreException as e:
+                        logger.error(f"Submission {submission.id} failed to be cloned due to a PRAW exception: {e}")
+            except prawcore.PrawcoreException as e:
+                logger.error(f"The submission after {submission.id} failed to download due to a PRAW exception: {e}")
+                logger.debug("Waiting 60 seconds to continue")
+                sleep(60)
bdfr/completion.py (new file, 68 lines)
@@ -0,0 +1,68 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import subprocess
from os import environ
from pathlib import Path

import appdirs


class Completion:
    def __init__(self, shell: str):
        self.shell = shell
        self.env = environ.copy()
        self.share_dir = appdirs.user_data_dir()
        self.entry_points = ["bdfr", "bdfr-archive", "bdfr-clone", "bdfr-download"]

    def install(self):
        if self.shell in ("all", "bash"):
            comp_dir = self.share_dir + "/bash-completion/completions/"
            if not Path(comp_dir).exists():
                print("Creating Bash completion directory.")
                Path(comp_dir).mkdir(parents=True, exist_ok=True)
            for point in self.entry_points:
                self.env[f"_{point.upper().replace('-', '_')}_COMPLETE"] = "bash_source"
                with Path(comp_dir + point).open(mode="w") as file:
                    file.write(subprocess.run([point], env=self.env, capture_output=True, text=True).stdout)
                print(f"Bash completion for {point} written to {comp_dir}{point}")
        if self.shell in ("all", "fish"):
            comp_dir = self.share_dir + "/fish/vendor_completions.d/"
            if not Path(comp_dir).exists():
                print("Creating Fish completion directory.")
                Path(comp_dir).mkdir(parents=True, exist_ok=True)
            for point in self.entry_points:
                self.env[f"_{point.upper().replace('-', '_')}_COMPLETE"] = "fish_source"
                with Path(comp_dir + point + ".fish").open(mode="w") as file:
                    file.write(subprocess.run([point], env=self.env, capture_output=True, text=True).stdout)
                print(f"Fish completion for {point} written to {comp_dir}{point}.fish")
        if self.shell in ("all", "zsh"):
            comp_dir = self.share_dir + "/zsh/site-functions/"
            if not Path(comp_dir).exists():
                print("Creating Zsh completion directory.")
                Path(comp_dir).mkdir(parents=True, exist_ok=True)
            for point in self.entry_points:
                self.env[f"_{point.upper().replace('-', '_')}_COMPLETE"] = "zsh_source"
                with Path(comp_dir + "_" + point).open(mode="w") as file:
                    file.write(subprocess.run([point], env=self.env, capture_output=True, text=True).stdout)
                print(f"Zsh completion for {point} written to {comp_dir}_{point}")

    def uninstall(self):
        if self.shell in ("all", "bash"):
            comp_dir = self.share_dir + "/bash-completion/completions/"
            for point in self.entry_points:
                if Path(comp_dir + point).exists():
                    Path(comp_dir + point).unlink()
                    print(f"Bash completion for {point} removed from {comp_dir}{point}")
        if self.shell in ("all", "fish"):
            comp_dir = self.share_dir + "/fish/vendor_completions.d/"
            for point in self.entry_points:
                if Path(comp_dir + point + ".fish").exists():
                    Path(comp_dir + point + ".fish").unlink()
                    print(f"Fish completion for {point} removed from {comp_dir}{point}.fish")
        if self.shell in ("all", "zsh"):
            comp_dir = self.share_dir + "/zsh/site-functions/"
            for point in self.entry_points:
                if Path(comp_dir + "_" + point).exists():
                    Path(comp_dir + "_" + point).unlink()
                    print(f"Zsh completion for {point} removed from {comp_dir}_{point}")
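The new Completion class leans on Click's built-in completion hook: invoking an entry point with the _<PROG>_COMPLETE environment variable set to "<shell>_source" makes Click print the completion script instead of running the command, and the class captures that output into the shell's completion directory under appdirs.user_data_dir(). A hedged usage sketch:

    # Programmatic install/uninstall of bash completions for the BDFR entry points.
    from bdfr.completion import Completion
    Completion("bash").install()    # writes completion scripts for bdfr, bdfr-archive, ...
    Completion("bash").uninstall()  # removes them again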
@@ -1,10 +1,15 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

+import logging
 from argparse import Namespace
 from pathlib import Path
 from typing import Optional

 import click
 import yaml

+logger = logging.getLogger(__name__)
+

 class Configuration(Namespace):

@@ -12,12 +17,15 @@ class Configuration(Namespace):
         super(Configuration, self).__init__()
         self.authenticate = False
         self.config = None
-        self.directory: str = '.'
+        self.opts: Optional[str] = None
+        self.directory: str = "."
         self.disable_module: list[str] = []
         self.exclude_id = []
         self.exclude_id_file = []
-        self.file_scheme: str = '{REDDITOR}_{TITLE}_{POSTID}'
-        self.folder_scheme: str = '{SUBREDDIT}'
+        self.file_scheme: str = "{REDDITOR}_{TITLE}_{POSTID}"
+        self.filename_restriction_scheme = None
+        self.folder_scheme: str = "{SUBREDDIT}"
         self.ignore_user = []
         self.include_id_file = []
         self.limit: Optional[int] = None
         self.link: list[str] = []

@@ -32,10 +40,15 @@ class Configuration(Namespace):
         self.skip: list[str] = []
         self.skip_domain: list[str] = []
         self.skip_subreddit: list[str] = []
-        self.sort: str = 'hot'
+        self.min_score = None
+        self.max_score = None
+        self.min_score_ratio = None
+        self.max_score_ratio = None
+        self.sort: str = "hot"
         self.submitted: bool = False
+        self.subscribed: bool = False
         self.subreddit: list[str] = []
-        self.time: str = 'all'
+        self.time: str = "all"
         self.time_format = None
         self.upvoted: bool = False
         self.user: list[str] = []

@@ -43,10 +56,35 @@ class Configuration(Namespace):

         # Archiver-specific options
         self.all_comments = False
-        self.format = 'json'
+        self.format = "json"
         self.comment_context: bool = False

     def process_click_arguments(self, context: click.Context):
+        if context.params.get("opts") is not None:
+            self.parse_yaml_options(context.params["opts"])
         for arg_key in context.params.keys():
-            if arg_key in vars(self) and context.params[arg_key] is not None:
-                vars(self)[arg_key] = context.params[arg_key]
+            if not hasattr(self, arg_key):
+                logger.warning(f"Ignoring an unknown CLI argument: {arg_key}")
+                continue
+            val = context.params[arg_key]
+            if val is None or val == ():
+                # don't overwrite with an empty value
+                continue
+            setattr(self, arg_key, val)
+
+    def parse_yaml_options(self, file_path: str):
+        yaml_file_loc = Path(file_path)
+        if not yaml_file_loc.exists():
+            logger.error(f"No YAML file found at {yaml_file_loc}")
+            return
+        with yaml_file_loc.open() as file:
+            try:
+                opts = yaml.safe_load(file)
+            except yaml.YAMLError as e:
+                logger.error(f"Could not parse YAML options file: {e}")
+                return
+        for arg_key, val in opts.items():
+            if not hasattr(self, arg_key):
+                logger.warning(f"Ignoring an unknown YAML argument: {arg_key}")
+                continue
+            setattr(self, arg_key, val)
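The new opts handling above lets a YAML file pre-populate Configuration before the remaining CLI flags are applied; CLI values win because they are processed afterwards, and empty values are skipped so they cannot clobber YAML-supplied ones. A hypothetical example (file name and values are assumptions):

    # opts.yaml might contain:
    #   subreddit: [EarthPorn]
    #   limit: 10
    config = Configuration()
    config.parse_yaml_options("opts.yaml")  # unknown keys are logged and ignored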
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import configparser
 import importlib.resources

@@ -10,10 +10,11 @@ import re
 import shutil
 import socket
 from abc import ABCMeta, abstractmethod
+from collections.abc import Callable, Iterable, Iterator
 from datetime import datetime
 from enum import Enum, auto
 from pathlib import Path
-from typing import Callable, Iterator
+from time import sleep

 import appdirs
 import praw

@@ -41,40 +42,40 @@ class RedditTypes:
         TOP = auto()

     class TimeType(Enum):
-        ALL = 'all'
-        DAY = 'day'
-        HOUR = 'hour'
-        MONTH = 'month'
-        WEEK = 'week'
-        YEAR = 'year'
+        ALL = "all"
+        DAY = "day"
+        HOUR = "hour"
+        MONTH = "month"
+        WEEK = "week"
+        YEAR = "year"


 class RedditConnector(metaclass=ABCMeta):
-    def __init__(self, args: Configuration):
+    def __init__(self, args: Configuration, logging_handlers: Iterable[logging.Handler] = ()):
         self.args = args
-        self.config_directories = appdirs.AppDirs('bdfr', 'BDFR')
+        self.config_directories = appdirs.AppDirs("bdfr", "BDFR")
+        self.determine_directories()
+        self.load_config()
+        self.read_config()
+        file_log = self.create_file_logger()
+        self._apply_logging_handlers(itertools.chain(logging_handlers, [file_log]))
         self.run_time = datetime.now().isoformat()
         self._setup_internal_objects()

         self.reddit_lists = self.retrieve_reddit_lists()

     def _setup_internal_objects(self):
-        self.determine_directories()
-        self.load_config()
-        self.create_file_logger()
-
-        self.read_config()
-
         self.parse_disabled_modules()

         self.download_filter = self.create_download_filter()
-        logger.log(9, 'Created download filter')
+        logger.log(9, "Created download filter")
         self.time_filter = self.create_time_filter()
-        logger.log(9, 'Created time filter')
+        logger.log(9, "Created time filter")
         self.sort_filter = self.create_sort_filter()
-        logger.log(9, 'Created sort filter')
+        logger.log(9, "Created sort filter")
         self.file_name_formatter = self.create_file_name_formatter()
-        logger.log(9, 'Create file name formatter')
+        logger.log(9, "Create file name formatter")

         self.create_reddit_instance()
         self.args.user = list(filter(None, [self.resolve_user_name(user) for user in self.args.user]))

@@ -88,79 +89,90 @@ class RedditConnector(metaclass=ABCMeta):

         self.master_hash_list = {}
         self.authenticator = self.create_authenticator()
-        logger.log(9, 'Created site authenticator')
+        logger.log(9, "Created site authenticator")

         self.args.skip_subreddit = self.split_args_input(self.args.skip_subreddit)
-        self.args.skip_subreddit = set([sub.lower() for sub in self.args.skip_subreddit])
+        self.args.skip_subreddit = {sub.lower() for sub in self.args.skip_subreddit}

+    @staticmethod
+    def _apply_logging_handlers(handlers: Iterable[logging.Handler]):
+        main_logger = logging.getLogger()
+        for handler in handlers:
+            main_logger.addHandler(handler)
+
     def read_config(self):
         """Read any cfg values that need to be processed"""
         if self.args.max_wait_time is None:
-            self.args.max_wait_time = self.cfg_parser.getint('DEFAULT', 'max_wait_time', fallback=120)
-            logger.debug(f'Setting maximum download wait time to {self.args.max_wait_time} seconds')
+            self.args.max_wait_time = self.cfg_parser.getint("DEFAULT", "max_wait_time", fallback=120)
+            logger.debug(f"Setting maximum download wait time to {self.args.max_wait_time} seconds")
         if self.args.time_format is None:
-            option = self.cfg_parser.get('DEFAULT', 'time_format', fallback='ISO')
-            if re.match(r'^[\s\'\"]*$', option):
-                option = 'ISO'
-            logger.debug(f'Setting datetime format string to {option}')
+            option = self.cfg_parser.get("DEFAULT", "time_format", fallback="ISO")
+            if re.match(r"^[\s\'\"]*$", option):
+                option = "ISO"
+            logger.debug(f"Setting datetime format string to {option}")
             self.args.time_format = option
         if not self.args.disable_module:
-            self.args.disable_module = [self.cfg_parser.get('DEFAULT', 'disabled_modules', fallback='')]
+            self.args.disable_module = [self.cfg_parser.get("DEFAULT", "disabled_modules", fallback="")]
+        if not self.args.filename_restriction_scheme:
+            self.args.filename_restriction_scheme = self.cfg_parser.get(
+                "DEFAULT", "filename_restriction_scheme", fallback=None
+            )
+            logger.debug(f"Setting filename restriction scheme to '{self.args.filename_restriction_scheme}'")
         # Update config on disk
-        with open(self.config_location, 'w') as file:
+        with Path(self.config_location).open(mode="w") as file:
             self.cfg_parser.write(file)

     def parse_disabled_modules(self):
         disabled_modules = self.args.disable_module
         disabled_modules = self.split_args_input(disabled_modules)
-        disabled_modules = set([name.strip().lower() for name in disabled_modules])
+        disabled_modules = {name.strip().lower() for name in disabled_modules}
         self.args.disable_module = disabled_modules
         logger.debug(f'Disabling the following modules: {", ".join(self.args.disable_module)}')

     def create_reddit_instance(self):
         if self.args.authenticate:
-            logger.debug('Using authenticated Reddit instance')
-            if not self.cfg_parser.has_option('DEFAULT', 'user_token'):
-                logger.log(9, 'Commencing OAuth2 authentication')
-                scopes = self.cfg_parser.get('DEFAULT', 'scopes', fallback='identity, history, read, save')
+            logger.debug("Using authenticated Reddit instance")
+            if not self.cfg_parser.has_option("DEFAULT", "user_token"):
+                logger.log(9, "Commencing OAuth2 authentication")
+                scopes = self.cfg_parser.get("DEFAULT", "scopes", fallback="identity, history, read, save")
                 scopes = OAuth2Authenticator.split_scopes(scopes)
                 oauth2_authenticator = OAuth2Authenticator(
                     scopes,
-                    self.cfg_parser.get('DEFAULT', 'client_id'),
-                    self.cfg_parser.get('DEFAULT', 'client_secret'),
+                    self.cfg_parser.get("DEFAULT", "client_id"),
+                    self.cfg_parser.get("DEFAULT", "client_secret"),
                 )
                 token = oauth2_authenticator.retrieve_new_token()
-                self.cfg_parser['DEFAULT']['user_token'] = token
-                with open(self.config_location, 'w') as file:
+                self.cfg_parser["DEFAULT"]["user_token"] = token
+                with Path(self.config_location).open(mode="w") as file:
                     self.cfg_parser.write(file, True)
             token_manager = OAuth2TokenManager(self.cfg_parser, self.config_location)

             self.authenticated = True
             self.reddit_instance = praw.Reddit(
-                client_id=self.cfg_parser.get('DEFAULT', 'client_id'),
-                client_secret=self.cfg_parser.get('DEFAULT', 'client_secret'),
+                client_id=self.cfg_parser.get("DEFAULT", "client_id"),
+                client_secret=self.cfg_parser.get("DEFAULT", "client_secret"),
                 user_agent=socket.gethostname(),
                 token_manager=token_manager,
             )
         else:
-            logger.debug('Using unauthenticated Reddit instance')
+            logger.debug("Using unauthenticated Reddit instance")
             self.authenticated = False
             self.reddit_instance = praw.Reddit(
-                client_id=self.cfg_parser.get('DEFAULT', 'client_id'),
-                client_secret=self.cfg_parser.get('DEFAULT', 'client_secret'),
+                client_id=self.cfg_parser.get("DEFAULT", "client_id"),
+                client_secret=self.cfg_parser.get("DEFAULT", "client_secret"),
                 user_agent=socket.gethostname(),
             )

     def retrieve_reddit_lists(self) -> list[praw.models.ListingGenerator]:
         master_list = []
         master_list.extend(self.get_subreddits())
-        logger.log(9, 'Retrieved subreddits')
+        logger.log(9, "Retrieved subreddits")
         master_list.extend(self.get_multireddits())
-        logger.log(9, 'Retrieved multireddits')
+        logger.log(9, "Retrieved multireddits")
         master_list.extend(self.get_user_data())
-        logger.log(9, 'Retrieved user data')
+        logger.log(9, "Retrieved user data")
         master_list.extend(self.get_submissions_from_link())
-        logger.log(9, 'Retrieved submissions for given links')
+        logger.log(9, "Retrieved submissions for given links")
         return master_list

     def determine_directories(self):

@@ -178,37 +190,36 @@ class RedditConnector(metaclass=ABCMeta):
             self.config_location = cfg_path
             return
         possible_paths = [
-            Path('./config.cfg'),
-            Path('./default_config.cfg'),
-            Path(self.config_directory, 'config.cfg'),
-            Path(self.config_directory, 'default_config.cfg'),
+            Path("./config.cfg"),
+            Path("./default_config.cfg"),
+            Path(self.config_directory, "config.cfg"),
+            Path(self.config_directory, "default_config.cfg"),
         ]
         self.config_location = None
         for path in possible_paths:
             if path.resolve().expanduser().exists():
                 self.config_location = path
-                logger.debug(f'Loading configuration from {path}')
+                logger.debug(f"Loading configuration from {path}")
                 break
         if not self.config_location:
-            with importlib.resources.path('bdfr', 'default_config.cfg') as path:
+            with importlib.resources.path("bdfr", "default_config.cfg") as path:
                 self.config_location = path
-                shutil.copy(self.config_location, Path(self.config_directory, 'default_config.cfg'))
+                shutil.copy(self.config_location, Path(self.config_directory, "default_config.cfg"))
         if not self.config_location:
-            raise errors.BulkDownloaderException('Could not find a configuration file to load')
+            raise errors.BulkDownloaderException("Could not find a configuration file to load")
         self.cfg_parser.read(self.config_location)

-    def create_file_logger(self):
-        main_logger = logging.getLogger()
+    def create_file_logger(self) -> logging.handlers.RotatingFileHandler:
         if self.args.log is None:
-            log_path = Path(self.config_directory, 'log_output.txt')
+            log_path = Path(self.config_directory, "log_output.txt")
         else:
             log_path = Path(self.args.log).resolve().expanduser()
             if not log_path.parent.exists():
-                raise errors.BulkDownloaderException(f'Designated location for logfile does not exist')
-        backup_count = self.cfg_parser.getint('DEFAULT', 'backup_log_count', fallback=3)
+                raise errors.BulkDownloaderException("Designated location for logfile does not exist")
+        backup_count = self.cfg_parser.getint("DEFAULT", "backup_log_count", fallback=3)
         file_handler = logging.handlers.RotatingFileHandler(
             log_path,
-            mode='a',
+            mode="a",
             backupCount=backup_count,
         )
         if log_path.exists():

@@ -216,38 +227,48 @@ class RedditConnector(metaclass=ABCMeta):
                 file_handler.doRollover()
             except PermissionError:
                 logger.critical(
-                    'Cannot rollover logfile, make sure this is the only '
-                    'BDFR process or specify alternate logfile location')
+                    "Cannot rollover logfile, make sure this is the only "
+                    "BDFR process or specify alternate logfile location"
+                )
                 raise
-        formatter = logging.Formatter('[%(asctime)s - %(name)s - %(levelname)s] - %(message)s')
+        formatter = logging.Formatter("[%(asctime)s - %(name)s - %(levelname)s] - %(message)s")
         file_handler.setFormatter(formatter)
         file_handler.setLevel(0)

-        main_logger.addHandler(file_handler)
+        return file_handler

     @staticmethod
     def sanitise_subreddit_name(subreddit: str) -> str:
-        pattern = re.compile(r'^(?:https://www\.reddit\.com/)?(?:r/)?(.*?)/?$')
+        pattern = re.compile(r"^(?:https://www\.reddit\.com/)?(?:r/)?(.*?)/?$")
         match = re.match(pattern, subreddit)
         if not match:
-            raise errors.BulkDownloaderException(f'Could not find subreddit name in string {subreddit}')
+            raise errors.BulkDownloaderException(f"Could not find subreddit name in string {subreddit}")
         return match.group(1)

     @staticmethod
     def split_args_input(entries: list[str]) -> set[str]:
         all_entries = []
-        split_pattern = re.compile(r'[,;]\s?')
+        split_pattern = re.compile(r"[,;]\s?")
         for entry in entries:
             results = re.split(split_pattern, entry)
             all_entries.extend([RedditConnector.sanitise_subreddit_name(name) for name in results])
         return set(all_entries)

     def get_subreddits(self) -> list[praw.models.ListingGenerator]:
-        if self.args.subreddit:
-            out = []
-            for reddit in self.split_args_input(self.args.subreddit):
-                if reddit == 'friends' and self.authenticated is False:
-                    logger.error('Cannot read friends subreddit without an authenticated instance')
+        out = []
+        subscribed_subreddits = set()
+        if self.args.subscribed:
+            if self.args.authenticate:
+                try:
+                    subscribed_subreddits = list(self.reddit_instance.user.subreddits(limit=None))
+                    subscribed_subreddits = {s.display_name for s in subscribed_subreddits}
+                except prawcore.InsufficientScope:
+                    logger.error("BDFR has insufficient scope to access subreddit lists")
+            else:
+                logger.error("Cannot find subscribed subreddits without an authenticated instance")
+        if self.args.subreddit or subscribed_subreddits:
+            for reddit in self.split_args_input(self.args.subreddit) | subscribed_subreddits:
+                if reddit == "friends" and self.authenticated is False:
+                    logger.error("Cannot read friends subreddit without an authenticated instance")
                     continue
                 try:
                     reddit = self.reddit_instance.subreddit(reddit)

@@ -257,28 +278,29 @@ class RedditConnector(metaclass=ABCMeta):
                     logger.error(e)
                     continue
                 if self.args.search:
-                    out.append(reddit.search(
-                        self.args.search,
-                        sort=self.sort_filter.name.lower(),
-                        limit=self.args.limit,
-                        time_filter=self.time_filter.value,
-                    ))
+                    out.append(
+                        reddit.search(
+                            self.args.search,
+                            sort=self.sort_filter.name.lower(),
+                            limit=self.args.limit,
+                            time_filter=self.time_filter.value,
+                        )
+                    )
                     logger.debug(
-                        f'Added submissions from subreddit {reddit} with the search term "{self.args.search}"')
+                        f'Added submissions from subreddit {reddit} with the search term "{self.args.search}"'
+                    )
                 else:
                     out.append(self.create_filtered_listing_generator(reddit))
-                    logger.debug(f'Added submissions from subreddit {reddit}')
+                    logger.debug(f"Added submissions from subreddit {reddit}")
             except (errors.BulkDownloaderException, praw.exceptions.PRAWException) as e:
-                logger.error(f'Failed to get submissions for subreddit {reddit}: {e}')
-            return out
-        else:
-            return []
+                logger.error(f"Failed to get submissions for subreddit {reddit}: {e}")
+        return out

     def resolve_user_name(self, in_name: str) -> str:
-        if in_name == 'me':
+        if in_name == "me":
             if self.authenticated:
                 resolved_name = self.reddit_instance.user.me().name
-                logger.log(9, f'Resolved user to {resolved_name}')
+                logger.log(9, f"Resolved user to {resolved_name}")
                 return resolved_name
             else:
                 logger.warning('To use "me" as a user, an authenticated Reddit instance must be used')

@@ -288,7 +310,7 @@ class RedditConnector(metaclass=ABCMeta):
     def get_submissions_from_link(self) -> list[list[praw.models.Submission]]:
         supplied_submissions = []
         for sub_id in self.args.link:
-            if len(sub_id) == 6:
+            if len(sub_id) in (6, 7):
                 supplied_submissions.append(self.reddit_instance.submission(id=sub_id))
             else:
                 supplied_submissions.append(self.reddit_instance.submission(url=sub_id))

@@ -310,18 +332,18 @@ class RedditConnector(metaclass=ABCMeta):
     def get_multireddits(self) -> list[Iterator]:
         if self.args.multireddit:
             if len(self.args.user) != 1:
-                logger.error(f'Only 1 user can be supplied when retrieving from multireddits')
+                logger.error("Only 1 user can be supplied when retrieving from multireddits")
                 return []
             out = []
             for multi in self.split_args_input(self.args.multireddit):
                 try:
-                    multi = self.reddit_instance.multireddit(self.args.user[0], multi)
+                    multi = self.reddit_instance.multireddit(redditor=self.args.user[0], name=multi)
                     if not multi.subreddits:
                         raise errors.BulkDownloaderException
                     out.append(self.create_filtered_listing_generator(multi))
-                    logger.debug(f'Added submissions from multireddit {multi}')
+                    logger.debug(f"Added submissions from multireddit {multi}")
                 except (errors.BulkDownloaderException, praw.exceptions.PRAWException, prawcore.PrawcoreException) as e:
-                    logger.error(f'Failed to get submissions for multireddit {multi}: {e}')
+                    logger.error(f"Failed to get submissions for multireddit {multi}: {e}")
             return out
         else:
             return []

@@ -336,29 +358,36 @@ class RedditConnector(metaclass=ABCMeta):
     def get_user_data(self) -> list[Iterator]:
         if any([self.args.submitted, self.args.upvoted, self.args.saved]):
             if not self.args.user:
-                logger.warning('At least one user must be supplied to download user data')
+                logger.warning("At least one user must be supplied to download user data")
                 return []
             generators = []
             for user in self.args.user:
                 try:
-                    self.check_user_existence(user)
-                except errors.BulkDownloaderException as e:
-                    logger.error(e)
-                    continue
-                if self.args.submitted:
-                    logger.debug(f'Retrieving submitted posts of user {self.args.user}')
-                    generators.append(self.create_filtered_listing_generator(
-                        self.reddit_instance.redditor(user).submissions,
-                    ))
-                if not self.authenticated and any((self.args.upvoted, self.args.saved)):
-                    logger.warning('Accessing user lists requires authentication')
-                else:
-                    if self.args.upvoted:
-                        logger.debug(f'Retrieving upvoted posts of user {self.args.user}')
-                        generators.append(self.reddit_instance.redditor(user).upvoted(limit=self.args.limit))
-                    if self.args.saved:
-                        logger.debug(f'Retrieving saved posts of user {self.args.user}')
-                        generators.append(self.reddit_instance.redditor(user).saved(limit=self.args.limit))
+                    try:
+                        self.check_user_existence(user)
+                    except errors.BulkDownloaderException as e:
+                        logger.error(e)
+                        continue
+                    if self.args.submitted:
+                        logger.debug(f"Retrieving submitted posts of user {user}")
+                        generators.append(
+                            self.create_filtered_listing_generator(
+                                self.reddit_instance.redditor(user).submissions,
+                            )
+                        )
+                    if not self.authenticated and any((self.args.upvoted, self.args.saved)):
+                        logger.warning("Accessing user lists requires authentication")
+                    else:
+                        if self.args.upvoted:
+                            logger.debug(f"Retrieving upvoted posts of user {user}")
+                            generators.append(self.reddit_instance.redditor(user).upvoted(limit=self.args.limit))
+                        if self.args.saved:
+                            logger.debug(f"Retrieving saved posts of user {user}")
+                            generators.append(self.reddit_instance.redditor(user).saved(limit=self.args.limit))
+                except prawcore.PrawcoreException as e:
+                    logger.error(f"User {user} failed to be retrieved due to a PRAW exception: {e}")
+                    logger.debug("Waiting 60 seconds to continue")
+                    sleep(60)
             return generators
         else:
             return []

@@ -369,13 +398,15 @@ class RedditConnector(metaclass=ABCMeta):
             if user.id:
                 return
         except prawcore.exceptions.NotFound:
-            raise errors.BulkDownloaderException(f'Could not find user {name}')
+            raise errors.BulkDownloaderException(f"Could not find user {name}")
         except AttributeError:
-            if hasattr(user, 'is_suspended'):
-                raise errors.BulkDownloaderException(f'User {name} is banned')
+            if hasattr(user, "is_suspended"):
+                raise errors.BulkDownloaderException(f"User {name} is banned")

     def create_file_name_formatter(self) -> FileNameFormatter:
-        return FileNameFormatter(self.args.file_scheme, self.args.folder_scheme, self.args.time_format)
+        return FileNameFormatter(
+            self.args.file_scheme, self.args.folder_scheme, self.args.time_format, self.args.filename_restriction_scheme
+        )

     def create_time_filter(self) -> RedditTypes.TimeType:
         try:

@@ -401,14 +432,16 @@ class RedditConnector(metaclass=ABCMeta):

     @staticmethod
     def check_subreddit_status(subreddit: praw.models.Subreddit):
-        if subreddit.display_name in ('all', 'friends'):
+        if subreddit.display_name in ("all", "friends"):
             return
         try:
             assert subreddit.id
         except prawcore.NotFound:
-            raise errors.BulkDownloaderException(f'Source {subreddit.display_name} does not exist or cannot be found')
+            raise errors.BulkDownloaderException(f"Source {subreddit.display_name} cannot be found")
+        except prawcore.Redirect:
+            raise errors.BulkDownloaderException(f"Source {subreddit.display_name} does not exist")
         except prawcore.Forbidden:
-            raise errors.BulkDownloaderException(f'Source {subreddit.display_name} is private and cannot be scraped')
+            raise errors.BulkDownloaderException(f"Source {subreddit.display_name} is private and cannot be scraped")

     @staticmethod
     def read_id_files(file_locations: list[str]) -> set[str]:

@@ -416,9 +449,9 @@ class RedditConnector(metaclass=ABCMeta):
         for id_file in file_locations:
            id_file = Path(id_file).resolve().expanduser()
            if not id_file.exists():
-                logger.warning(f'ID file at {id_file} does not exist')
+                logger.warning(f"ID file at {id_file} does not exist")
                 continue
-            with open(id_file, 'r') as file:
+            with id_file.open("r") as file:
                 for line in file:
                     out.append(line.strip())
         return set(out)
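The len(sub_id) in (6, 7) change in get_submissions_from_link reflects that Reddit's base-36 submission IDs have grown past six characters, so seven-character strings are now treated as direct IDs rather than URLs. Sketch of the dispatch (hypothetical inputs):

    reddit.submission(id="abc123")                   # six-character ID
    reddit.submission(id="zyx9876")                  # seven-character ID, now accepted
    reddit.submission(url="https://redd.it/abc123")  # anything else is parsed as a URL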
@@ -1,7 +1,7 @@
 [DEFAULT]
 client_id = U-6gk4ZCh3IeNQ
 client_secret = 7CZHY6AmKweZME5s50SfDGylaPg
-scopes = identity, history, read, save
+scopes = identity, history, read, save, mysubreddits
 backup_log_count = 3
 max_wait_time = 120
 time_format = ISO
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import logging
 import re

@@ -33,10 +33,10 @@ class DownloadFilter:
     def _check_extension(self, resource_extension: str) -> bool:
         if not self.excluded_extensions:
             return True
-        combined_extensions = '|'.join(self.excluded_extensions)
-        pattern = re.compile(r'.*({})$'.format(combined_extensions))
+        combined_extensions = "|".join(self.excluded_extensions)
+        pattern = re.compile(r".*({})$".format(combined_extensions))
         if re.match(pattern, resource_extension):
-            logger.log(9, f'Url "{resource_extension}" matched with "{str(pattern)}"')
+            logger.log(9, f'Url "{resource_extension}" matched with "{pattern}"')
             return False
         else:
             return True

@@ -44,10 +44,10 @@ class DownloadFilter:
     def _check_domain(self, url: str) -> bool:
         if not self.excluded_domains:
             return True
-        combined_domains = '|'.join(self.excluded_domains)
-        pattern = re.compile(r'https?://.*({}).*'.format(combined_domains))
+        combined_domains = "|".join(self.excluded_domains)
+        pattern = re.compile(r"https?://.*({}).*".format(combined_domains))
         if re.match(pattern, url):
-            logger.log(9, f'Url "{url}" matched with "{str(pattern)}"')
+            logger.log(9, f'Url "{url}" matched with "{pattern}"')
             return False
         else:
             return True
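Both checks above build one alternation regex from the exclusion list and match it as a substring, so excluding imgur.com also catches i.imgur.com. Illustrative check (hypothetical exclusions):

    import re
    pattern = re.compile(r"https?://.*({}).*".format("|".join(["imgur.com", "gfycat.com"])))
    bool(re.match(pattern, "https://i.imgur.com/example.jpg"))  # True, so the URL is filtered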
@@ -1,17 +1,20 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import hashlib
 import logging.handlers
 import os
 import time
+from collections.abc import Iterable
 from datetime import datetime
 from multiprocessing import Pool
 from pathlib import Path
+from time import sleep

 import praw
 import praw.exceptions
 import praw.models
 import prawcore

 from bdfr import exceptions as errors
 from bdfr.configuration import Configuration

@@ -24,7 +27,7 @@ logger = logging.getLogger(__name__)
 def _calc_hash(existing_file: Path):
     chunk_size = 1024 * 1024
     md5_hash = hashlib.md5()
-    with open(existing_file, 'rb') as file:
+    with existing_file.open("rb") as file:
         chunk = file.read(chunk_size)
         while chunk:
             md5_hash.update(chunk)

@@ -34,92 +37,128 @@ def _calc_hash(existing_file: Path):


 class RedditDownloader(RedditConnector):
-    def __init__(self, args: Configuration):
-        super(RedditDownloader, self).__init__(args)
+    def __init__(self, args: Configuration, logging_handlers: Iterable[logging.Handler] = ()):
+        super(RedditDownloader, self).__init__(args, logging_handlers)
         if self.args.search_existing:
             self.master_hash_list = self.scan_existing_files(self.download_directory)

     def download(self):
         for generator in self.reddit_lists:
-            for submission in generator:
-                self._download_submission(submission)
+            try:
+                for submission in generator:
+                    try:
+                        self._download_submission(submission)
+                    except prawcore.PrawcoreException as e:
+                        logger.error(f"Submission {submission.id} failed to download due to a PRAW exception: {e}")
+            except prawcore.PrawcoreException as e:
+                logger.error(f"The submission after {submission.id} failed to download due to a PRAW exception: {e}")
+                logger.debug("Waiting 60 seconds to continue")
+                sleep(60)

     def _download_submission(self, submission: praw.models.Submission):
         if submission.id in self.excluded_submission_ids:
-            logger.debug(f'Object {submission.id} in exclusion list, skipping')
+            logger.debug(f"Object {submission.id} in exclusion list, skipping")
             return
         elif submission.subreddit.display_name.lower() in self.args.skip_subreddit:
-            logger.debug(f'Submission {submission.id} in {submission.subreddit.display_name} in skip list')
+            logger.debug(f"Submission {submission.id} in {submission.subreddit.display_name} in skip list")
             return
+        elif (submission.author and submission.author.name in self.args.ignore_user) or (
+            submission.author is None and "DELETED" in self.args.ignore_user
+        ):
+            logger.debug(
+                f"Submission {submission.id} in {submission.subreddit.display_name} skipped"
+                f' due to {submission.author.name if submission.author else "DELETED"} being an ignored user'
+            )
+            return
+        elif self.args.min_score and submission.score < self.args.min_score:
+            logger.debug(
+                f"Submission {submission.id} filtered due to score {submission.score} < [{self.args.min_score}]"
+            )
+            return
+        elif self.args.max_score and self.args.max_score < submission.score:
+            logger.debug(
+                f"Submission {submission.id} filtered due to score {submission.score} > [{self.args.max_score}]"
+            )
+            return
+        elif (self.args.min_score_ratio and submission.upvote_ratio < self.args.min_score_ratio) or (
+            self.args.max_score_ratio and self.args.max_score_ratio < submission.upvote_ratio
+        ):
+            logger.debug(f"Submission {submission.id} filtered due to score ratio ({submission.upvote_ratio})")
+            return
         elif not isinstance(submission, praw.models.Submission):
-            logger.warning(f'{submission.id} is not a submission')
+            logger.warning(f"{submission.id} is not a submission")
             return
         elif not self.download_filter.check_url(submission.url):
-            logger.debug(f'Submission {submission.id} filtered due to URL {submission.url}')
+            logger.debug(f"Submission {submission.id} filtered due to URL {submission.url}")
             return

-        logger.debug(f'Attempting to download submission {submission.id}')
+        logger.debug(f"Attempting to download submission {submission.id}")
         try:
             downloader_class = DownloadFactory.pull_lever(submission.url)
             downloader = downloader_class(submission)
-            logger.debug(f'Using {downloader_class.__name__} with url {submission.url}')
+            logger.debug(f"Using {downloader_class.__name__} with url {submission.url}")
         except errors.NotADownloadableLinkError as e:
-            logger.error(f'Could not download submission {submission.id}: {e}')
+            logger.error(f"Could not download submission {submission.id}: {e}")
             return
         if downloader_class.__name__.lower() in self.args.disable_module:
-            logger.debug(f'Submission {submission.id} skipped due to disabled module {downloader_class.__name__}')
+            logger.debug(f"Submission {submission.id} skipped due to disabled module {downloader_class.__name__}")
             return
         try:
             content = downloader.find_resources(self.authenticator)
         except errors.SiteDownloaderError as e:
-            logger.error(f'Site {downloader_class.__name__} failed to download submission {submission.id}: {e}')
+            logger.error(f"Site {downloader_class.__name__} failed to download submission {submission.id}: {e}")
             return
         for destination, res in self.file_name_formatter.format_resource_paths(content, self.download_directory):
             if destination.exists():
-                logger.debug(f'File {destination} from submission {submission.id} already exists, continuing')
+                logger.debug(f"File {destination} from submission {submission.id} already exists, continuing")
                 continue
             elif not self.download_filter.check_resource(res):
-                logger.debug(f'Download filter removed {submission.id} file with URL {submission.url}')
+                logger.debug(f"Download filter removed {submission.id} file with URL {submission.url}")
                 continue
             try:
-                res.download({'max_wait_time': self.args.max_wait_time})
+                res.download({"max_wait_time": self.args.max_wait_time})
             except errors.BulkDownloaderException as e:
-                logger.error(f'Failed to download resource {res.url} in submission {submission.id} '
-                             f'with downloader {downloader_class.__name__}: {e}')
+                logger.error(
+                    f"Failed to download resource {res.url} in submission {submission.id} "
+                    f"with downloader {downloader_class.__name__}: {e}"
+                )
                 return
             resource_hash = res.hash.hexdigest()
             destination.parent.mkdir(parents=True, exist_ok=True)
             if resource_hash in self.master_hash_list:
                 if self.args.no_dupes:
-                    logger.info(
-                        f'Resource hash {resource_hash} from submission {submission.id} downloaded elsewhere')
+                    logger.info(f"Resource hash {resource_hash} from submission {submission.id} downloaded elsewhere")
                     return
                 elif self.args.make_hard_links:
-                    self.master_hash_list[resource_hash].link_to(destination)
+                    try:
+                        destination.hardlink_to(self.master_hash_list[resource_hash])
+                    except AttributeError:
+                        self.master_hash_list[resource_hash].link_to(destination)
                     logger.info(
-                        f'Hard link made linking {destination} to {self.master_hash_list[resource_hash]}'
-                        f' in submission {submission.id}')
+                        f"Hard link made linking {destination} to {self.master_hash_list[resource_hash]}"
+                        f" in submission {submission.id}"
+                    )
                     return
             try:
-                with open(destination, 'wb') as file:
+                with destination.open("wb") as file:
                     file.write(res.content)
-                logger.debug(f'Written file to {destination}')
+                logger.debug(f"Written file to {destination}")
             except OSError as e:
                 logger.exception(e)
-                logger.error(f'Failed to write file in submission {submission.id} to {destination}: {e}')
+                logger.error(f"Failed to write file in submission {submission.id} to {destination}: {e}")
                 return
             creation_time = time.mktime(datetime.fromtimestamp(submission.created_utc).timetuple())
             os.utime(destination, (creation_time, creation_time))
             self.master_hash_list[resource_hash] = destination
-            logger.debug(f'Hash added to master list: {resource_hash}')
-        logger.info(f'Downloaded submission {submission.id} from {submission.subreddit.display_name}')
+            logger.debug(f"Hash added to master list: {resource_hash}")
+        logger.info(f"Downloaded submission {submission.id} from {submission.subreddit.display_name}")

     @staticmethod
     def scan_existing_files(directory: Path) -> dict[str, Path]:
         files = []
-        for (dirpath, dirnames, filenames) in os.walk(directory):
+        for (dirpath, _dirnames, filenames) in os.walk(directory):
             files.extend([Path(dirpath, file) for file in filenames])
-        logger.info(f'Calculating hashes for {len(files)} files')
+        logger.info(f"Calculating hashes for {len(files)} files")

         pool = Pool(15)
         results = pool.map(_calc_hash, files)
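The hard-link change above prefers Path.hardlink_to, added in Python 3.10 and taking the existing file as its argument, and falls back to the older Path.link_to (reversed argument order; deprecated in 3.10 and removed in 3.12) on interpreters that lack it. A minimal sketch of the same fallback:

    from pathlib import Path

    def make_hard_link(existing: Path, new: Path) -> None:
        try:
            new.hardlink_to(existing)  # Python 3.10+
        except AttributeError:
            existing.link_to(new)      # older interpreters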
@@ -1,4 +1,6 @@
-#!/usr/bin/env
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-


 class BulkDownloaderException(Exception):
     pass
@ -1,12 +1,13 @@
|
|||
#!/usr/bin/env python3
|
||||
# coding=utf-8
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
import platform
|
||||
import re
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
from typing import Optional, Union
|
||||
|
||||
from praw.models import Comment, Submission
|
||||
|
||||
|
@@ -18,165 +19,196 @@ logger = logging.getLogger(__name__)

 class FileNameFormatter:
     key_terms = (
-        'date',
-        'flair',
-        'postid',
-        'redditor',
-        'subreddit',
-        'title',
-        'upvotes',
+        "date",
+        "flair",
+        "postid",
+        "redditor",
+        "subreddit",
+        "title",
+        "upvotes",
     )
+    WINDOWS_MAX_PATH_LENGTH = 260
+    LINUX_MAX_PATH_LENGTH = 4096

-    def __init__(self, file_format_string: str, directory_format_string: str, time_format_string: str):
+    def __init__(
+        self,
+        file_format_string: str,
+        directory_format_string: str,
+        time_format_string: str,
+        restriction_scheme: Optional[str] = None,
+    ):
         if not self.validate_string(file_format_string):
             raise BulkDownloaderException(f'"{file_format_string}" is not a valid format string')
         self.file_format_string = file_format_string
-        self.directory_format_string: list[str] = directory_format_string.split('/')
+        self.directory_format_string: list[str] = directory_format_string.split("/")
         self.time_format_string = time_format_string
+        self.restiction_scheme = restriction_scheme.lower().strip() if restriction_scheme else None
+        if self.restiction_scheme == "windows":
+            self.max_path = self.WINDOWS_MAX_PATH_LENGTH
+        else:
+            self.max_path = self.find_max_path_length()

-    def _format_name(self, submission: (Comment, Submission), format_string: str) -> str:
+    def _format_name(self, submission: Union[Comment, Submission], format_string: str) -> str:
         if isinstance(submission, Submission):
             attributes = self._generate_name_dict_from_submission(submission)
         elif isinstance(submission, Comment):
             attributes = self._generate_name_dict_from_comment(submission)
         else:
-            raise BulkDownloaderException(f'Cannot name object {type(submission).__name__}')
+            raise BulkDownloaderException(f"Cannot name object {type(submission).__name__}")
         result = format_string
         for key in attributes.keys():
-            if re.search(fr'(?i).*{{{key}}}.*', result):
-                key_value = str(attributes.get(key, 'unknown'))
+            if re.search(rf"(?i).*{{{key}}}.*", result):
+                key_value = str(attributes.get(key, "unknown"))
                 key_value = FileNameFormatter._convert_unicode_escapes(key_value)
-                key_value = key_value.replace('\\', '\\\\')
-                result = re.sub(fr'(?i){{{key}}}', key_value, result)
+                key_value = key_value.replace("\\", "\\\\")
+                result = re.sub(rf"(?i){{{key}}}", key_value, result)

-        result = result.replace('/', '')
+        result = result.replace("/", "")

-        if platform.system() == 'Windows':
-            result = FileNameFormatter._format_for_windows(result)
+        if self.restiction_scheme is None:
+            if platform.system() == "Windows":
+                result = FileNameFormatter._format_for_windows(result)
+        elif self.restiction_scheme == "windows":
+            logger.debug("Forcing Windows-compatible filenames")
+            result = FileNameFormatter._format_for_windows(result)

         return result

     @staticmethod
     def _convert_unicode_escapes(in_string: str) -> str:
-        pattern = re.compile(r'(\\u\d{4})')
+        pattern = re.compile(r"(\\u\d{4})")
         matches = re.search(pattern, in_string)
         if matches:
             for match in matches.groups():
-                converted_match = bytes(match, 'utf-8').decode('unicode-escape')
+                converted_match = bytes(match, "utf-8").decode("unicode-escape")
                 in_string = in_string.replace(match, converted_match)
         return in_string

     def _generate_name_dict_from_submission(self, submission: Submission) -> dict:
         submission_attributes = {
-            'title': submission.title,
-            'subreddit': submission.subreddit.display_name,
-            'redditor': submission.author.name if submission.author else 'DELETED',
-            'postid': submission.id,
-            'upvotes': submission.score,
-            'flair': submission.link_flair_text,
-            'date': self._convert_timestamp(submission.created_utc),
+            "title": submission.title,
+            "subreddit": submission.subreddit.display_name,
+            "redditor": submission.author.name if submission.author else "DELETED",
+            "postid": submission.id,
+            "upvotes": submission.score,
+            "flair": submission.link_flair_text,
+            "date": self._convert_timestamp(submission.created_utc),
         }
         return submission_attributes

     def _convert_timestamp(self, timestamp: float) -> str:
         input_time = datetime.datetime.fromtimestamp(timestamp)
-        if self.time_format_string.upper().strip() == 'ISO':
+        if self.time_format_string.upper().strip() == "ISO":
             return input_time.isoformat()
         else:
             return input_time.strftime(self.time_format_string)

     def _generate_name_dict_from_comment(self, comment: Comment) -> dict:
         comment_attributes = {
-            'title': comment.submission.title,
-            'subreddit': comment.subreddit.display_name,
-            'redditor': comment.author.name if comment.author else 'DELETED',
-            'postid': comment.id,
-            'upvotes': comment.score,
-            'flair': '',
-            'date': self._convert_timestamp(comment.created_utc),
+            "title": comment.submission.title,
+            "subreddit": comment.subreddit.display_name,
+            "redditor": comment.author.name if comment.author else "DELETED",
+            "postid": comment.id,
+            "upvotes": comment.score,
+            "flair": "",
+            "date": self._convert_timestamp(comment.created_utc),
         }
         return comment_attributes

     def format_path(
-            self,
-            resource: Resource,
-            destination_directory: Path,
-            index: Optional[int] = None,
+        self,
+        resource: Resource,
+        destination_directory: Path,
+        index: Optional[int] = None,
     ) -> Path:
         subfolder = Path(
             destination_directory,
             *[self._format_name(resource.source_submission, part) for part in self.directory_format_string],
         )
-        index = f'_{str(index)}' if index else ''
+        index = f"_{index}" if index else ""
         if not resource.extension:
-            raise BulkDownloaderException(f'Resource from {resource.url} has no extension')
-        ending = index + resource.extension
+            raise BulkDownloaderException(f"Resource from {resource.url} has no extension")
         file_name = str(self._format_name(resource.source_submission, self.file_format_string))
+
+        file_name = re.sub(r"\n", " ", file_name)
+
+        if not re.match(r".*\.$", file_name) and not re.match(r"^\..*", resource.extension):
+            ending = index + "." + resource.extension
+        else:
+            ending = index + resource.extension
+
         try:
-            file_path = self._limit_file_name_length(file_name, ending, subfolder)
+            file_path = self.limit_file_name_length(file_name, ending, subfolder)
         except TypeError:
-            raise BulkDownloaderException(f'Could not determine path name: {subfolder}, {index}, {resource.extension}')
+            raise BulkDownloaderException(f"Could not determine path name: {subfolder}, {index}, {resource.extension}")
         return file_path

-    @staticmethod
-    def _limit_file_name_length(filename: str, ending: str, root: Path) -> Path:
+    def limit_file_name_length(self, filename: str, ending: str, root: Path) -> Path:
         root = root.resolve().expanduser()
-        possible_id = re.search(r'((?:_\w{6})?$)', filename)
+        possible_id = re.search(r"((?:_\w{6})?$)", filename)
         if possible_id:
             ending = possible_id.group(1) + ending
-            filename = filename[:possible_id.start()]
-        max_path = FileNameFormatter.find_max_path_length()
-        max_length_chars = 255 - len(ending)
-        max_length_bytes = 255 - len(ending.encode('utf-8'))
+            filename = filename[: possible_id.start()]
+        max_path = self.max_path
+        max_file_part_length_chars = 255 - len(ending)
+        max_file_part_length_bytes = 255 - len(ending.encode("utf-8"))
         max_path_length = max_path - len(ending) - len(str(root)) - 1
-        while len(filename) > max_length_chars or \
-                len(filename.encode('utf-8')) > max_length_bytes or \
-                len(filename) > max_path_length:
-            filename = filename[:-1]
-        return Path(root, filename + ending)
+
+        out = Path(root, filename + ending)
+        while any(
+            [
+                len(filename) > max_file_part_length_chars,
+                len(filename.encode("utf-8")) > max_file_part_length_bytes,
+                len(str(out)) > max_path_length,
+            ]
+        ):
+            filename = filename[:-1]
+            out = Path(root, filename + ending)
+
+        return out

     @staticmethod
     def find_max_path_length() -> int:
         try:
-            return int(subprocess.check_output(['getconf', 'PATH_MAX', '/']))
+            return int(subprocess.check_output(["getconf", "PATH_MAX", "/"]))
         except (ValueError, subprocess.CalledProcessError, OSError):
-            if platform.system() == 'Windows':
-                return 260
+            if platform.system() == "Windows":
+                return FileNameFormatter.WINDOWS_MAX_PATH_LENGTH
             else:
-                return 4096
+                return FileNameFormatter.LINUX_MAX_PATH_LENGTH

     def format_resource_paths(
-            self,
-            resources: list[Resource],
-            destination_directory: Path,
+        self,
+        resources: list[Resource],
+        destination_directory: Path,
     ) -> list[tuple[Path, Resource]]:
         out = []
         if len(resources) == 1:
             try:
                 out.append((self.format_path(resources[0], destination_directory, None), resources[0]))
             except BulkDownloaderException as e:
-                logger.error(f'Could not generate file path for resource {resources[0].url}: {e}')
-                logger.exception('Could not generate file path')
+                logger.error(f"Could not generate file path for resource {resources[0].url}: {e}")
+                logger.exception("Could not generate file path")
         else:
             for i, res in enumerate(resources, start=1):
-                logger.log(9, f'Formatting filename with index {i}')
+                logger.log(9, f"Formatting filename with index {i}")
                 try:
                     out.append((self.format_path(res, destination_directory, i), res))
                 except BulkDownloaderException as e:
-                    logger.error(f'Could not generate file path for resource {res.url}: {e}')
-                    logger.exception('Could not generate file path')
+                    logger.error(f"Could not generate file path for resource {res.url}: {e}")
+                    logger.exception("Could not generate file path")
         return out

     @staticmethod
     def validate_string(test_string: str) -> bool:
         if not test_string:
             return False
-        result = any([f'{{{key}}}' in test_string.lower() for key in FileNameFormatter.key_terms])
+        result = any([f"{{{key}}}" in test_string.lower() for key in FileNameFormatter.key_terms])
         if result:
-            if 'POSTID' not in test_string:
-                logger.warning('Some files might not be downloaded due to name conflicts as filenames are'
-                               ' not guaranteed to be be unique without {POSTID}')
+            if "POSTID" not in test_string:
+                logger.warning(
+                    "Some files might not be downloaded due to name conflicts as filenames are"
+                    " not guaranteed to be be unique without {POSTID}"
+                )
             return True
         else:
             return False
@@ -185,11 +217,11 @@ class FileNameFormatter:
     def _format_for_windows(input_string: str) -> str:
         invalid_characters = r'<>:"\/|?*'
         for char in invalid_characters:
-            input_string = input_string.replace(char, '')
+            input_string = input_string.replace(char, "")
         input_string = FileNameFormatter._strip_emojis(input_string)
         return input_string

     @staticmethod
     def _strip_emojis(input_string: str) -> str:
-        result = input_string.encode('ascii', errors='ignore').decode('utf-8')
+        result = input_string.encode("ascii", errors="ignore").decode("utf-8")
         return result
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import configparser
 import logging

@@ -17,7 +17,6 @@ logger = logging.getLogger(__name__)


 class OAuth2Authenticator:
-
     def __init__(self, wanted_scopes: set[str], client_id: str, client_secret: str):
         self._check_scopes(wanted_scopes)
         self.scopes = wanted_scopes
@@ -26,39 +25,41 @@ class OAuth2Authenticator:
     @staticmethod
     def _check_scopes(wanted_scopes: set[str]):
-        response = requests.get('https://www.reddit.com/api/v1/scopes.json',
-                                headers={'User-Agent': 'fetch-scopes test'})
+        response = requests.get(
+            "https://www.reddit.com/api/v1/scopes.json", headers={"User-Agent": "fetch-scopes test"}
+        )
         known_scopes = [scope for scope, data in response.json().items()]
-        known_scopes.append('*')
+        known_scopes.append("*")
         for scope in wanted_scopes:
             if scope not in known_scopes:
-                raise BulkDownloaderException(f'Scope {scope} is not known to reddit')
+                raise BulkDownloaderException(f"Scope {scope} is not known to reddit")

     @staticmethod
     def split_scopes(scopes: str) -> set[str]:
-        scopes = re.split(r'[,: ]+', scopes)
+        scopes = re.split(r"[,: ]+", scopes)
         return set(scopes)

     def retrieve_new_token(self) -> str:
         reddit = praw.Reddit(
-            redirect_uri='http://localhost:7634',
-            user_agent='obtain_refresh_token for BDFR',
+            redirect_uri="http://localhost:7634",
+            user_agent="obtain_refresh_token for BDFR",
             client_id=self.client_id,
-            client_secret=self.client_secret)
+            client_secret=self.client_secret,
+        )
         state = str(random.randint(0, 65000))
-        url = reddit.auth.url(self.scopes, state, 'permanent')
-        logger.warning('Authentication action required before the program can proceed')
-        logger.warning(f'Authenticate at {url}')
+        url = reddit.auth.url(self.scopes, state, "permanent")
+        logger.warning("Authentication action required before the program can proceed")
+        logger.warning(f"Authenticate at {url}")

         client = self.receive_connection()
-        data = client.recv(1024).decode('utf-8')
-        param_tokens = data.split(' ', 2)[1].split('?', 1)[1].split('&')
-        params = {key: value for (key, value) in [token.split('=') for token in param_tokens]}
+        data = client.recv(1024).decode("utf-8")
+        param_tokens = data.split(" ", 2)[1].split("?", 1)[1].split("&")
+        params = {key: value for (key, value) in [token.split("=") for token in param_tokens]}

-        if state != params['state']:
+        if state != params["state"]:
             self.send_message(client)
             raise RedditAuthenticationError(f'State mismatch in OAuth2. Expected: {state} Received: {params["state"]}')
-        elif 'error' in params:
+        elif "error" in params:
             self.send_message(client)
             raise RedditAuthenticationError(f'Error in OAuth2: {params["error"]}')
@@ -70,19 +71,19 @@ class OAuth2Authenticator:
     def receive_connection() -> socket.socket:
         server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        server.bind(('0.0.0.0', 7634))
-        logger.log(9, 'Server listening on 0.0.0.0:7634')
+        server.bind(("0.0.0.0", 7634))
+        logger.log(9, "Server listening on 0.0.0.0:7634")

         server.listen(1)
         client = server.accept()[0]
         server.close()
-        logger.log(9, 'Server closed')
+        logger.log(9, "Server closed")

         return client

     @staticmethod
-    def send_message(client: socket.socket, message: str = ''):
-        client.send(f'HTTP/1.1 200 OK\r\n\r\n{message}'.encode('utf-8'))
+    def send_message(client: socket.socket, message: str = ""):
+        client.send(f"HTTP/1.1 200 OK\r\n\r\n{message}".encode("utf-8"))
         client.close()
@@ -94,14 +95,14 @@ class OAuth2TokenManager(praw.reddit.BaseTokenManager):
     def pre_refresh_callback(self, authorizer: praw.reddit.Authorizer):
         if authorizer.refresh_token is None:
-            if self.config.has_option('DEFAULT', 'user_token'):
-                authorizer.refresh_token = self.config.get('DEFAULT', 'user_token')
-                logger.log(9, 'Loaded OAuth2 token for authoriser')
+            if self.config.has_option("DEFAULT", "user_token"):
+                authorizer.refresh_token = self.config.get("DEFAULT", "user_token")
+                logger.log(9, "Loaded OAuth2 token for authoriser")
             else:
-                raise RedditAuthenticationError('No auth token loaded in configuration')
+                raise RedditAuthenticationError("No auth token loaded in configuration")

     def post_refresh_callback(self, authorizer: praw.reddit.Authorizer):
-        self.config.set('DEFAULT', 'user_token', authorizer.refresh_token)
-        with open(self.config_location, 'w') as file:
+        self.config.set("DEFAULT", "user_token", authorizer.refresh_token)
+        with Path(self.config_location).open(mode="w") as file:
             self.config.write(file, True)
-        logger.log(9, f'Written OAuth2 token from authoriser to {self.config_location}')
+        logger.log(9, f"Written OAuth2 token from authoriser to {self.config_location}")
@@ -1,12 +1,13 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import hashlib
 import logging
 import re
 import time
 import urllib.parse
-from typing import Callable, Optional
+from collections.abc import Callable
+from typing import Optional

 import _hashlib
 import requests
@@ -30,33 +31,7 @@ class Resource:
     @staticmethod
     def retry_download(url: str) -> Callable:
-        max_wait_time = 300
-
-        def http_download(download_parameters: dict) -> Optional[bytes]:
-            current_wait_time = 60
-            if 'max_wait_time' in download_parameters:
-                max_wait_time = download_parameters['max_wait_time']
-            else:
-                max_wait_time = 300
-            while True:
-                try:
-                    response = requests.get(url)
-                    if re.match(r'^2\d{2}', str(response.status_code)) and response.content:
-                        return response.content
-                    elif response.status_code in (408, 429):
-                        raise requests.exceptions.ConnectionError(f'Response code {response.status_code}')
-                    else:
-                        raise BulkDownloaderException(
-                            f'Unrecoverable error requesting resource: HTTP Code {response.status_code}')
-                except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError) as e:
-                    logger.warning(f'Error occured downloading from {url}, waiting {current_wait_time} seconds: {e}')
-                    time.sleep(current_wait_time)
-                    if current_wait_time < max_wait_time:
-                        current_wait_time += 60
-                    else:
-                        logger.error(f'Max wait time exceeded for resource at url {url}')
-                        raise
-        return http_download
+        return lambda global_params: Resource.http_download(url, global_params)

     def download(self, download_parameters: Optional[dict] = None):
         if download_parameters is None:
@@ -65,7 +40,7 @@ class Resource:
         try:
             content = self.download_function(download_parameters)
         except requests.exceptions.ConnectionError as e:
-            raise BulkDownloaderException(f'Could not download resource: {e}')
+            raise BulkDownloaderException(f"Could not download resource: {e}")
         except BulkDownloaderException:
             raise
         if content:
@@ -77,8 +52,36 @@ class Resource:
         self.hash = hashlib.md5(self.content)

     def _determine_extension(self) -> Optional[str]:
-        extension_pattern = re.compile(r'.*(\..{3,5})$')
+        extension_pattern = re.compile(r".*(\..{3,5})$")
         stripped_url = urllib.parse.urlsplit(self.url).path
         match = re.search(extension_pattern, stripped_url)
         if match:
             return match.group(1)
+
+    @staticmethod
+    def http_download(url: str, download_parameters: dict) -> Optional[bytes]:
+        headers = download_parameters.get("headers")
+        current_wait_time = 60
+        if "max_wait_time" in download_parameters:
+            max_wait_time = download_parameters["max_wait_time"]
+        else:
+            max_wait_time = 300
+        while True:
+            try:
+                response = requests.get(url, headers=headers)
+                if re.match(r"^2\d{2}", str(response.status_code)) and response.content:
+                    return response.content
+                elif response.status_code in (408, 429):
+                    raise requests.exceptions.ConnectionError(f"Response code {response.status_code}")
+                else:
+                    raise BulkDownloaderException(
+                        f"Unrecoverable error requesting resource: HTTP Code {response.status_code}"
+                    )
+            except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError) as e:
+                logger.warning(f"Error occured downloading from {url}, waiting {current_wait_time} seconds: {e}")
+                time.sleep(current_wait_time)
+                if current_wait_time < max_wait_time:
+                    current_wait_time += 60
+                else:
+                    logger.error(f"Max wait time exceeded for resource at url {url}")
+                    raise
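The shape of this refactor is worth noting: retry_download no longer contains the retry loop itself but returns a lambda closing over the URL, deferring to the shared static Resource.http_download above, so global and per-site parameters can be merged into the single dict it receives. A minimal sketch of the closure pattern under those assumptions (the URL and wait value are illustrative, and the stub stands in for the real method):

from collections.abc import Callable
from typing import Optional


def http_download(url: str, download_parameters: dict) -> Optional[bytes]:
    # Stand-in for Resource.http_download: the shared loop reads all of its
    # settings from whatever dict the caller assembled.
    print(f"GET {url} with headers={download_parameters.get('headers')}")
    return b""


def retry_download(url: str) -> Callable:
    # The factory captures only the URL; parameters arrive at call time.
    return lambda global_params: http_download(url, global_params)


download = retry_download("https://example.com/file.png")  # illustrative URL
content = download({"max_wait_time": 120})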
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import configparser

@@ -0,0 +1,2 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import logging
 from abc import ABC, abstractmethod

@@ -31,7 +31,7 @@ class BaseDownloader(ABC):
             res = requests.get(url, cookies=cookies, headers=headers)
         except requests.exceptions.RequestException as e:
             logger.exception(e)
-            raise SiteDownloaderError(f'Failed to get page {url}')
+            raise SiteDownloaderError(f"Failed to get page {url}")
         if res.status_code != 200:
-            raise ResourceNotFound(f'Server responded with {res.status_code} to {url}')
+            raise ResourceNotFound(f"Server responded with {res.status_code} to {url}")
         return res
bdfr/site_downloaders/delay_for_reddit.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import logging
+from typing import Optional
+
+from praw.models import Submission
+
+from bdfr.resource import Resource
+from bdfr.site_authenticator import SiteAuthenticator
+from bdfr.site_downloaders.base_downloader import BaseDownloader
+
+logger = logging.getLogger(__name__)
+
+
+class DelayForReddit(BaseDownloader):
+    def __init__(self, post: Submission):
+        super().__init__(post)
+
+    def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
+        media = DelayForReddit.retrieve_url(self.post.url)
+        return [Resource(self.post, media.url, Resource.retry_download(media.url))]
@@ -1,11 +1,12 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 from typing import Optional

 from praw.models import Submission

-from bdfr.site_authenticator import SiteAuthenticator
 from bdfr.resource import Resource
+from bdfr.site_authenticator import SiteAuthenticator
 from bdfr.site_downloaders.base_downloader import BaseDownloader
@@ -1,15 +1,15 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import re
 import urllib.parse
-from typing import Type

 from bdfr.exceptions import NotADownloadableLinkError
 from bdfr.site_downloaders.base_downloader import BaseDownloader
+from bdfr.site_downloaders.delay_for_reddit import DelayForReddit
 from bdfr.site_downloaders.direct import Direct
 from bdfr.site_downloaders.erome import Erome
-from bdfr.site_downloaders.fallback_downloaders.youtubedl_fallback import YoutubeDlFallback
+from bdfr.site_downloaders.fallback_downloaders.ytdlp_fallback import YtdlpFallback
 from bdfr.site_downloaders.gallery import Gallery
 from bdfr.site_downloaders.gfycat import Gfycat
 from bdfr.site_downloaders.imgur import Imgur
@@ -17,65 +17,71 @@ from bdfr.site_downloaders.pornhub import PornHub
 from bdfr.site_downloaders.redgifs import Redgifs
 from bdfr.site_downloaders.self_post import SelfPost
 from bdfr.site_downloaders.vidble import Vidble
+from bdfr.site_downloaders.vreddit import VReddit
 from bdfr.site_downloaders.youtube import Youtube


 class DownloadFactory:
     @staticmethod
-    def pull_lever(url: str) -> Type[BaseDownloader]:
-        sanitised_url = DownloadFactory.sanitise_url(url)
-        if re.match(r'(i\.)?imgur.*\.gifv$', sanitised_url):
+    def pull_lever(url: str) -> type[BaseDownloader]:
+        sanitised_url = DownloadFactory.sanitise_url(url).lower()
+        if re.match(r"(i\.|m\.|o\.)?imgur", sanitised_url):
             return Imgur
-        elif re.match(r'.*/.*\.\w{3,4}(\?[\w;&=]*)?$', sanitised_url) and \
-                not DownloadFactory.is_web_resource(sanitised_url):
-            return Direct
-        elif re.match(r'erome\.com.*', sanitised_url):
-            return Erome
-        elif re.match(r'reddit\.com/gallery/.*', sanitised_url):
-            return Gallery
-        elif re.match(r'gfycat\.', sanitised_url):
-            return Gfycat
-        elif re.match(r'(m\.)?imgur.*', sanitised_url):
-            return Imgur
-        elif re.match(r'(redgifs|gifdeliverynetwork)', sanitised_url):
+        elif re.match(r"(i\.|thumbs\d\.|v\d\.)?(redgifs|gifdeliverynetwork)", sanitised_url):
             return Redgifs
-        elif re.match(r'reddit\.com/r/', sanitised_url):
-            return SelfPost
-        elif re.match(r'(m\.)?youtu\.?be', sanitised_url):
-            return Youtube
-        elif re.match(r'i\.redd\.it.*', sanitised_url):
+        elif re.match(r"(thumbs\.|giant\.)?gfycat\.", sanitised_url):
+            return Gfycat
+        elif re.match(r".*/.*\.[a-zA-Z34]{3,4}(\?[\w;&=]*)?$", sanitised_url) and not DownloadFactory.is_web_resource(
+            sanitised_url
+        ):
+            return Direct
+        elif re.match(r"erome\.com.*", sanitised_url):
+            return Erome
+        elif re.match(r"delayforreddit\.com", sanitised_url):
+            return DelayForReddit
+        elif re.match(r"reddit\.com/gallery/.*", sanitised_url):
+            return Gallery
+        elif re.match(r"patreon\.com.*", sanitised_url):
+            return Gallery
+        elif re.match(r"reddit\.com/r/", sanitised_url):
+            return SelfPost
+        elif re.match(r"(m\.)?youtu\.?be", sanitised_url):
+            return Youtube
+        elif re.match(r"i\.redd\.it.*", sanitised_url):
             return Direct
-        elif re.match(r'pornhub\.com.*', sanitised_url):
+        elif re.match(r"v\.redd\.it.*", sanitised_url):
+            return VReddit
+        elif re.match(r"pornhub\.com.*", sanitised_url):
             return PornHub
-        elif re.match(r'vidble\.com', sanitised_url):
+        elif re.match(r"vidble\.com", sanitised_url):
             return Vidble
-        elif YoutubeDlFallback.can_handle_link(sanitised_url):
-            return YoutubeDlFallback
+        elif YtdlpFallback.can_handle_link(sanitised_url):
+            return YtdlpFallback
         else:
-            raise NotADownloadableLinkError(f'No downloader module exists for url {url}')
+            raise NotADownloadableLinkError(f"No downloader module exists for url {url}")

     @staticmethod
     def sanitise_url(url: str) -> str:
-        beginning_regex = re.compile(r'\s*(www\.?)?')
+        beginning_regex = re.compile(r"\s*(www\.?)?")
         split_url = urllib.parse.urlsplit(url)
         split_url = split_url.netloc + split_url.path
-        split_url = re.sub(beginning_regex, '', split_url)
+        split_url = re.sub(beginning_regex, "", split_url)
         return split_url

     @staticmethod
     def is_web_resource(url: str) -> bool:
         web_extensions = (
-            'asp',
-            'aspx',
-            'cfm',
-            'cfml',
-            'css',
-            'htm',
-            'html',
-            'js',
-            'php',
-            'php3',
-            'xhtml',
+            "asp",
+            "aspx",
+            "cfm",
+            "cfml",
+            "css",
+            "htm",
+            "html",
+            "js",
+            "php",
+            "php3",
+            "xhtml",
         )
         if re.match(rf'(?i).*/.*\.({"|".join(web_extensions)})$', url):
             return True
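With the new branches in place, pull_lever stays the single dispatch point: given a URL it returns the downloader class to instantiate with the submission. A hypothetical usage sketch (the URL is illustrative, and post is assumed to be a praw.models.Submission obtained elsewhere):

from bdfr.site_downloaders.download_factory import DownloadFactory

url = "https://i.redd.it/abcdef.jpg"  # illustrative URL; matches the i.redd.it branch
downloader_class = DownloadFactory.pull_lever(url)  # resolves to Direct for this URL
downloader = downloader_class(post)  # 'post' is an assumed Submission object
resources = downloader.find_resources()  # list[Resource] ready for downloading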
@@ -1,7 +1,9 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 import logging
 import re
+from collections.abc import Callable
 from typing import Optional

 import bs4
@@ -23,23 +25,34 @@ class Erome(BaseDownloader):
         links = self._get_links(self.post.url)

         if not links:
-            raise SiteDownloaderError('Erome parser could not find any links')
+            raise SiteDownloaderError("Erome parser could not find any links")

         out = []
         for link in links:
-            if not re.match(r'https?://.*', link):
-                link = 'https://' + link
-            out.append(Resource(self.post, link, Resource.retry_download(link)))
+            if not re.match(r"https?://.*", link):
+                link = "https://" + link
+            out.append(Resource(self.post, link, self.erome_download(link)))
         return out

     @staticmethod
     def _get_links(url: str) -> set[str]:
         page = Erome.retrieve_url(url)
-        soup = bs4.BeautifulSoup(page.text, 'html.parser')
-        front_images = soup.find_all('img', attrs={'class': 'lasyload'})
-        out = [im.get('data-src') for im in front_images]
+        soup = bs4.BeautifulSoup(page.text, "html.parser")
+        front_images = soup.find_all("img", attrs={"class": "lasyload"})
+        out = [im.get("data-src") for im in front_images]

-        videos = soup.find_all('source')
-        out.extend([vid.get('src') for vid in videos])
+        videos = soup.find_all("source")
+        out.extend([vid.get("src") for vid in videos])

         return set(out)

+    @staticmethod
+    def erome_download(url: str) -> Callable:
+        download_parameters = {
+            "headers": {
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
+                " Chrome/88.0.4324.104 Safari/537.36",
+                "Referer": "https://www.erome.com/",
+            },
+        }
+        return lambda global_params: Resource.http_download(url, global_params | download_parameters)
@@ -0,0 +1,2 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-

@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 from abc import ABC, abstractmethod

@@ -7,7 +7,6 @@ from bdfr.site_downloaders.base_downloader import BaseDownloader


 class BaseFallbackDownloader(BaseDownloader, ABC):
-
     @staticmethod
     @abstractmethod
     def can_handle_link(url: str) -> bool:
@@ -1,11 +1,12 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import logging
 from typing import Optional

 from praw.models import Submission

+from bdfr.exceptions import NotADownloadableLinkError
 from bdfr.resource import Resource
 from bdfr.site_authenticator import SiteAuthenticator
 from bdfr.site_downloaders.fallback_downloaders.fallback_downloader import BaseFallbackDownloader

@@ -14,23 +15,24 @@ from bdfr.site_downloaders.youtube import Youtube
 logger = logging.getLogger(__name__)


-class YoutubeDlFallback(BaseFallbackDownloader, Youtube):
+class YtdlpFallback(BaseFallbackDownloader, Youtube):
     def __init__(self, post: Submission):
-        super(YoutubeDlFallback, self).__init__(post)
+        super(YtdlpFallback, self).__init__(post)

     def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
         out = Resource(
             self.post,
             self.post.url,
             super()._download_video({}),
-            super().get_video_attributes(self.post.url)['ext'],
+            super().get_video_attributes(self.post.url)["ext"],
         )
         return [out]

     @staticmethod
     def can_handle_link(url: str) -> bool:
-        attributes = YoutubeDlFallback.get_video_attributes(url)
+        try:
+            attributes = YtdlpFallback.get_video_attributes(url)
+        except NotADownloadableLinkError:
+            return False
         if attributes:
             return True
         else:
             return False
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 import logging
 from typing import Optional
@@ -20,27 +21,27 @@ class Gallery(BaseDownloader):

     def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
         try:
-            image_urls = self._get_links(self.post.gallery_data['items'])
+            image_urls = self._get_links(self.post.gallery_data["items"])
         except (AttributeError, TypeError):
             try:
-                image_urls = self._get_links(self.post.crosspost_parent_list[0]['gallery_data']['items'])
-            except (AttributeError, IndexError, TypeError):
-                logger.error(f'Could not find gallery data in submission {self.post.id}')
-                logger.exception('Gallery image find failure')
-                raise SiteDownloaderError('No images found in Reddit gallery')
+                image_urls = self._get_links(self.post.crosspost_parent_list[0]["gallery_data"]["items"])
+            except (AttributeError, IndexError, TypeError, KeyError):
+                logger.error(f"Could not find gallery data in submission {self.post.id}")
+                logger.exception("Gallery image find failure")
+                raise SiteDownloaderError("No images found in Reddit gallery")

         if not image_urls:
-            raise SiteDownloaderError('No images found in Reddit gallery')
+            raise SiteDownloaderError("No images found in Reddit gallery")
         return [Resource(self.post, url, Resource.retry_download(url)) for url in image_urls]

-    @ staticmethod
+    @staticmethod
     def _get_links(id_dict: list[dict]) -> list[str]:
         out = []
         for item in id_dict:
-            image_id = item['media_id']
-            possible_extensions = ('.jpg', '.png', '.gif', '.gifv', '.jpeg')
+            image_id = item["media_id"]
+            possible_extensions = (".jpg", ".png", ".gif", ".gifv", ".jpeg")
             for extension in possible_extensions:
-                test_url = f'https://i.redd.it/{image_id}{extension}'
+                test_url = f"https://i.redd.it/{image_id}{extension}"
                 response = requests.head(test_url)
                 if response.status_code == 200:
                     out.append(test_url)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 import json
 import re
@@ -21,22 +22,24 @@ class Gfycat(Redgifs):
         return super().find_resources(authenticator)

     @staticmethod
-    def _get_link(url: str) -> str:
-        gfycat_id = re.match(r'.*/(.*?)/?$', url).group(1)
-        url = 'https://gfycat.com/' + gfycat_id
+    def _get_link(url: str) -> set[str]:
+        gfycat_id = re.match(r".*/(.*?)(?:/?|-.*|\..{3-4})$", url).group(1)
+        url = "https://gfycat.com/" + gfycat_id

         response = Gfycat.retrieve_url(url)
-        if re.search(r'(redgifs|gifdeliverynetwork)', response.url):
+        if re.search(r"(redgifs|gifdeliverynetwork)", response.url):
             url = url.lower()  # Fixes error with old gfycat/redgifs links
             return Redgifs._get_link(url)

-        soup = BeautifulSoup(response.text, 'html.parser')
-        content = soup.find('script', attrs={'data-react-helmet': 'true', 'type': 'application/ld+json'})
+        soup = BeautifulSoup(response.text, "html.parser")
+        content = soup.find("script", attrs={"data-react-helmet": "true", "type": "application/ld+json"})

         try:
-            out = json.loads(content.contents[0])['video']['contentUrl']
+            out = json.loads(content.contents[0])["video"]["contentUrl"]
         except (IndexError, KeyError, AttributeError) as e:
-            raise SiteDownloaderError(f'Failed to download Gfycat link {url}: {e}')
+            raise SiteDownloaderError(f"Failed to download Gfycat link {url}: {e}")
         except json.JSONDecodeError as e:
-            raise SiteDownloaderError(f'Did not receive valid JSON data: {e}')
-        return out
+            raise SiteDownloaderError(f"Did not receive valid JSON data: {e}")
+        return {
+            out,
+        }
@@ -1,10 +1,10 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 import json
 import re
 from typing import Optional

-import bs4
 from praw.models import Submission

 from bdfr.exceptions import SiteDownloaderError

@@ -14,7 +14,6 @@ from bdfr.site_downloaders.base_downloader import BaseDownloader


 class Imgur(BaseDownloader):
-
     def __init__(self, post: Submission):
         super().__init__(post)
         self.raw_data = {}
@@ -23,59 +22,44 @@ class Imgur(BaseDownloader):
         self.raw_data = self._get_data(self.post.url)

         out = []
-        if 'album_images' in self.raw_data:
-            images = self.raw_data['album_images']
-            for image in images['images']:
-                out.append(self._compute_image_url(image))
+        if "is_album" in self.raw_data:
+            for image in self.raw_data["images"]:
+                if "mp4" in image:
+                    out.append(Resource(self.post, image["mp4"], Resource.retry_download(image["mp4"])))
+                else:
+                    out.append(Resource(self.post, image["link"], Resource.retry_download(image["link"])))
         else:
-            out.append(self._compute_image_url(self.raw_data))
+            if "mp4" in self.raw_data:
+                out.append(Resource(self.post, self.raw_data["mp4"], Resource.retry_download(self.raw_data["mp4"])))
+            else:
+                out.append(Resource(self.post, self.raw_data["link"], Resource.retry_download(self.raw_data["link"])))
         return out

-    def _compute_image_url(self, image: dict) -> Resource:
-        image_url = 'https://i.imgur.com/' + image['hash'] + self._validate_extension(image['ext'])
-        return Resource(self.post, image_url, Resource.retry_download(image_url))
-
     @staticmethod
     def _get_data(link: str) -> dict:
-        link = link.rstrip('?')
-        if re.match(r'(?i).*\.gifv$', link):
-            link = link.replace('i.imgur', 'imgur')
-            link = re.sub('(?i)\\.gifv$', '', link)
-
-        res = Imgur.retrieve_url(link, cookies={'over18': '1', 'postpagebeta': '0'})
-
-        soup = bs4.BeautifulSoup(res.text, 'html.parser')
-        scripts = soup.find_all('script', attrs={'type': 'text/javascript'})
-        scripts = [script.string.replace('\n', '') for script in scripts if script.string]
-
-        script_regex = re.compile(r'\s*\(function\(widgetFactory\)\s*{\s*widgetFactory\.mergeConfig\(\'gallery\'')
-        chosen_script = list(filter(lambda s: re.search(script_regex, s), scripts))
-        if len(chosen_script) != 1:
-            raise SiteDownloaderError(f'Could not read page source from {link}')
-
-        chosen_script = chosen_script[0]
-
-        outer_regex = re.compile(r'widgetFactory\.mergeConfig\(\'gallery\', ({.*})\);')
-        inner_regex = re.compile(r'image\s*:(.*),\s*group')
         try:
-            image_dict = re.search(outer_regex, chosen_script).group(1)
-            image_dict = re.search(inner_regex, image_dict).group(1)
+            if link.endswith("/"):
+                link = link.removesuffix("/")
+            if re.search(r".*/(.*?)(gallery/|a/)", link):
+                imgur_id = re.match(r".*/(?:gallery/|a/)(.*?)(?:/.*)?$", link).group(1)
+                link = f"https://api.imgur.com/3/album/{imgur_id}"
+            else:
+                imgur_id = re.match(r".*/(.*?)(?:_d)?(?:\..{0,})?$", link).group(1)
+                link = f"https://api.imgur.com/3/image/{imgur_id}"
         except AttributeError:
-            raise SiteDownloaderError(f'Could not find image dictionary in page source')
+            raise SiteDownloaderError(f"Could not extract Imgur ID from {link}")
+
+        headers = {
+            "referer": "https://imgur.com/",
+            "origin": "https://imgur.com",
+            "content-type": "application/json",
+            "Authorization": "Client-ID 546c25a59c58ad7",
+        }
+        res = Imgur.retrieve_url(link, headers=headers)

         try:
-            image_dict = json.loads(image_dict)
+            image_dict = json.loads(res.text)
         except json.JSONDecodeError as e:
-            raise SiteDownloaderError(f'Could not parse received dict as JSON: {e}')
+            raise SiteDownloaderError(f"Could not parse received response as JSON: {e}")

-        return image_dict
-
-    @staticmethod
-    def _validate_extension(extension_suffix: str) -> str:
-        extension_suffix = extension_suffix.strip('?1')
-        possible_extensions = ('.jpg', '.png', '.mp4', '.gif')
-        selection = [ext for ext in possible_extensions if ext == extension_suffix]
-        if len(selection) == 1:
-            return selection[0]
-        else:
-            raise SiteDownloaderError(f'"{extension_suffix}" is not recognized as a valid extension for Imgur')
+        return image_dict["data"]
@@ -1,11 +1,12 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import logging
 from typing import Optional

 from praw.models import Submission

+from bdfr.exceptions import SiteDownloaderError
 from bdfr.resource import Resource
 from bdfr.site_authenticator import SiteAuthenticator
 from bdfr.site_downloaders.youtube import Youtube
@@ -19,13 +20,18 @@ class PornHub(Youtube):

     def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
         ytdl_options = {
-            'format': 'best',
-            'nooverwrites': True,
+            "format": "best",
+            "nooverwrites": True,
         }
+        if video_attributes := super().get_video_attributes(self.post.url):
+            extension = video_attributes["ext"]
+        else:
+            raise SiteDownloaderError()
+
         out = Resource(
             self.post,
             self.post.url,
             super()._download_video(ytdl_options),
-            super().get_video_attributes(self.post.url)['ext'],
+            extension,
         )
         return [out]
@@ -1,9 +1,11 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 import json
 import re
 from typing import Optional

+import requests
 from praw.models import Submission

 from bdfr.exceptions import SiteDownloaderError
@@ -17,31 +19,68 @@ class Redgifs(BaseDownloader):
         super().__init__(post)

     def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
-        media_url = self._get_link(self.post.url)
-        return [Resource(self.post, media_url, Resource.retry_download(media_url), '.mp4')]
+        media_urls = self._get_link(self.post.url)
+        return [Resource(self.post, m, Resource.retry_download(m), None) for m in media_urls]

     @staticmethod
-    def _get_link(url: str) -> str:
+    def _get_id(url: str) -> str:
         try:
-            redgif_id = re.match(r'.*/(.*?)/?$', url).group(1)
+            if url.endswith("/"):
+                url = url.removesuffix("/")
+            redgif_id = re.match(r".*/(.*?)(?:#.*|\?.*|\..{0,})?$", url).group(1).lower()
+            if redgif_id.endswith("-mobile"):
+                redgif_id = redgif_id.removesuffix("-mobile")
         except AttributeError:
-            raise SiteDownloaderError(f'Could not extract Redgifs ID from {url}')
+            raise SiteDownloaderError(f"Could not extract Redgifs ID from {url}")
+        return redgif_id
+
+    @staticmethod
+    def _get_link(url: str) -> set[str]:
+        redgif_id = Redgifs._get_id(url)
+
+        auth_token = json.loads(Redgifs.retrieve_url("https://api.redgifs.com/v2/auth/temporary").text)["token"]
+        if not auth_token:
+            raise SiteDownloaderError("Unable to retrieve Redgifs API token")

         headers = {
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
-                          'Chrome/90.0.4430.93 Safari/537.36',
+            "referer": "https://www.redgifs.com/",
+            "origin": "https://www.redgifs.com",
+            "content-type": "application/json",
+            "Authorization": f"Bearer {auth_token}",
         }

-        content = Redgifs.retrieve_url(f'https://api.redgifs.com/v1/gfycats/{redgif_id}', headers=headers)
+        content = Redgifs.retrieve_url(f"https://api.redgifs.com/v2/gifs/{redgif_id}", headers=headers)

         if content is None:
-            raise SiteDownloaderError('Could not read the page source')
+            raise SiteDownloaderError("Could not read the page source")

         try:
-            out = json.loads(content.text)['gfyItem']['mp4Url']
-        except (KeyError, AttributeError):
-            raise SiteDownloaderError('Failed to find JSON data in page')
+            response_json = json.loads(content.text)
         except json.JSONDecodeError as e:
-            raise SiteDownloaderError(f'Received data was not valid JSON: {e}')
+            raise SiteDownloaderError(f"Received data was not valid JSON: {e}")
+
+        out = set()
+        try:
+            if response_json["gif"]["type"] == 1:  # type 1 is a video
+                if requests.get(response_json["gif"]["urls"]["hd"], headers=headers).ok:
+                    out.add(response_json["gif"]["urls"]["hd"])
+                else:
+                    out.add(response_json["gif"]["urls"]["sd"])
+            elif response_json["gif"]["type"] == 2:  # type 2 is an image
+                if response_json["gif"]["gallery"]:
+                    content = Redgifs.retrieve_url(
+                        f'https://api.redgifs.com/v2/gallery/{response_json["gif"]["gallery"]}'
+                    )
+                    response_json = json.loads(content.text)
+                    out = {p["urls"]["hd"] for p in response_json["gifs"]}
+                else:
+                    out.add(response_json["gif"]["urls"]["hd"])
+            else:
+                raise KeyError
+        except (KeyError, AttributeError):
+            raise SiteDownloaderError("Failed to find JSON data in page")
+
+        # Update subdomain if old one is returned
+        out = {re.sub("thumbs2", "thumbs3", link) for link in out}
+        out = {re.sub("thumbs3", "thumbs4", link) for link in out}
+        return out
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 import logging
 from typing import Optional
@@ -17,27 +18,29 @@ class SelfPost(BaseDownloader):
         super().__init__(post)

     def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
-        out = Resource(self.post, self.post.url, lambda: None, '.txt')
-        out.content = self.export_to_string().encode('utf-8')
+        out = Resource(self.post, self.post.url, lambda: None, ".txt")
+        out.content = self.export_to_string().encode("utf-8")
         out.create_hash()
         return [out]

     def export_to_string(self) -> str:
         """Self posts are formatted here"""
-        content = ("## ["
-                   + self.post.fullname
-                   + "]("
-                   + self.post.url
-                   + ")\n"
-                   + self.post.selftext
-                   + "\n\n---\n\n"
-                   + "submitted to [r/"
-                   + self.post.subreddit.title
-                   + "](https://www.reddit.com/r/"
-                   + self.post.subreddit.title
-                   + ") by [u/"
-                   + (self.post.author.name if self.post.author else "DELETED")
-                   + "](https://www.reddit.com/user/"
-                   + (self.post.author.name if self.post.author else "DELETED")
-                   + ")")
+        content = (
+            "## ["
+            + self.post.fullname
+            + "]("
+            + self.post.url
+            + ")\n"
+            + self.post.selftext
+            + "\n\n---\n\n"
+            + "submitted to [r/"
+            + self.post.subreddit.title
+            + "](https://www.reddit.com/r/"
+            + self.post.subreddit.title
+            + ") by [u/"
+            + (self.post.author.name if self.post.author else "DELETED")
+            + "](https://www.reddit.com/user/"
+            + (self.post.author.name if self.post.author else "DELETED")
+            + ")"
+        )
         return content
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import itertools
 import logging
 import re
@@ -22,27 +23,33 @@ class Vidble(BaseDownloader):
         super().__init__(post)

     def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
-        res = self.get_links(self.post.url)
+        try:
+            res = self.get_links(self.post.url)
+        except AttributeError:
+            raise SiteDownloaderError(f"Could not read page at {self.post.url}")
         if not res:
-            raise SiteDownloaderError(rf'No resources found at {self.post.url}')
+            raise SiteDownloaderError(rf"No resources found at {self.post.url}")
         res = [Resource(self.post, r, Resource.retry_download(r)) for r in res]
         return res

     @staticmethod
     def get_links(url: str) -> set[str]:
+        if not re.search(r"vidble.com/(show/|album/|watch\?v)", url):
+            url = re.sub(r"/(\w*?)$", r"/show/\1", url)
+
         page = requests.get(url)
-        soup = bs4.BeautifulSoup(page.text, 'html.parser')
-        content_div = soup.find('div', attrs={'id': 'ContentPlaceHolder1_divContent'})
-        images = content_div.find_all('img')
-        images = [i.get('src') for i in images]
-        videos = content_div.find_all('source', attrs={'type': 'video/mp4'})
-        videos = [v.get('src') for v in videos]
+        soup = bs4.BeautifulSoup(page.text, "html.parser")
+        content_div = soup.find("div", attrs={"id": "ContentPlaceHolder1_divContent"})
+        images = content_div.find_all("img")
+        images = [i.get("src") for i in images]
+        videos = content_div.find_all("source", attrs={"type": "video/mp4"})
+        videos = [v.get("src") for v in videos]
         resources = filter(None, itertools.chain(images, videos))
-        resources = ['https://www.vidble.com' + r for r in resources]
+        resources = ["https://www.vidble.com" + r for r in resources]
         resources = [Vidble.change_med_url(r) for r in resources]
         return set(resources)

     @staticmethod
     def change_med_url(url: str) -> str:
-        out = re.sub(r'_med(\..{3,4})$', r'\1', url)
+        out = re.sub(r"_med(\..{3,4})$", r"\1", url)
         return out
bdfr/site_downloaders/vreddit.py (new file, 42 lines)
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import logging
+from typing import Optional
+
+from praw.models import Submission
+
+from bdfr.exceptions import NotADownloadableLinkError
+from bdfr.resource import Resource
+from bdfr.site_authenticator import SiteAuthenticator
+from bdfr.site_downloaders.youtube import Youtube
+
+logger = logging.getLogger(__name__)
+
+
+class VReddit(Youtube):
+    def __init__(self, post: Submission):
+        super().__init__(post)
+
+    def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
+        ytdl_options = {
+            "playlistend": 1,
+            "nooverwrites": True,
+        }
+        download_function = self._download_video(ytdl_options)
+        extension = self.get_video_attributes(self.post.url)["ext"]
+        res = Resource(self.post, self.post.url, download_function, extension)
+        return [res]
+
+    @staticmethod
+    def get_video_attributes(url: str) -> dict:
+        result = VReddit.get_video_data(url)
+        if "ext" in result:
+            return result
+        else:
+            try:
+                result = result["entries"][0]
+                return result
+            except Exception as e:
+                logger.exception(e)
+                raise NotADownloadableLinkError(f"Video info extraction failed for {url}")
@@ -1,11 +1,13 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-

 import logging
 import tempfile
+from collections.abc import Callable
 from pathlib import Path
-from typing import Callable, Optional
+from typing import Optional

-import youtube_dl
+import yt_dlp
 from praw.models import Submission

 from bdfr.exceptions import NotADownloadableLinkError, SiteDownloaderError
@@ -22,51 +24,62 @@ class Youtube(BaseDownloader):

     def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
         ytdl_options = {
-            'format': 'best',
-            'playlistend': 1,
-            'nooverwrites': True,
+            "format": "best",
+            "playlistend": 1,
+            "nooverwrites": True,
         }
         download_function = self._download_video(ytdl_options)
-        try:
-            extension = self.get_video_attributes(self.post.url)['ext']
-        except KeyError:
-            raise NotADownloadableLinkError(f'Youtube-DL cannot download URL {self.post.url}')
+        extension = self.get_video_attributes(self.post.url)["ext"]
         res = Resource(self.post, self.post.url, download_function, extension)
         return [res]

     def _download_video(self, ytdl_options: dict) -> Callable:
-        yt_logger = logging.getLogger('youtube-dl')
+        yt_logger = logging.getLogger("youtube-dl")
         yt_logger.setLevel(logging.CRITICAL)
-        ytdl_options['quiet'] = True
-        ytdl_options['logger'] = yt_logger
+        ytdl_options["quiet"] = True
+        ytdl_options["logger"] = yt_logger

         def download(_: dict) -> bytes:
             with tempfile.TemporaryDirectory() as temp_dir:
                 download_path = Path(temp_dir).resolve()
-                ytdl_options['outtmpl'] = str(download_path) + '/' + 'test.%(ext)s'
+                ytdl_options["outtmpl"] = str(download_path) + "/" + "test.%(ext)s"
                 try:
-                    with youtube_dl.YoutubeDL(ytdl_options) as ydl:
+                    with yt_dlp.YoutubeDL(ytdl_options) as ydl:
                         ydl.download([self.post.url])
-                except youtube_dl.DownloadError as e:
-                    raise SiteDownloaderError(f'Youtube download failed: {e}')
+                except yt_dlp.DownloadError as e:
+                    raise SiteDownloaderError(f"Youtube download failed: {e}")

                 downloaded_files = list(download_path.iterdir())
-                if len(downloaded_files) > 0:
+                if downloaded_files:
                     downloaded_file = downloaded_files[0]
                 else:
                     raise NotADownloadableLinkError(f"No media exists in the URL {self.post.url}")
-                with open(downloaded_file, 'rb') as file:
+                with downloaded_file.open("rb") as file:
                     content = file.read()
             return content

         return download

     @staticmethod
-    def get_video_attributes(url: str) -> dict:
-        yt_logger = logging.getLogger('youtube-dl')
+    def get_video_data(url: str) -> dict:
+        yt_logger = logging.getLogger("youtube-dl")
         yt_logger.setLevel(logging.CRITICAL)
-        with youtube_dl.YoutubeDL({'logger': yt_logger, }) as ydl:
+        with yt_dlp.YoutubeDL(
+            {
+                "logger": yt_logger,
+            }
+        ) as ydl:
             try:
                 result = ydl.extract_info(url, download=False)
-                return result
             except Exception as e:
                 logger.exception(e)
                 raise NotADownloadableLinkError(f"Video info extraction failed for {url}")
+        return result
+
+    @staticmethod
+    def get_video_attributes(url: str) -> dict:
+        result = Youtube.get_video_data(url)
+        if "ext" in result:
+            return result
+        else:
+            raise NotADownloadableLinkError(f"Video info extraction failed for {url}")
@ -1,5 +1,5 @@
|
|||
if (-not ([string]::IsNullOrEmpty($env:REDDIT_TOKEN)))
|
||||
{
|
||||
copy .\\bdfr\\default_config.cfg .\\test_config.cfg
|
||||
echo "`nuser_token = $env:REDDIT_TOKEN" >> ./test_config.cfg
|
||||
Copy-Item .\\bdfr\\default_config.cfg .\\test_config.cfg
|
||||
Write-Output "`nuser_token = $env:REDDIT_TOKEN" >> ./test_config.cfg
|
||||
}
|
|
@@ -1,4 +1,6 @@
-if [ ! -z "$REDDIT_TOKEN" ]
+#!/bin/bash
+
+if [ -n "$REDDIT_TOKEN" ]
 then
     cp ./bdfr/default_config.cfg ./test_config.cfg
     echo -e "\nuser_token = $REDDIT_TOKEN" >> ./test_config.cfg
@@ -6,11 +6,11 @@ When the project was rewritten for v2, the goal was to make the codebase easily

 The BDFR is designed to be a stateless downloader. This means that the state of the program is forgotten between each run of the program. There are no central lists, databases, or indices, that the BDFR uses, only the actual files on disk. There are several advantages to this approach:

-1. There is no chance of the database being corrupted or changed by something other than the BDFR, rendering the BDFR's "idea" of the archive wrong or incomplete.
-2. Any information about the archive is contained by the archive itself i.e. for a list of all submission IDs in the archive, this can be extracted from the names of the files in said archive, assuming an appropriate naming scheme was used.
-3. Archives can be merged, split, or editing without worrying about having to update a central database
-4. There are no versioning issues between updates of the BDFR, where old version are stuck with a worse form of the database
-5. An archive can be put on a USB, moved to another computer with possibly a very different BDFR version, and work completely fine
+1. There is no chance of the database being corrupted or changed by something other than the BDFR, rendering the BDFR's "idea" of the archive wrong or incomplete.
+2. Any information about the archive is contained by the archive itself i.e. for a list of all submission IDs in the archive, this can be extracted from the names of the files in said archive, assuming an appropriate naming scheme was used.
+3. Archives can be merged, split, or editing without worrying about having to update a central database
+4. There are no versioning issues between updates of the BDFR, where old version are stuck with a worse form of the database
+5. An archive can be put on a USB, moved to another computer with possibly a very different BDFR version, and work completely fine
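Point 2 in the list above can be made concrete: when the naming scheme includes {POSTID}, the archive doubles as its own index. A minimal sketch of recovering submission IDs from an archive directory, assuming the trailing _<id> pattern that the file name formatter's own regex preserves (the archive path is illustrative):

import re
from pathlib import Path


def archived_post_ids(archive_root: Path) -> set[str]:
    # Reddit IDs appear as a trailing "_xxxxxx" in the file stem when the
    # naming scheme included {POSTID}; this mirrors the formatter's regex.
    id_pattern = re.compile(r"_(\w{6})$")
    ids = set()
    for file in archive_root.rglob("*"):
        if file.is_file() and (match := id_pattern.search(file.stem)):
            ids.add(match.group(1))
    return ids


print(archived_post_ids(Path("~/reddit_archive").expanduser()))  # illustrative path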

 Another major part of the ethos of the design is DOTADIW, Do One Thing And Do It Well. It's a major part of Unix philosophy and states that each tool should have a well-defined, limited purpose. To this end, the BDFR is, as the name implies, a *downloader*. That is the scope of the tool. Managing the files downloaded can be for better-suited programs, since the BDFR is not a file manager. Nor the BDFR concern itself with how any of the data downloaded is displayed, changed, parsed, or analysed. This makes the BDFR suitable for data science-related tasks, archiving, personal downloads, or analysis of various Reddit sources as the BDFR is completely agnostic on how the data is used.
@ -18,23 +18,15 @@ Another major part of the ethos of the design is DOTADIW, Do One Thing And Do It
The BDFR is organised around a central object, the RedditDownloader class. The Archiver object inherits from this class.

1. The RedditDownloader parses all the arguments and configuration options, held in the Configuration object, and creates a variety of internal objects for use, such as the file name formatter, download filter, etc.
2. The RedditDownloader scrapes raw submissions from Reddit via several methods relating to different sources. A source is defined as a single stream of submissions from a subreddit, multireddit, or user list.
3. These raw submissions are passed to the DownloaderFactory class to select the specialised downloader class to use. Each of these is for a specific website or link type, with some catch-all classes like Direct.
4. The BaseDownloader child, spawned by DownloaderFactory, takes the link and does any necessary processing to find the direct link to the actual resource.
5. This is returned to the RedditDownloader in the form of a Resource object. This holds the URL and some other information for the final resource.
6. The Resource is passed through the DownloadFilter instantiated in step 1.
7. The destination file name for the Resource is calculated. If it already exists, then the Resource will be discarded.
8. Here the actual data is downloaded to the Resource and a hash calculated, which is used to find duplicates.
9. Only then is the Resource written to the disk.

This is the step-by-step process that the BDFR goes through to download a Reddit post.
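
The loop below condenses steps 2 to 9 into code. It is an illustrative sketch only: the objects described above (the download filter, the file name formatter, the factory's chosen downloader) are passed in as plain callables, and every name is a hypothetical stand-in rather than the BDFR's actual API.

```python
from pathlib import Path
from typing import Callable, Iterable


def run_pipeline(
    submissions: Iterable,                 # step 2: raw submissions from the sources
    select_downloader: Callable,           # step 3: stand-in for DownloaderFactory
    download_filter: Callable[..., bool],  # step 6: stand-in for DownloadFilter
    format_path: Callable[..., Path],      # step 7: stand-in for the file name formatter
    seen_hashes: set[str],
) -> None:
    for submission in submissions:
        downloader = select_downloader(submission)  # step 3
        resource = downloader(submission)           # steps 4-5: resolve the direct link
        if not download_filter(resource):           # step 6
            continue
        destination = format_path(resource)         # step 7
        if destination.exists():
            continue                                # name already taken, discard
        resource.download()                         # step 8: fetch the data, compute the hash
        if resource.hash in seen_hashes:
            continue                                # duplicate found elsewhere
        seen_hashes.add(resource.hash)
        destination.write_bytes(resource.content)   # step 9: only now write to disk
```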
@ -69,8 +69,6 @@ members of the project's leadership.
## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>

[homepage]: https://www.contributor-covenant.org
@ -11,11 +11,13 @@ All communication on GitHub, Discord, email, or any other medium must conform to
**Before opening a new issue**, be sure that no issues regarding your problem already exist. If a similar issue exists, try to contribute to the issue.

### Bugs

When opening an issue about a bug, **please provide the full log file for the run in which the bug occurred**. This log file is named `log_output.txt` in the configuration folder. Check the [README](../README.md) for information on where this is. This log file will contain all the information required for the developers to recreate the bug.

If you do not have or cannot find the log file, then at minimum please provide the **Reddit ID for the submission** or comment which caused the issue. Also copy in the command that you used to run the BDFR from the command line, as that will also provide helpful information when trying to find and fix the bug. If needed, more information will be requested in the thread of the bug.

### Feature requests

In the case of requesting a feature or an enhancement, there are fewer requirements. However, please be clear in what you would like the BDFR to do and also how the feature/enhancement would be used or would be useful to more people. It is crucial that the feature is justified. Any feature request without a concrete reason for it to be implemented has a very small chance of being accepted. Be aware that proposed enhancements may be rejected for multiple reasons, or no reason, at the discretion of the developers.

## Pull Requests
@ -24,13 +26,13 @@ Before creating a pull request (PR), check out [ARCHITECTURE](ARCHITECTURE.md) f
Once you have done both of these, the below list shows the path that should be followed when writing a PR.

1. If an issue does not already exist, open one that will relate to the PR.
2. Ensure that any changes fit into the architecture specified above.
3. Ensure that you have written tests that cover the new code.
4. Ensure that no existing tests fail, unless there is a good reason for them to do so.
5. If needed, update any documentation with changes.
6. Open a pull request that references the relevant issue.
7. Expect changes or suggestions and heed the Code of Conduct. We're all volunteers here.

Someone will review your pull request as soon as possible, but remember that all maintainers are volunteers and this won't happen immediately. Once it is approved, congratulations! Your code is now part of the BDFR.
@ -41,36 +43,52 @@ Bulk Downloader for Reddit requires Python 3.9 at minimum. First, ensure that yo
The BDFR is built in a way that it can be packaged and installed via `pip`. This places the BDFR next to other Python packages and enables you to run the program from any directory. Since it is managed by pip, you can also uninstall it.

To install the program, clone the repository and run pip inside the project's root directory:

```bash
$ git clone https://github.com/aliparlakci/bulk-downloader-for-reddit.git
$ cd ./bulk-downloader-for-reddit
$ python3 -m pip install -e .
git clone https://github.com/aliparlakci/bulk-downloader-for-reddit.git
cd ./bulk-downloader-for-reddit
python3 -m pip install -e .
```

The **`-e`** parameter creates a link to that folder. That is, any change inside the folder affects the package immediately, so when developing, you can be sure that the package is not stale and Python is always running your latest changes. (Due to this linking, moving/removing/renaming the folder might break it.)

Then, you can run the program from anywhere on your disk as such:

```bash
$ python3 -m bdfr
bdfr
```

There are additional Python packages that are required to develop the BDFR. These can be installed with the following command:

```bash
python3 -m pip install -e .[dev]
```
### Tools

The BDFR project uses several tools to manage its code. These include:

- [black](https://github.com/psf/black)
- [flake8](https://github.com/john-hen/Flake8-pyproject)
- [isort](https://github.com/PyCQA/isort)
- [markdownlint (mdl)](https://github.com/markdownlint/markdownlint)
- [tox](https://tox.wiki/en/latest/)
- [pre-commit](https://github.com/pre-commit/pre-commit)

The first four tools are formatters and linters. These change or check the code against the standards expected for the BDFR project. The configuration details for these tools are contained in the [pyproject.toml](../pyproject.toml) file for the project.

The tool `tox` is used to run tests and tools on demand and has the following environments:

- `format`
- `format_check`
The tool `pre-commit` is optional, and runs the three formatting tools automatically when a commit is made. This is **highly recommended** to ensure that all code submitted for this project is formatted acceptably. Note that any PR that does not follow the formatting guide will not be accepted. For information on how to use pre-commit to avoid this, see [the pre-commit documentation](https://pre-commit.com/).
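
A minimal sketch of that workflow, assuming the repository root contains the `.pre-commit-config.yaml` that `pre-commit` requires:

```bash
python3 -m pip install -e .[dev]   # pre-commit is included in the dev extras
pre-commit install                 # registers the hook in .git/hooks
git commit -m "my change"          # the formatting tools now run before each commit
```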
## Style Guide

The BDFR must conform to the PEP8 standard wherever there is Python code, with one exception. Line lengths may extend to 120 characters, but all other PEP8 standards must be followed.
The BDFR uses the Black formatting standard and enforces this with the tool of the same name. Additionally, the tool isort is used to format imports.

It's easy to format your code without any manual work via a variety of tools. Autopep8 is a good one, and can be used with `autopep8 --max-line-length 120`, which will format the code according to the style in use with the BDFR.

Hanging brackets are preferred when there are many items, items that would otherwise go over the 120 character line limit, or when doing so would increase readability. It is also preferred when there might be many commits altering the list, such as with the parameter lists for tests. A hanging comma is also required in such cases. An example of this is below:

```python
test = [
    'test 1',
    'test 2',
    'test 3',
]
```

Note that the last bracket is on its own line, and that the first bracket has a new line before the first term. Also note that there is a comma after the last term.
See [Preparing the Environment for Development](#preparing-the-environment-for-development) for how to set up these tools to run automatically.
## Tests
@ -83,14 +101,14 @@ When submitting a PR, it is required that you run **all** possible tests to ensu
This is accomplished with marks, a system that pytest uses to categorise tests. The following marks are currently in use in the BDFR test suite:

- `slow`
  - This marks a test that may take a long time to complete
  - Usually marks a test that downloads many submissions or downloads a particularly large resource
- `online`
  - This marks a test that requires an internet connection and uses online resources
- `reddit`
  - This marks a test that accesses online Reddit specifically
- `authenticated`
  - This marks a test that requires a test configuration file with a valid OAuth2 token

These tests can be run either all at once, or excluding certain marks. The tests that require online resources, such as those marked `reddit` or `online`, will naturally require more time to run than tests that are entirely offline. To run tests, you must be in the root directory of the project and can use the following command.
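
As a sketch (not the project's documented invocation), pytest's standard `-m` mark expressions can include or exclude the marks registered in `pyproject.toml`:

```bash
pytest                                  # every test, including slow and online ones
pytest -m "not slow"                    # skip long-running tests
pytest -m "not online and not reddit"   # offline-only run
pytest -m "not authenticated"           # skip tests that need an OAuth2 token
```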
9
opts_example.yaml
Normal file
@ -0,0 +1,9 @@
skip: [mp4, avi, mov]
file_scheme: "{UPVOTES}_{REDDITOR}_{POSTID}_{DATE}"
limit: 10
sort: top
time: all
no_dupes: true
subreddit:
  - EarthPorn
  - CityPorn
88
pyproject.toml
Normal file
@ -0,0 +1,88 @@
[build-system]
requires = ["setuptools>=65.6.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "bdfr"
description = "Downloads and archives content from reddit"
readme = "README.md"
requires-python = ">=3.9"
license = {file = "LICENSE"}
keywords = ["reddit", "download", "archive",]
authors = [{name = "Ali Parlakci", email = "parlakciali@gmail.com"}]
maintainers = [{name = "Serene Arc", email = "serenical@gmail.com"}]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Environment :: Console",
    "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
    "Natural Language :: English",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
]
dependencies = [
    "appdirs>=1.4.4",
    "beautifulsoup4>=4.10.0",
    "click>=8.0.0",
    "dict2xml>=1.7.0",
    "praw>=7.2.0",
    "pyyaml>=5.4.1",
    "requests>=2.25.1",
    "yt-dlp>=2022.11.11",
]
dynamic = ["version"]

[tool.setuptools]
dynamic = {"version" = {attr = 'bdfr.__version__'}}
packages = ["bdfr", "bdfr.archive_entry", "bdfr.site_downloaders", "bdfr.site_downloaders.fallback_downloaders",]
data-files = {"config" = ["bdfr/default_config.cfg",]}

[project.optional-dependencies]
dev = [
    "black>=22.12.0",
    "Flake8-pyproject>=1.2.2",
    "isort>=5.11.4",
    "pre-commit>=2.20.0",
    "pytest>=7.1.0",
    "tox>=3.27.1",
]

[project.urls]
"Homepage" = "https://aliparlakci.github.io/bulk-downloader-for-reddit"
"Source" = "https://github.com/aliparlakci/bulk-downloader-for-reddit"
"Bug Reports" = "https://github.com/aliparlakci/bulk-downloader-for-reddit/issues"

[project.scripts]
bdfr = "bdfr.__main__:cli"
bdfr-archive = "bdfr.__main__:cli_archive"
bdfr-clone = "bdfr.__main__:cli_clone"
bdfr-download = "bdfr.__main__:cli_download"

[tool.black]
line-length = 120

[tool.flake8]
exclude = ["scripts"]
max-line-length = 120
show-source = true
statistics = true

[tool.isort]
profile = "black"
py_version = 39
multi_line_output = 3
line_length = 120
indent = 4

[tool.pytest.ini_options]
minversion = "7.1"
addopts = "--strict-markers"
testpaths = "tests"
markers = [
    "online: tests require a connection to the internet",
    "reddit: tests require a connection to Reddit",
    "slow: test is slow to run",
    "authenticated: test requires an authenticated Reddit instance",
]
@ -1,7 +0,0 @@
[pytest]
markers =
    online: tests require a connection to the internet
    reddit: tests require a connection to Reddit
    slow: test is slow to run
    authenticated: test requires an authenticated Reddit instance
@ -1,9 +0,0 @@
appdirs>=1.4.4
bs4>=0.0.1
click>=7.1.2
dict2xml>=1.7.0
ffmpeg-python>=0.2.0
praw>=7.2.0
pyyaml>=5.4.1
requests>=2.25.1
youtube-dl>=2021.3.14
@ -6,6 +6,7 @@ Due to the verboseness of the logs, a great deal of information can be gathered
- [Script to extract all failed download IDs](#extract-all-failed-ids)
- [Timestamp conversion](#converting-bdfrv1-timestamps-to-bdfrv2-timestamps)
- [Printing summary statistics for a run](#printing-summary-statistics)
- [Unsaving posts from your account after downloading](#unsave-posts-after-downloading)

## Extract all Successfully Downloaded IDs
@ -58,7 +59,7 @@ A simple script has been included to print summary statistics for a run of the B
This will create an output like the following:

```
```text
Downloaded submissions: 250
Failed downloads: 103
Files already downloaded: 20073
@ -67,3 +68,23 @@ Excluded submissions: 1146
Files with existing hash skipped: 0
Submissions from excluded subreddits: 0
```

## Unsave Posts After Downloading

[This script](unsaveposts.py) takes a list of submission IDs from a file named `successfulids` created with the `extract_successful_ids.sh` script and unsaves them from your account. To make it work you will need to make a user script in your Reddit profile, like this:

- Fill in the username and password fields in the script. Make sure you keep the quotes around the fields.
- Go to https://old.reddit.com/prefs/apps/
- Click on `Develop an app` at the bottom.
- Make sure you select a `script`, not a `web app`.
- Name it `Unsave Posts`.
- Fill in the `Redirect URI` field with `127.0.0.0`.
- Save it.
- Fill in the `client_id` and `client_secret` fields on the script. The client ID is the 14 character string under the name you gave your script. It'll look like a bunch of random characters, like this: pspYLwDoci9z_A. The client secret is the longer string next to "secret". Again, keep the quotes around the fields.

Now the script is ready to run. Just execute it like this:

```bash
python3.9 -m bdfr download DOWNLOAD_DIR --authenticate --user me --saved --log LOGFILE_LOCATION
./extract_successful_ids.sh LOGFILE_LOCATION > successfulids
./unsaveposts.py
```
21
scripts/extract_failed_ids.ps1
Normal file
@ -0,0 +1,21 @@
if (Test-Path -Path $args[0] -PathType Leaf) {
    $file=$args[0]
}
else {
    Write-Host "CANNOT FIND LOG FILE"
    Exit 1
}

if ($null -ne $args[1]) {
    $output=$args[1]
    Write-Host "Outputting IDs to $output"
}
else {
    $output="./failed.txt"
}

Select-String -Path $file -Pattern "Could not download submission" | ForEach-Object { -split $_.Line | Select-Object -Skip 11 | Select-Object -First 1 } | ForEach-Object { $_.substring(0,$_.Length-1) } >> $output
Select-String -Path $file -Pattern "Failed to download resource" | ForEach-Object { -split $_.Line | Select-Object -Skip 14 | Select-Object -First 1 } >> $output
Select-String -Path $file -Pattern "failed to download submission" | ForEach-Object { -split $_.Line | Select-Object -Skip 13 | Select-Object -First 1 } | ForEach-Object { $_.substring(0,$_.Length-1) } >> $output
Select-String -Path $file -Pattern "Failed to write file" | ForEach-Object { -split $_.Line | Select-Object -Skip 13 | Select-Object -First 1 } >> $output
Select-String -Path $file -Pattern "skipped due to disabled module" | ForEach-Object { -split $_.Line | Select-Object -Skip 8 | Select-Object -First 1 } >> $output
@ -7,17 +7,10 @@ else
    exit 1
fi

if [ -n "$2" ]; then
    output="$2"
    echo "Outputting IDs to $output"
else
    output="./failed.txt"
fi

{
    grep 'Could not download submission' "$file" | awk '{ print $12 }' | rev | cut -c 2- | rev ;
    grep 'Failed to download resource' "$file" | awk '{ print $15 }' ;
    grep 'failed to download submission' "$file" | awk '{ print $14 }' | rev | cut -c 2- | rev ;
    grep 'Failed to write file' "$file" | awk '{ print $13 }' | rev | cut -c 2- | rev ;
    grep 'Failed to write file' "$file" | awk '{ print $14 }' ;
    grep 'skipped due to disabled module' "$file" | awk '{ print $9 }' ;
} >>"$output"
}
21
scripts/extract_successful_ids.ps1
Normal file
@ -0,0 +1,21 @@
if (Test-Path -Path $args[0] -PathType Leaf) {
    $file=$args[0]
}
else {
    Write-Host "CANNOT FIND LOG FILE"
    Exit 1
}

if ($null -ne $args[1]) {
    $output=$args[1]
    Write-Host "Outputting IDs to $output"
}
else {
    $output="./successful.txt"
}

Select-String -Path $file -Pattern "Downloaded submission" | ForEach-Object { -split $_.Line | Select-Object -Last 3 | Select-Object -SkipLast 2 } >> $output
Select-String -Path $file -Pattern "Resource hash" | ForEach-Object { -split $_.Line | Select-Object -Last 3 | Select-Object -SkipLast 2 } >> $output
Select-String -Path $file -Pattern "Download filter" | ForEach-Object { -split $_.Line | Select-Object -Last 4 | Select-Object -SkipLast 3 } >> $output
Select-String -Path $file -Pattern "already exists, continuing" | ForEach-Object { -split $_.Line | Select-Object -Last 4 | Select-Object -SkipLast 3 } >> $output
Select-String -Path $file -Pattern "Hard link made" | ForEach-Object { -split $_.Line | Select-Object -Last 1 } >> $output
@ -7,17 +7,11 @@ else
    exit 1
fi

if [ -n "$2" ]; then
    output="$2"
    echo "Outputting IDs to $output"
else
    output="./successful.txt"
fi

{
    grep 'Downloaded submission' "$file" | awk '{ print $(NF-2) }' ;
    grep 'Resource hash' "$file" | awk '{ print $(NF-2) }' ;
    grep 'Download filter' "$file" | awk '{ print $(NF-3) }' ;
    grep 'already exists, continuing' "$file" | awk '{ print $(NF-3) }' ;
    grep 'Hard link made' "$file" | awk '{ print $(NF) }' ;
} >> "$output"
    grep 'filtered due to score' "$file" | awk '{ print $9 }'
}
30
scripts/print_summary.ps1
Normal file
@ -0,0 +1,30 @@
if (Test-Path -Path $args[0] -PathType Leaf) {
    $file=$args[0]
}
else {
    Write-Host "CANNOT FIND LOG FILE"
    Exit 1
}

if ($null -ne $args[1]) {
    $output=$args[1]
    Write-Host "Outputting IDs to $output"
}
else {
    $output="./successful.txt"
}

Write-Host -NoNewline "Downloaded submissions: "
Write-Host (Select-String -Path $file -Pattern "Downloaded submission" -AllMatches).Matches.Count
Write-Host -NoNewline "Failed downloads: "
Write-Host (Select-String -Path $file -Pattern "failed to download submission" -AllMatches).Matches.Count
Write-Host -NoNewline "Files already downloaded: "
Write-Host (Select-String -Path $file -Pattern "already exists, continuing" -AllMatches).Matches.Count
Write-Host -NoNewline "Hard linked submissions: "
Write-Host (Select-String -Path $file -Pattern "Hard link made" -AllMatches).Matches.Count
Write-Host -NoNewline "Excluded submissions: "
Write-Host (Select-String -Path $file -Pattern "in exclusion list" -AllMatches).Matches.Count
Write-Host -NoNewline "Files with existing hash skipped: "
Write-Host (Select-String -Path $file -Pattern "downloaded elsewhere" -AllMatches).Matches.Count
Write-Host -NoNewline "Submissions from excluded subreddits: "
Write-Host (Select-String -Path $file -Pattern "in skip list" -AllMatches).Matches.Count
@ -1,2 +1 @@
[2021-06-12 11:18:25,794 - bdfr.downloader - ERROR] - Failed to download resource https://i.redd.it/61fniokpjq471.jpg in submission nxv3dt with downloader Direct: Unrecoverable error requesting resource: HTTP Code 404
2
scripts/tests/example_logfiles/succeed_score_filter.txt
Normal file
@ -0,0 +1,2 @@
[2022-07-23 14:04:14,095 - bdfr.downloader - DEBUG] - Submission ljyy27 filtered due to score 15 < [50]
[2022-07-23 14:04:14,104 - bdfr.downloader - DEBUG] - Submission ljyy27 filtered due to score 16 > [1]
@ -14,30 +14,35 @@ teardown() {
@test "fail no downloader module" {
    run ../extract_failed_ids.sh ./example_logfiles/failed_no_downloader.txt
    echo "$output" > failed.txt
    assert [ "$( wc -l 'failed.txt' | awk '{ print $1 }' )" -eq "3" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'failed.txt' )" -eq "0" ];
}

@test "fail resource error" {
    run ../extract_failed_ids.sh ./example_logfiles/failed_resource_error.txt
    echo "$output" > failed.txt
    assert [ "$( wc -l 'failed.txt' | awk '{ print $1 }' )" -eq "1" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'failed.txt' )" -eq "0" ];
}

@test "fail site downloader error" {
    run ../extract_failed_ids.sh ./example_logfiles/failed_sitedownloader_error.txt
    echo "$output" > failed.txt
    assert [ "$( wc -l 'failed.txt' | awk '{ print $1 }' )" -eq "2" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'failed.txt' )" -eq "0" ];
}

@test "fail failed file write" {
    run ../extract_failed_ids.sh ./example_logfiles/failed_write_error.txt
    echo "$output" > failed.txt
    assert [ "$( wc -l 'failed.txt' | awk '{ print $1 }' )" -eq "1" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'failed.txt' )" -eq "0" ];
}

@test "fail disabled module" {
    run ../extract_failed_ids.sh ./example_logfiles/failed_disabled_module.txt
    echo "$output" > failed.txt
    assert [ "$( wc -l 'failed.txt' | awk '{ print $1 }' )" -eq "1" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'failed.txt' )" -eq "0" ];
}
@ -9,30 +9,42 @@ teardown() {
@test "success downloaded submission" {
    run ../extract_successful_ids.sh ./example_logfiles/succeed_downloaded_submission.txt
    echo "$output" > successful.txt
    assert [ "$( wc -l 'successful.txt' | awk '{ print $1 }' )" -eq "7" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'successful.txt' )" -eq "0" ];
}

@test "success resource hash" {
    run ../extract_successful_ids.sh ./example_logfiles/succeed_resource_hash.txt
    echo "$output" > successful.txt
    assert [ "$( wc -l 'successful.txt' | awk '{ print $1 }' )" -eq "1" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'successful.txt' )" -eq "0" ];
}

@test "success download filter" {
    run ../extract_successful_ids.sh ./example_logfiles/succeed_download_filter.txt
    echo "$output" > successful.txt
    assert [ "$( wc -l 'successful.txt' | awk '{ print $1 }' )" -eq "3" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'successful.txt' )" -eq "0" ];
}

@test "success already exists" {
    run ../extract_successful_ids.sh ./example_logfiles/succeed_already_exists.txt
    echo "$output" > successful.txt
    assert [ "$( wc -l 'successful.txt' | awk '{ print $1 }' )" -eq "3" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'successful.txt' )" -eq "0" ];
}

@test "success hard link" {
    run ../extract_successful_ids.sh ./example_logfiles/succeed_hard_link.txt
    echo "$output" > successful.txt
    assert [ "$( wc -l 'successful.txt' | awk '{ print $1 }' )" -eq "1" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'successful.txt' )" -eq "0" ];
}

@test "success score filter" {
    run ../extract_successful_ids.sh ./example_logfiles/succeed_score_filter.txt
    echo "$output" > successful.txt
    assert [ "$( wc -l 'successful.txt' | awk '{ print $1 }' )" -eq "2" ];
    assert [ "$( grep -Ecv '\w{6,7}' 'successful.txt' )" -eq "0" ];
}
40
scripts/unsaveposts.py
Normal file
@ -0,0 +1,40 @@
#! /usr/bin/env python3.9
'''
This script takes a list of submission IDs from a file named "successfulids" created with the
"extract_successful_ids.sh" script and unsaves them from your account. To make it work you must
fill in the username and password fields below. Make sure you keep the quotes around the fields.
You'll need to make a "user script" in your reddit profile to run this.
Go to https://old.reddit.com/prefs/apps/
Click on "Develop an app" at the bottom.
Make sure you select a "script" not a "web app."
Give it a random name. Doesn't matter.
You need to fill in the "Redirect URI" field with something so go ahead and put 127.0.0.0 in there.
Save it.
The client ID is the 14 character string under the name you gave your script.
It'll look like a bunch of random characters like this: pspYLwDoci9z_A
The client secret is the longer string next to "secret".
Replace those two fields below. Again keep the quotes around the fields.
'''

import praw

try:
    r = praw.Reddit(
        client_id="CLIENTID",
        client_secret="CLIENTSECRET",
        password="USERPASSWORD",
        user_agent="Unsave Posts",
        username="USERNAME",
    )

    with open("successfulids", "r") as f:
        for item in f:
            r.submission(id=item.strip()).unsave()

except:
    print("Something went wrong. Did you install PRAW? Did you change the user login fields?")

else:
    print("Done! Thanks for playing!")
22
setup.cfg
@ -1,22 +0,0 @@
[metadata]
name = bdfr
description_file = README.md
description_content_type = text/markdown
home_page = https://github.com/aliparlakci/bulk-downloader-for-reddit
keywords = reddit, download, archive
version = 2.4.0
author = Ali Parlakci
author_email = parlakciali@gmail.com
maintainer = Serene Arc
maintainer_email = serenical@gmail.com
license = GPLv3
classifiers =
    Programming Language :: Python :: 3
    License :: OSI Approved :: GNU General Public License v3 (GPLv3)
    Natural Language :: English
    Environment :: Console
    Operating System :: OS Independent
platforms = any

[files]
packages = bdfr
6
setup.py
@ -1,6 +0,0 @@
#!/usr/bin/env python3
# encoding=utf-8

from setuptools import setup

setup(setup_requires=['pbr', 'appdirs'], pbr=True, data_files=[('config', ['bdfr/default_config.cfg'])], python_requires='>=3.9.0')
@ -0,0 +1,2 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
@ -1,2 +1,2 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-
@ -1,5 +1,5 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import praw
import pytest
@ -9,15 +9,21 @@ from bdfr.archive_entry.comment_archive_entry import CommentArchiveEntry
@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.parametrize(('test_comment_id', 'expected_dict'), (
    ('gstd4hk', {
        'author': 'james_pic',
        'subreddit': 'Python',
        'submission': 'mgi4op',
        'submission_title': '76% Faster CPython',
        'distinguished': None,
    }),
))
@pytest.mark.parametrize(
    ("test_comment_id", "expected_dict"),
    (
        (
            "gstd4hk",
            {
                "author": "james_pic",
                "subreddit": "Python",
                "submission": "mgi4op",
                "submission_title": "76% Faster CPython",
                "distinguished": None,
            },
        ),
    ),
)
def test_get_comment_details(test_comment_id: str, expected_dict: dict, reddit_instance: praw.Reddit):
    comment = reddit_instance.comment(id=test_comment_id)
    test_entry = CommentArchiveEntry(comment)
@ -27,13 +33,16 @@ def test_get_comment_details(test_comment_id: str, expected_dict: dict, reddit_i
@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.parametrize(('test_comment_id', 'expected_min_comments'), (
    ('gstd4hk', 4),
    ('gsvyste', 3),
    ('gsxnvvb', 5),
))
@pytest.mark.parametrize(
    ("test_comment_id", "expected_min_comments"),
    (
        ("gstd4hk", 4),
        ("gsvyste", 3),
        ("gsxnvvb", 5),
    ),
)
def test_get_comment_replies(test_comment_id: str, expected_min_comments: int, reddit_instance: praw.Reddit):
    comment = reddit_instance.comment(id=test_comment_id)
    test_entry = CommentArchiveEntry(comment)
    result = test_entry.compile()
    assert len(result.get('replies')) >= expected_min_comments
    assert len(result.get("replies")) >= expected_min_comments
@ -1,5 +1,5 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import praw
import pytest
@ -9,9 +9,7 @@ from bdfr.archive_entry.submission_archive_entry import SubmissionArchiveEntry
@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.parametrize(('test_submission_id', 'min_comments'), (
    ('m3reby', 27),
))
@pytest.mark.parametrize(("test_submission_id", "min_comments"), (("m3reby", 27),))
def test_get_comments(test_submission_id: str, min_comments: int, reddit_instance: praw.Reddit):
    test_submission = reddit_instance.submission(id=test_submission_id)
    test_archive_entry = SubmissionArchiveEntry(test_submission)
@ -21,21 +19,27 @@ def test_get_comments(test_submission_id: str, min_comments: int, reddit_instanc
@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.parametrize(('test_submission_id', 'expected_dict'), (
    ('m3reby', {
        'author': 'sinjen-tos',
        'id': 'm3reby',
        'link_flair_text': 'image',
        'pinned': False,
        'spoiler': False,
        'over_18': False,
        'locked': False,
        'distinguished': None,
        'created_utc': 1615583837,
        'permalink': '/r/australia/comments/m3reby/this_little_guy_fell_out_of_a_tree_and_in_front/'
    }),
    ('m3kua3', {'author': 'DELETED'}),
))
@pytest.mark.parametrize(
    ("test_submission_id", "expected_dict"),
    (
        (
            "m3reby",
            {
                "author": "sinjen-tos",
                "id": "m3reby",
                "link_flair_text": "image",
                "pinned": False,
                "spoiler": False,
                "over_18": False,
                "locked": False,
                "distinguished": None,
                "created_utc": 1615583837,
                "permalink": "/r/australia/comments/m3reby/this_little_guy_fell_out_of_a_tree_and_in_front/",
            },
        ),
        # TODO: add deleted user test case
    ),
)
def test_get_post_details(test_submission_id: str, expected_dict: dict, reddit_instance: praw.Reddit):
    test_submission = reddit_instance.submission(id=test_submission_id)
    test_archive_entry = SubmissionArchiveEntry(test_submission)
@ -1,5 +1,5 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import configparser
import socket
@ -11,29 +11,29 @@ import pytest
from bdfr.oauth2 import OAuth2TokenManager


@pytest.fixture(scope='session')
@pytest.fixture(scope="session")
def reddit_instance():
    rd = praw.Reddit(
        client_id='U-6gk4ZCh3IeNQ',
        client_secret='7CZHY6AmKweZME5s50SfDGylaPg',
        user_agent='test',
        client_id="U-6gk4ZCh3IeNQ",
        client_secret="7CZHY6AmKweZME5s50SfDGylaPg",
        user_agent="test",
    )
    return rd


@pytest.fixture(scope='session')
@pytest.fixture(scope="session")
def authenticated_reddit_instance():
    test_config_path = Path('test_config.cfg')
    test_config_path = Path("./tests/test_config.cfg")
    if not test_config_path.exists():
        pytest.skip('Refresh token must be provided to authenticate with OAuth2')
        pytest.skip("Refresh token must be provided to authenticate with OAuth2")
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read(test_config_path)
    if not cfg_parser.has_option('DEFAULT', 'user_token'):
        pytest.skip('Refresh token must be provided to authenticate with OAuth2')
    if not cfg_parser.has_option("DEFAULT", "user_token"):
        pytest.skip("Refresh token must be provided to authenticate with OAuth2")
    token_manager = OAuth2TokenManager(cfg_parser, test_config_path)
    reddit_instance = praw.Reddit(
        client_id=cfg_parser.get('DEFAULT', 'client_id'),
        client_secret=cfg_parser.get('DEFAULT', 'client_secret'),
        client_id=cfg_parser.get("DEFAULT", "client_id"),
        client_secret=cfg_parser.get("DEFAULT", "client_secret"),
        user_agent=socket.gethostname(),
        token_manager=token_manager,
    )
@ -1,2 +1,2 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-
@ -1,76 +1,89 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import re
import shutil
from pathlib import Path
from unittest.mock import MagicMock, patch

import prawcore
import pytest
from click.testing import CliRunner

from bdfr.__main__ import cli

does_test_config_exist = Path('../test_config.cfg').exists()
does_test_config_exist = Path("./tests/test_config.cfg").exists()


def copy_test_config(run_path: Path):
    shutil.copy(Path('../test_config.cfg'), Path(run_path, '../test_config.cfg'))
    shutil.copy(Path("./tests/test_config.cfg"), Path(run_path, "test_config.cfg"))


def create_basic_args_for_archive_runner(test_args: list[str], run_path: Path):
    copy_test_config(run_path)
    out = [
        'archive',
        "archive",
        str(run_path),
        '-v',
        '--config', str(Path(run_path, '../test_config.cfg')),
        '--log', str(Path(run_path, 'test_log.txt')),
        "-v",
        "--config",
        str(Path(run_path, "test_config.cfg")),
        "--log",
        str(Path(run_path, "test_log.txt")),
    ] + test_args
    return out


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['-l', 'gstd4hk'],
    ['-l', 'm2601g', '-f', 'yaml'],
    ['-l', 'n60t4c', '-f', 'xml'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["-l", "gstd4hk"],
        ["-l", "m2601g", "-f", "yaml"],
        ["-l", "n60t4c", "-f", "xml"],
    ),
)
def test_cli_archive_single(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert re.search(r'Writing entry .*? to file in .*? format', result.output)
    assert re.search(r"Writing entry .*? to file in .*? format", result.output)


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['--subreddit', 'Mindustry', '-L', 25],
    ['--subreddit', 'Mindustry', '-L', 25, '--format', 'xml'],
    ['--subreddit', 'Mindustry', '-L', 25, '--format', 'yaml'],
    ['--subreddit', 'Mindustry', '-L', 25, '--sort', 'new'],
    ['--subreddit', 'Mindustry', '-L', 25, '--time', 'day'],
    ['--subreddit', 'Mindustry', '-L', 25, '--time', 'day', '--sort', 'new'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["--subreddit", "Mindustry", "-L", 25],
        ["--subreddit", "Mindustry", "-L", 25, "--format", "xml"],
        ["--subreddit", "Mindustry", "-L", 25, "--format", "yaml"],
        ["--subreddit", "Mindustry", "-L", 25, "--sort", "new"],
        ["--subreddit", "Mindustry", "-L", 25, "--time", "day"],
        ["--subreddit", "Mindustry", "-L", 25, "--time", "day", "--sort", "new"],
    ),
)
def test_cli_archive_subreddit(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert re.search(r'Writing entry .*? to file in .*? format', result.output)
    assert re.search(r"Writing entry .*? to file in .*? format", result.output)


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['--user', 'me', '--authenticate', '--all-comments', '-L', '10'],
    ['--user', 'me', '--user', 'djnish', '--authenticate', '--all-comments', '-L', '10'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["--user", "me", "--authenticate", "--all-comments", "-L", "10"],
        ["--user", "me", "--user", "djnish", "--authenticate", "--all-comments", "-L", "10"],
    ),
)
def test_cli_archive_all_user_comments(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
@ -80,29 +93,109 @@ def test_cli_archive_all_user_comments(test_args: list[str], tmp_path: Path):
@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['--comment-context', '--link', 'gxqapql'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize("test_args", (["--comment-context", "--link", "gxqapql"],))
def test_cli_archive_full_context(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert 'Converting comment' in result.output
    assert "Converting comment" in result.output


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.slow
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['--subreddit', 'all', '-L', 100],
    ['--subreddit', 'all', '-L', 100, '--sort', 'new'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["--subreddit", "all", "-L", 100],
        ["--subreddit", "all", "-L", 100, "--sort", "new"],
    ),
)
def test_cli_archive_long(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert re.search(r'Writing entry .*? to file in .*? format', result.output)
    assert re.search(r"Writing entry .*? to file in .*? format", result.output)


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize("test_args", (["--ignore-user", "ArjanEgges", "-l", "m3hxzd"],))
def test_cli_archive_ignore_user(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert "being an ignored user" in result.output
    assert "Attempting to archive submission" not in result.output


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize("test_args", (["--file-scheme", "{TITLE}", "-l", "suy011"],))
def test_cli_archive_file_format(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert "Attempting to archive submission" in result.output
    assert re.search("format at /.+?/Judge says Trump and two adult", result.output)


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize("test_args", (["-l", "m2601g", "--exclude-id", "m2601g"],))
def test_cli_archive_links_exclusion(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert "in exclusion list" in result.output
    assert "Attempting to archive" not in result.output


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["-l", "ijy4ch"],  # user deleted post
        ["-l", "kw4wjm"],  # post from banned subreddit
    ),
)
def test_cli_archive_soft_fail(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert "failed to be archived due to a PRAW exception" in result.output
    assert "Attempting to archive" not in result.output


@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    ("test_args", "response"),
    (
        (["--user", "nasa", "--submitted"], 502),
        (["--user", "nasa", "--submitted"], 504),
    ),
)
def test_user_serv_fail(test_args: list[str], response: int, tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_archive_runner(test_args, tmp_path)
    with patch("bdfr.connector.sleep", return_value=None):
        with patch(
            "bdfr.connector.RedditConnector.check_user_existence",
            side_effect=prawcore.exceptions.ResponseException(MagicMock(status_code=response)),
        ):
            result = runner.invoke(cli, test_args)
            assert result.exit_code == 0
            assert f"received {response} HTTP response" in result.output
@ -1,43 +1,93 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import shutil
from pathlib import Path
from unittest.mock import MagicMock, patch

import prawcore
import pytest
from click.testing import CliRunner

from bdfr.__main__ import cli

does_test_config_exist = Path('../test_config.cfg').exists()
does_test_config_exist = Path("./tests/test_config.cfg").exists()


def copy_test_config(run_path: Path):
    shutil.copy(Path('../test_config.cfg'), Path(run_path, '../test_config.cfg'))
    shutil.copy(Path("./tests/test_config.cfg"), Path(run_path, "test_config.cfg"))


def create_basic_args_for_cloner_runner(test_args: list[str], tmp_path: Path):
    copy_test_config(tmp_path)
    out = [
        'clone',
        "clone",
        str(tmp_path),
        '-v',
        '--config', 'test_config.cfg',
        '--log', str(Path(tmp_path, 'test_log.txt')),
        "-v",
        "--config",
        str(Path(tmp_path, "test_config.cfg")),
        "--log",
        str(Path(tmp_path, "test_log.txt")),
    ] + test_args
    return out


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['-l', 'm2601g'],
    ['-s', 'TrollXChromosomes/', '-L', 1],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["-l", "6l7778"],
        ["-s", "TrollXChromosomes/", "-L", 1],
        ["-l", "eiajjw"],
        ["-l", "xl0lhi"],
    ),
)
def test_cli_scrape_general(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_cloner_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert 'Downloaded submission' in result.output
    assert 'Record for entry item' in result.output
    assert "Downloaded submission" in result.output
    assert "Record for entry item" in result.output


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["-l", "ijy4ch"],  # user deleted post
        ["-l", "kw4wjm"],  # post from banned subreddit
    ),
)
def test_cli_scrape_soft_fail(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_cloner_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert "Downloaded submission" not in result.output
    assert "Record for entry item" not in result.output


@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    ("test_args", "response"),
    (
        (["--user", "nasa", "--submitted"], 502),
        (["--user", "nasa", "--submitted"], 504),
    ),
)
def test_user_serv_fail(test_args: list[str], response: int, tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_cloner_runner(test_args, tmp_path)
    with patch("bdfr.connector.sleep", return_value=None):
        with patch(
            "bdfr.connector.RedditConnector.check_user_existence",
            side_effect=prawcore.exceptions.ResponseException(MagicMock(status_code=response)),
        ):
            result = runner.invoke(cli, test_args)
            assert result.exit_code == 0
            assert f"received {response} HTTP response" in result.output
@ -1,104 +1,117 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import shutil
from pathlib import Path
from unittest.mock import MagicMock, patch

import prawcore
import pytest
from click.testing import CliRunner

from bdfr.__main__ import cli

does_test_config_exist = Path('../test_config.cfg').exists()
does_test_config_exist = Path("./tests/test_config.cfg").exists()


def copy_test_config(run_path: Path):
    shutil.copy(Path('../test_config.cfg'), Path(run_path, '../test_config.cfg'))
    shutil.copy(Path("./tests/test_config.cfg"), Path(run_path, "test_config.cfg"))


def create_basic_args_for_download_runner(test_args: list[str], run_path: Path):
    copy_test_config(run_path)
    out = [
        'download', str(run_path),
        '-v',
        '--config', str(Path(run_path, '../test_config.cfg')),
        '--log', str(Path(run_path, 'test_log.txt')),
        "download",
        str(run_path),
        "-v",
        "--config",
        str(Path(run_path, "test_config.cfg")),
        "--log",
        str(Path(run_path, "test_log.txt")),
    ] + test_args
    return out


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['-s', 'Mindustry', '-L', 1],
    ['-s', 'r/Mindustry', '-L', 1],
    ['-s', 'r/mindustry', '-L', 1],
    ['-s', 'mindustry', '-L', 1],
    ['-s', 'https://www.reddit.com/r/TrollXChromosomes/', '-L', 1],
    ['-s', 'r/TrollXChromosomes/', '-L', 1],
    ['-s', 'TrollXChromosomes/', '-L', 1],
    ['-s', 'trollxchromosomes', '-L', 1],
    ['-s', 'trollxchromosomes,mindustry,python', '-L', 1],
    ['-s', 'trollxchromosomes, mindustry, python', '-L', 1],
    ['-s', 'trollxchromosomes', '-L', 1, '--time', 'day'],
    ['-s', 'trollxchromosomes', '-L', 1, '--sort', 'new'],
    ['-s', 'trollxchromosomes', '-L', 1, '--time', 'day', '--sort', 'new'],
    ['-s', 'trollxchromosomes', '-L', 1, '--search', 'women'],
    ['-s', 'trollxchromosomes', '-L', 1, '--time', 'day', '--search', 'women'],
    ['-s', 'trollxchromosomes', '-L', 1, '--sort', 'new', '--search', 'women'],
    ['-s', 'trollxchromosomes', '-L', 1, '--time', 'day', '--sort', 'new', '--search', 'women'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["-s", "Mindustry", "-L", 3],
        ["-s", "r/Mindustry", "-L", 3],
        ["-s", "r/mindustry", "-L", 3],
        ["-s", "mindustry", "-L", 3],
        ["-s", "https://www.reddit.com/r/TrollXChromosomes/", "-L", 3],
        ["-s", "r/TrollXChromosomes/", "-L", 3],
        ["-s", "TrollXChromosomes/", "-L", 3],
        ["-s", "trollxchromosomes", "-L", 3],
        ["-s", "trollxchromosomes,mindustry,python", "-L", 3],
        ["-s", "trollxchromosomes, mindustry, python", "-L", 3],
        ["-s", "trollxchromosomes", "-L", 3, "--time", "day"],
        ["-s", "trollxchromosomes", "-L", 3, "--sort", "new"],
        ["-s", "trollxchromosomes", "-L", 3, "--time", "day", "--sort", "new"],
        ["-s", "trollxchromosomes", "-L", 3, "--search", "women"],
        ["-s", "trollxchromosomes", "-L", 3, "--time", "week", "--search", "women"],
        ["-s", "trollxchromosomes", "-L", 3, "--sort", "new", "--search", "women"],
        ["-s", "trollxchromosomes", "-L", 3, "--time", "week", "--sort", "new", "--search", "women"],
    ),
)
def test_cli_download_subreddits(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert 'Added submissions from subreddit ' in result.output
    assert 'Downloaded submission' in result.output
    assert "Added submissions from subreddit " in result.output
    assert "Downloaded submission" in result.output


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.slow
@pytest.mark.authenticated
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['-s', 'hentai', '-L', 10, '--search', 'red', '--authenticate'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["-s", "hentai", "-L", 10, "--search", "red", "--authenticate"],
        ["--authenticate", "--subscribed", "-L", 10],
    ),
)
def test_cli_download_search_subreddits_authenticated(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert 'Added submissions from subreddit ' in result.output
    assert 'Downloaded submission' in result.output
    assert "Added submissions from subreddit " in result.output
    assert "Downloaded submission" in result.output


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.authenticated
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['--subreddit', 'friends', '-L', 10, '--authenticate'],
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize("test_args", (["--subreddit", "friends", "-L", 10, "--authenticate"],))
def test_cli_download_user_specific_subreddits(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
    result = runner.invoke(cli, test_args)
    assert result.exit_code == 0
    assert 'Added submissions from subreddit ' in result.output
    assert "Added submissions from subreddit " in result.output


@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
@pytest.mark.parametrize('test_args', (
    ['-l', 'm2601g'],
    ['-l', 'https://www.reddit.com/r/TrollXChromosomes/comments/m2601g/its_a_step_in_the_right_direction/'],
    ['-l', 'm3hxzd'],  # Really long title used to overflow filename limit
    ['-l', 'm3kua3'],  # Has a deleted user
    ['-l', 'm5bqkf'],  # Resource leading to a 404
))
@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
@pytest.mark.parametrize(
    "test_args",
    (
        ["-l", "6l7778"],
        ["-l", "https://reddit.com/r/EmpireDidNothingWrong/comments/6l7778/technically_true/"],
        ["-l", "m3hxzd"],  # Really long title used to overflow filename limit
        ["-l", "m5bqkf"],  # Resource leading to a 404
    ),
)
def test_cli_download_links(test_args: list[str], tmp_path: Path):
    runner = CliRunner()
    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
@@ -108,64 +121,66 @@ def test_cli_download_links(test_args: list[str], tmp_path: Path):

 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--user', 'helen_darten', '-m', 'cuteanimalpics', '-L', 10],
-    ['--user', 'helen_darten', '-m', 'cuteanimalpics', '-L', 10, '--sort', 'rising'],
-    ['--user', 'helen_darten', '-m', 'cuteanimalpics', '-L', 10, '--time', 'week'],
-    ['--user', 'helen_darten', '-m', 'cuteanimalpics', '-L', 10, '--time', 'week', '--sort', 'rising'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["--user", "helen_darten", "-m", "cuteanimalpics", "-L", 10],
+        ["--user", "helen_darten", "-m", "cuteanimalpics", "-L", 10, "--sort", "rising"],
+        ["--user", "helen_darten", "-m", "cuteanimalpics", "-L", 10, "--time", "week"],
+        ["--user", "helen_darten", "-m", "cuteanimalpics", "-L", 10, "--time", "week", "--sort", "rising"],
+    ),
+)
 def test_cli_download_multireddit(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'Added submissions from multireddit ' in result.output
+    assert "Added submissions from multireddit " in result.output


 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--user', 'helen_darten', '-m', 'xxyyzzqwerty', '-L', 10],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize("test_args", (["--user", "helen_darten", "-m", "xxyyzzqwerty", "-L", 10],))
 def test_cli_download_multireddit_nonexistent(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'Failed to get submissions for multireddit' in result.output
-    assert 'received 404 HTTP response' in result.output
+    assert "Failed to get submissions for multireddit" in result.output
+    assert "received 404 HTTP response" in result.output


 @pytest.mark.online
 @pytest.mark.reddit
 @pytest.mark.authenticated
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--user', 'djnish', '--submitted', '--user', 'FriesWithThat', '-L', 10],
-    ['--user', 'me', '--upvoted', '--authenticate', '-L', 10],
-    ['--user', 'me', '--saved', '--authenticate', '-L', 10],
-    ['--user', 'me', '--submitted', '--authenticate', '-L', 10],
-    ['--user', 'djnish', '--submitted', '-L', 10],
-    ['--user', 'djnish', '--submitted', '-L', 10, '--time', 'month'],
-    ['--user', 'djnish', '--submitted', '-L', 10, '--sort', 'controversial'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["--user", "djnish", "--submitted", "--user", "FriesWithThat", "-L", 10],
+        ["--user", "me", "--upvoted", "--authenticate", "-L", 10],
+        ["--user", "me", "--saved", "--authenticate", "-L", 10],
+        ["--user", "me", "--submitted", "--authenticate", "-L", 10],
+        ["--user", "djnish", "--submitted", "-L", 10],
+        ["--user", "djnish", "--submitted", "-L", 10, "--time", "month"],
+        ["--user", "djnish", "--submitted", "-L", 10, "--sort", "controversial"],
+    ),
+)
 def test_cli_download_user_data_good(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'Downloaded submission ' in result.output
+    assert "Downloaded submission " in result.output


 @pytest.mark.online
 @pytest.mark.reddit
 @pytest.mark.authenticated
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--user', 'me', '-L', 10, '--folder-scheme', ''],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize("test_args", (["--user", "me", "-L", 10, "--folder-scheme", ""],))
 def test_cli_download_user_data_bad_me_unauthenticated(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
@@ -176,42 +191,41 @@ def test_cli_download_user_data_bad_me_unauthenticated(test_args: list[str], tmp_path: Path):

 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--subreddit', 'python', '-L', 1, '--search-existing'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize("test_args", (["--subreddit", "python", "-L", 1, "--search-existing"],))
 def test_cli_download_search_existing(test_args: list[str], tmp_path: Path):
-    Path(tmp_path, 'test.txt').touch()
+    Path(tmp_path, "test.txt").touch()
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'Calculating hashes for' in result.output
+    assert "Calculating hashes for" in result.output


 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--subreddit', 'tumblr', '-L', '25', '--skip', 'png', '--skip', 'jpg'],
-    ['--subreddit', 'MaliciousCompliance', '-L', '25', '--skip', 'txt'],
-    ['--subreddit', 'tumblr', '-L', '10', '--skip-domain', 'i.redd.it'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["--subreddit", "tumblr", "-L", "25", "--skip", "png", "--skip", "jpg"],
+        ["--subreddit", "MaliciousCompliance", "-L", "25", "--skip", "txt"],
+        ["--subreddit", "tumblr", "-L", "10", "--skip-domain", "i.redd.it"],
+    ),
+)
 def test_cli_download_download_filters(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert any((string in result.output for string in ('Download filter removed ', 'filtered due to URL')))
+    assert any((string in result.output for string in ("Download filter removed ", "filtered due to URL")))


 @pytest.mark.online
 @pytest.mark.reddit
 @pytest.mark.slow
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--subreddit', 'all', '-L', '100', '--sort', 'new'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize("test_args", (["--subreddit", "all", "-L", "100", "--sort", "new"],))
 def test_cli_download_long(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
@@ -222,32 +236,40 @@ def test_cli_download_long(test_args: list[str], tmp_path: Path):
 @pytest.mark.online
 @pytest.mark.reddit
 @pytest.mark.slow
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--user', 'sdclhgsolgjeroij', '--submitted', '-L', 10],
-    ['--user', 'me', '--upvoted', '-L', 10],
-    ['--user', 'sdclhgsolgjeroij', '--upvoted', '-L', 10],
-    ['--subreddit', 'submitters', '-L', 10],  # Private subreddit
-    ['--subreddit', 'donaldtrump', '-L', 10],  # Banned subreddit
-    ['--user', 'djnish', '--user', 'helen_darten', '-m', 'cuteanimalpics', '-L', 10],
-    ['--subreddit', 'friends', '-L', 10],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["--user", "sdclhgsolgjeroij", "--submitted", "-L", 10],
+        ["--user", "me", "--upvoted", "-L", 10],
+        ["--user", "sdclhgsolgjeroij", "--upvoted", "-L", 10],
+        ["--subreddit", "submitters", "-L", 10],  # Private subreddit
+        ["--subreddit", "donaldtrump", "-L", 10],  # Banned subreddit
+        ["--user", "djnish", "--user", "helen_darten", "-m", "cuteanimalpics", "-L", 10],
+        ["--subreddit", "friends", "-L", 10],
+        ["-l", "ijy4ch"],  # user deleted post
+        ["-l", "kw4wjm"],  # post from banned subreddit
+    ),
+)
 def test_cli_download_soft_fail(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'Downloaded' not in result.output
+    assert "Downloaded" not in result.output


 @pytest.mark.online
 @pytest.mark.reddit
 @pytest.mark.slow
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--time', 'random'],
-    ['--sort', 'random'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["--time", "random"],
+        ["--sort", "random"],
+    ),
+)
 def test_cli_download_hard_fail(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
@@ -257,83 +279,164 @@ def test_cli_download_hard_fail(test_args: list[str], tmp_path: Path):

 def test_cli_download_use_default_config(tmp_path: Path):
     runner = CliRunner()
-    test_args = ['download', '-vv', str(tmp_path)]
+    test_args = ["download", "-vv", str(tmp_path), "--log", str(Path(tmp_path, "test_log.txt"))]
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0


 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['-l', 'm2601g', '--exclude-id', 'm2601g'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize("test_args", (["-l", "6l7778", "--exclude-id", "6l7778"],))
 def test_cli_download_links_exclusion(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'in exclusion list' in result.output
-    assert 'Downloaded submission ' not in result.output
+    assert "in exclusion list" in result.output
+    assert "Downloaded submission " not in result.output


 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['-l', 'm2601g', '--skip-subreddit', 'trollxchromosomes'],
-    ['-s', 'trollxchromosomes', '--skip-subreddit', 'trollxchromosomes', '-L', '3'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["-l", "6l7778", "--skip-subreddit", "EmpireDidNothingWrong"],
+        ["-s", "trollxchromosomes", "--skip-subreddit", "trollxchromosomes", "-L", "3"],
+    ),
+)
 def test_cli_download_subreddit_exclusion(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'in skip list' in result.output
-    assert 'Downloaded submission ' not in result.output
+    assert "in skip list" in result.output
+    assert "Downloaded submission " not in result.output


 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['--file-scheme', '{TITLE}'],
-    ['--file-scheme', '{TITLE}_test_{SUBREDDIT}'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["--file-scheme", "{TITLE}"],
+        ["--file-scheme", "{TITLE}_test_{SUBREDDIT}"],
+    ),
+)
 def test_cli_download_file_scheme_warning(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'Some files might not be downloaded due to name conflicts' in result.output
+    assert "Some files might not be downloaded due to name conflicts" in result.output


 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
-@pytest.mark.parametrize('test_args', (
-    ['-l', 'm2601g', '--disable-module', 'Direct'],
-    ['-l', 'nnb9vs', '--disable-module', 'YoutubeDlFallback'],
-    ['-l', 'nnb9vs', '--disable-module', 'youtubedlfallback'],
-))
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["-l", "n9w9fo", "--disable-module", "SelfPost"],
+        ["-l", "nnb9vs", "--disable-module", "VReddit"],
+    ),
+)
 def test_cli_download_disable_modules(test_args: list[str], tmp_path: Path):
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'skipped due to disabled module' in result.output
-    assert 'Downloaded submission' not in result.output
+    assert "skipped due to disabled module" in result.output
+    assert "Downloaded submission" not in result.output


 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.skipif(not does_test_config_exist, reason='A test config file is required for integration tests')
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
 def test_cli_download_include_id_file(tmp_path: Path):
-    test_file = Path(tmp_path, 'include.txt')
-    test_args = ['--include-id-file', str(test_file)]
-    test_file.write_text('odr9wg\nody576')
+    test_file = Path(tmp_path, "include.txt")
+    test_args = ["--include-id-file", str(test_file)]
+    test_file.write_text("odr9wg\nody576")
     runner = CliRunner()
     test_args = create_basic_args_for_download_runner(test_args, tmp_path)
     result = runner.invoke(cli, test_args)
     assert result.exit_code == 0
-    assert 'Downloaded submission' in result.output
+    assert "Downloaded submission" in result.output


+@pytest.mark.online
+@pytest.mark.reddit
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize("test_args", (["--ignore-user", "ArjanEgges", "-l", "m3hxzd"],))
+def test_cli_download_ignore_user(test_args: list[str], tmp_path: Path):
+    runner = CliRunner()
+    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
+    result = runner.invoke(cli, test_args)
+    assert result.exit_code == 0
+    assert "Downloaded submission" not in result.output
+    assert "being an ignored user" in result.output
+
+
+@pytest.mark.online
+@pytest.mark.reddit
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    ("test_args", "was_filtered"),
+    (
+        (["-l", "ljyy27", "--min-score", "50"], True),
+        (["-l", "ljyy27", "--min-score", "1"], False),
+        (["-l", "ljyy27", "--max-score", "1"], True),
+        (["-l", "ljyy27", "--max-score", "100"], False),
+    ),
+)
+def test_cli_download_score_filter(test_args: list[str], was_filtered: bool, tmp_path: Path):
+    runner = CliRunner()
+    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
+    result = runner.invoke(cli, test_args)
+    assert result.exit_code == 0
+    assert ("filtered due to score" in result.output) == was_filtered
+
+
+@pytest.mark.online
+@pytest.mark.reddit
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    ("test_args", "response"),
+    (
+        (["--user", "nasa", "--submitted"], 502),
+        (["--user", "nasa", "--submitted"], 504),
+    ),
+)
+def test_cli_download_user_reddit_server_error(test_args: list[str], response: int, tmp_path: Path):
+    runner = CliRunner()
+    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
+    with patch("bdfr.connector.sleep", return_value=None):
+        with patch(
+            "bdfr.connector.RedditConnector.check_user_existence",
+            side_effect=prawcore.exceptions.ResponseException(MagicMock(status_code=response)),
+        ):
+            result = runner.invoke(cli, test_args)
+            assert result.exit_code == 0
+            assert f"received {response} HTTP response" in result.output


+@pytest.mark.online
+@pytest.mark.reddit
+@pytest.mark.skipif(not does_test_config_exist, reason="A test config file is required for integration tests")
+@pytest.mark.parametrize(
+    "test_args",
+    (
+        ["-l", "102vd5i", "--filename-restriction-scheme", "windows"],
+        ["-l", "m3hxzd", "--filename-restriction-scheme", "windows"],
+    ),
+)
+def test_cli_download_explicit_filename_restriction_scheme(test_args: list[str], tmp_path: Path):
+    runner = CliRunner()
+    test_args = create_basic_args_for_download_runner(test_args, tmp_path)
+    result = runner.invoke(cli, test_args)
+    assert result.exit_code == 0
+    assert "Downloaded submission" in result.output
+    assert "Forcing Windows-compatible filenames" in result.output
@@ -0,0 +1,2 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-

@@ -0,0 +1,2 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-
-from unittest.mock import MagicMock
-
-import pytest
-
-from bdfr.resource import Resource
-from bdfr.site_downloaders.fallback_downloaders.youtubedl_fallback import YoutubeDlFallback
-
-
-@pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected'), (
-    ('https://www.reddit.com/r/specializedtools/comments/n2nw5m/bamboo_splitter/', True),
-    ('https://www.youtube.com/watch?v=P19nvJOmqCc', True),
-    ('https://www.example.com/test', False),
-))
-def test_can_handle_link(test_url: str, expected: bool):
-    result = YoutubeDlFallback.can_handle_link(test_url)
-    assert result == expected
-
-
-@pytest.mark.online
-@pytest.mark.slow
-@pytest.mark.parametrize(('test_url', 'expected_hash'), (
-    ('https://streamable.com/dt46y', '1e7f4928e55de6e3ca23d85cc9246bbb'),
-    ('https://streamable.com/t8sem', '49b2d1220c485455548f1edbc05d4ecf'),
-    ('https://www.reddit.com/r/specializedtools/comments/n2nw5m/bamboo_splitter/', '21968d3d92161ea5e0abdcaf6311b06c'),
-    ('https://v.redd.it/9z1dnk3xr5k61', '351a2b57e888df5ccbc508056511f38d'),
-))
-def test_find_resources(test_url: str, expected_hash: str):
-    test_submission = MagicMock()
-    test_submission.url = test_url
-    downloader = YoutubeDlFallback(test_submission)
-    resources = downloader.find_resources()
-    assert len(resources) == 1
-    assert isinstance(resources[0], Resource)
-    assert resources[0].hash.hexdigest() == expected_hash
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from unittest.mock import MagicMock
+
+import pytest
+
+from bdfr.exceptions import NotADownloadableLinkError
+from bdfr.resource import Resource
+from bdfr.site_downloaders.fallback_downloaders.ytdlp_fallback import YtdlpFallback
+
+
+@pytest.mark.online
+@pytest.mark.parametrize(
+    ("test_url", "expected"),
+    (
+        ("https://www.reddit.com/r/specializedtools/comments/n2nw5m/bamboo_splitter/", True),
+        ("https://www.youtube.com/watch?v=P19nvJOmqCc", True),
+        ("https://www.example.com/test", False),
+        ("https://milesmatrix.bandcamp.com/album/la-boum/", False),
+        ("https://v.redd.it/dlr54z8p182a1", True),
+    ),
+)
+def test_can_handle_link(test_url: str, expected: bool):
+    result = YtdlpFallback.can_handle_link(test_url)
+    assert result == expected
+
+
+@pytest.mark.online
+@pytest.mark.parametrize("test_url", ("https://milesmatrix.bandcamp.com/album/la-boum/",))
+def test_info_extraction_bad(test_url: str):
+    with pytest.raises(NotADownloadableLinkError):
+        YtdlpFallback.get_video_attributes(test_url)
+
+
+@pytest.mark.online
+@pytest.mark.slow
+@pytest.mark.parametrize(
+    ("test_url", "expected_hash"),
+    (
+        ("https://streamable.com/dt46y", "b7e465adaade5f2b6d8c2b4b7d0a2878"),
+        ("https://streamable.com/t8sem", "49b2d1220c485455548f1edbc05d4ecf"),
+        (
+            "https://www.reddit.com/r/specializedtools/comments/n2nw5m/bamboo_splitter/",
+            "6c6ff46e04b4e33a755ae2a9b5a45ac5",
+        ),
+        ("https://v.redd.it/9z1dnk3xr5k61", "226cee353421c7aefb05c92424cc8cdd"),
+    ),
+)
+def test_find_resources(test_url: str, expected_hash: str):
+    test_submission = MagicMock()
+    test_submission.url = test_url
+    downloader = YtdlpFallback(test_submission)
+    resources = downloader.find_resources()
+    assert len(resources) == 1
+    assert isinstance(resources[0], Resource)
+    for res in resources:
+        res.download()
+    assert resources[0].hash.hexdigest() == expected_hash
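A note on the hash assertions used throughout these downloader tests (my gloss, not diff content): each Resource is downloaded first, then its .hash.hexdigest() is compared against a recorded digest. The 32-character digests suggest MD5; a minimal sketch of the same check, with hashlib standing in for the Resource internals:

    import hashlib

    def content_matches(content: bytes, expected_hex: str) -> bool:
        # Assumption: Resource.hash is an MD5 digest of the downloaded bytes.
        return hashlib.md5(content).hexdigest() == expected_hex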

tests/site_downloaders/test_delay_for_reddit.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from unittest.mock import Mock
+
+import pytest
+
+from bdfr.resource import Resource
+from bdfr.site_downloaders.delay_for_reddit import DelayForReddit
+
+
+@pytest.mark.online
+@pytest.mark.parametrize(
+    ("test_url", "expected_hash"),
+    (
+        ("https://www.delayforreddit.com/dfr/calvin6123/MjU1Njc5NQ==", "3300f28c2f9358d05667985c9c04210d"),
+        ("https://www.delayforreddit.com/dfr/RoXs_26/NDAwMzAyOQ==", "09b7b01719dff45ab197bdc08b90f78a"),
+    ),
+)
+def test_download_resource(test_url: str, expected_hash: str):
+    mock_submission = Mock()
+    mock_submission.url = test_url
+    test_site = DelayForReddit(mock_submission)
+    resources = test_site.find_resources()
+    assert len(resources) == 1
+    assert isinstance(resources[0], Resource)
+    resources[0].download()
+    assert resources[0].hash.hexdigest() == expected_hash

@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 from unittest.mock import Mock

@@ -10,10 +10,16 @@ from bdfr.site_downloaders.direct import Direct

 @pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected_hash'), (
-    ('https://giant.gfycat.com/DefinitiveCanineCrayfish.mp4', '48f9bd4dbec1556d7838885612b13b39'),
-    ('https://giant.gfycat.com/DazzlingSilkyIguana.mp4', '808941b48fc1e28713d36dd7ed9dc648'),
-))
+@pytest.mark.parametrize(
+    ("test_url", "expected_hash"),
+    (
+        ("https://i.redd.it/q6ebualjxzea1.jpg", "6ec154859c777cb401132bb991cb3635"),
+        (
+            "https://file-examples.com/wp-content/uploads/2017/11/file_example_MP3_700KB.mp3",
+            "3caa342e241ddb7d76fd24a834094101",
+        ),
+    ),
+)
 def test_download_resource(test_url: str, expected_hash: str):
     mock_submission = Mock()
     mock_submission.url = test_url
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import praw
 import pytest

@@ -9,81 +9,98 @@ from bdfr.site_downloaders.base_downloader import BaseDownloader
 from bdfr.site_downloaders.direct import Direct
 from bdfr.site_downloaders.download_factory import DownloadFactory
 from bdfr.site_downloaders.erome import Erome
-from bdfr.site_downloaders.fallback_downloaders.youtubedl_fallback import YoutubeDlFallback
+from bdfr.site_downloaders.fallback_downloaders.ytdlp_fallback import YtdlpFallback
 from bdfr.site_downloaders.gallery import Gallery
 from bdfr.site_downloaders.gfycat import Gfycat
 from bdfr.site_downloaders.imgur import Imgur
 from bdfr.site_downloaders.pornhub import PornHub
 from bdfr.site_downloaders.redgifs import Redgifs
 from bdfr.site_downloaders.self_post import SelfPost
+from bdfr.site_downloaders.vreddit import VReddit
 from bdfr.site_downloaders.youtube import Youtube


 @pytest.mark.online
-@pytest.mark.parametrize(('test_submission_url', 'expected_class'), (
-    ('https://www.reddit.com/r/TwoXChromosomes/comments/lu29zn/i_refuse_to_live_my_life'
-     '_in_anything_but_comfort/', SelfPost),
-    ('https://i.imgur.com/bZx1SJQ.jpg', Direct),
-    ('https://i.redd.it/affyv0axd5k61.png', Direct),
-    ('https://imgur.com/3ls94yv.jpeg', Direct),
-    ('https://i.imgur.com/BuzvZwb.gifv', Imgur),
-    ('https://imgur.com/BuzvZwb.gifv', Imgur),
-    ('https://i.imgur.com/6fNdLst.gif', Direct),
-    ('https://imgur.com/a/MkxAzeg', Imgur),
-    ('https://www.reddit.com/gallery/lu93m7', Gallery),
-    ('https://gfycat.com/concretecheerfulfinwhale', Gfycat),
-    ('https://www.erome.com/a/NWGw0F09', Erome),
-    ('https://youtube.com/watch?v=Gv8Wz74FjVA', Youtube),
-    ('https://redgifs.com/watch/courageousimpeccablecanvasback', Redgifs),
-    ('https://www.gifdeliverynetwork.com/repulsivefinishedandalusianhorse', Redgifs),
-    ('https://youtu.be/DevfjHOhuFc', Youtube),
-    ('https://m.youtube.com/watch?v=kr-FeojxzUM', Youtube),
-    ('https://i.imgur.com/3SKrQfK.jpg?1', Direct),
-    ('https://dynasty-scans.com/system/images_images/000/017/819/original/80215103_p0.png?1612232781', Direct),
-    ('https://m.imgur.com/a/py3RW0j', Imgur),
-    ('https://v.redd.it/9z1dnk3xr5k61', YoutubeDlFallback),
-    ('https://streamable.com/dt46y', YoutubeDlFallback),
-    ('https://vimeo.com/channels/31259/53576664', YoutubeDlFallback),
-    ('http://video.pbs.org/viralplayer/2365173446/', YoutubeDlFallback),
-    ('https://www.pornhub.com/view_video.php?viewkey=ph5a2ee0461a8d0', PornHub),
-))
+@pytest.mark.parametrize(
+    ("test_submission_url", "expected_class"),
+    (
+        (
+            "https://www.reddit.com/r/TwoXChromosomes/comments/lu29zn/i_refuse_to_live_my_life"
+            "_in_anything_but_comfort/",
+            SelfPost,
+        ),
+        ("https://i.redd.it/affyv0axd5k61.png", Direct),
+        ("https://i.imgur.com/bZx1SJQ.jpg", Imgur),
+        ("https://i.Imgur.com/bZx1SJQ.jpg", Imgur),
+        ("https://imgur.com/BuzvZwb.gifv", Imgur),
+        ("https://imgur.com/a/MkxAzeg", Imgur),
+        ("https://m.imgur.com/a/py3RW0j", Imgur),
+        ("https://www.reddit.com/gallery/lu93m7", Gallery),
+        ("https://gfycat.com/concretecheerfulfinwhale", Gfycat),
+        ("https://www.erome.com/a/NWGw0F09", Erome),
+        ("https://youtube.com/watch?v=Gv8Wz74FjVA", Youtube),
+        ("https://redgifs.com/watch/courageousimpeccablecanvasback", Redgifs),
+        ("https://www.gifdeliverynetwork.com/repulsivefinishedandalusianhorse", Redgifs),
+        ("https://thumbs4.redgifs.com/DismalIgnorantDrongo-mobile.mp4", Redgifs),
+        ("https://v3.redgifs.com/watch/kaleidoscopicdaringvenomoussnake", Redgifs),
+        ("https://youtu.be/DevfjHOhuFc", Youtube),
+        ("https://m.youtube.com/watch?v=kr-FeojxzUM", Youtube),
+        ("https://dynasty-scans.com/system/images_images/000/017/819/original/80215103_p0.png?1612232781", Direct),
+        ("https://v.redd.it/9z1dnk3xr5k61", VReddit),
+        ("https://streamable.com/dt46y", YtdlpFallback),
+        ("https://vimeo.com/channels/31259/53576664", YtdlpFallback),
+        ("http://video.pbs.org/viralplayer/2365173446/", YtdlpFallback),
+        ("https://www.pornhub.com/view_video.php?viewkey=ph5a2ee0461a8d0", PornHub),
+        ("https://www.patreon.com/posts/minecart-track-59346560", Gallery),
+    ),
+)
 def test_factory_lever_good(test_submission_url: str, expected_class: BaseDownloader, reddit_instance: praw.Reddit):
     result = DownloadFactory.pull_lever(test_submission_url)
     assert result is expected_class


-@pytest.mark.parametrize('test_url', (
-    'random.com',
-    'bad',
-    'https://www.google.com/',
-    'https://www.google.com',
-    'https://www.google.com/test',
-    'https://www.google.com/test/',
-))
+@pytest.mark.parametrize(
+    "test_url",
+    (
+        "random.com",
+        "bad",
+        "https://www.google.com/",
+        "https://www.google.com",
+        "https://www.google.com/test",
+        "https://www.google.com/test/",
+        "https://www.tiktok.com/@keriberry.420",
+    ),
+)
 def test_factory_lever_bad(test_url: str):
     with pytest.raises(NotADownloadableLinkError):
         DownloadFactory.pull_lever(test_url)


-@pytest.mark.parametrize(('test_url', 'expected'), (
-    ('www.test.com/test.png', 'test.com/test.png'),
-    ('www.test.com/test.png?test_value=random', 'test.com/test.png'),
-    ('https://youtube.com/watch?v=Gv8Wz74FjVA', 'youtube.com/watch'),
-    ('https://i.imgur.com/BuzvZwb.gifv', 'i.imgur.com/BuzvZwb.gifv'),
-))
+@pytest.mark.parametrize(
+    ("test_url", "expected"),
+    (
+        ("www.test.com/test.png", "test.com/test.png"),
+        ("www.test.com/test.png?test_value=random", "test.com/test.png"),
+        ("https://youtube.com/watch?v=Gv8Wz74FjVA", "youtube.com/watch"),
+        ("https://i.imgur.com/BuzvZwb.gifv", "i.imgur.com/BuzvZwb.gifv"),
+    ),
+)
 def test_sanitise_url(test_url: str, expected: str):
     result = DownloadFactory.sanitise_url(test_url)
     assert result == expected


-@pytest.mark.parametrize(('test_url', 'expected'), (
-    ('www.example.com/test.asp', True),
-    ('www.example.com/test.html', True),
-    ('www.example.com/test.js', True),
-    ('www.example.com/test.xhtml', True),
-    ('www.example.com/test.mp4', False),
-    ('www.example.com/test.png', False),
-))
+@pytest.mark.parametrize(
+    ("test_url", "expected"),
+    (
+        ("www.example.com/test.asp", True),
+        ("www.example.com/test.html", True),
+        ("www.example.com/test.js", True),
+        ("www.example.com/test.xhtml", True),
+        ("www.example.com/test.mp4", False),
+        ("www.example.com/test.png", False),
+    ),
+)
 def test_is_web_resource(test_url: str, expected: bool):
     result = DownloadFactory.is_web_resource(test_url)
     assert result == expected
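For readers skimming the table above (an aside): pull_lever maps a submission URL to a downloader class, and the test asserts identity with `is` because the factory returns the class itself rather than an instance. A minimal sketch of that dispatch shape, with hypothetical patterns; bdfr's real routing logic is richer than this:

    import re

    # Hypothetical routes for illustration only.
    _ROUTES = [
        (re.compile(r"(i\.|m\.)?imgur\.com"), "Imgur"),
        (re.compile(r"v\.redd\.it"), "VReddit"),
        (re.compile(r"\.(png|jpg|gif|mp4)$"), "Direct"),
    ]

    def pull_lever_sketch(url: str) -> str:
        for pattern, handler in _ROUTES:
            if pattern.search(url):
                return handler
        raise ValueError(f"Not a downloadable link: {url}")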

@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

+import re
 from unittest.mock import MagicMock

 import pytest

@@ -9,46 +10,46 @@ from bdfr.site_downloaders.erome import Erome

 @pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected_urls'), (
-    ('https://www.erome.com/a/vqtPuLXh', (
-        'https://s11.erome.com/365/vqtPuLXh/KH2qBT99_480p.mp4',
-    )),
-    ('https://www.erome.com/a/ORhX0FZz', (
-        'https://s15.erome.com/355/ORhX0FZz/9IYQocM9_480p.mp4',
-        'https://s15.erome.com/355/ORhX0FZz/9eEDc8xm_480p.mp4',
-        'https://s15.erome.com/355/ORhX0FZz/EvApC7Rp_480p.mp4',
-        'https://s15.erome.com/355/ORhX0FZz/LruobtMs_480p.mp4',
-        'https://s15.erome.com/355/ORhX0FZz/TJNmSUU5_480p.mp4',
-        'https://s15.erome.com/355/ORhX0FZz/X11Skh6Z_480p.mp4',
-        'https://s15.erome.com/355/ORhX0FZz/bjlTkpn7_480p.mp4'
-    )),
-))
+@pytest.mark.parametrize(
+    ("test_url", "expected_urls"),
+    (
+        ("https://www.erome.com/a/vqtPuLXh", (r"https://[a-z]\d+.erome.com/\d{3}/vqtPuLXh/KH2qBT99_480p.mp4",)),
+        (
+            "https://www.erome.com/a/ORhX0FZz",
+            (
+                r"https://[a-z]\d+.erome.com/\d{3}/ORhX0FZz/9IYQocM9_480p.mp4",
+                r"https://[a-z]\d+.erome.com/\d{3}/ORhX0FZz/9eEDc8xm_480p.mp4",
+                r"https://[a-z]\d+.erome.com/\d{3}/ORhX0FZz/EvApC7Rp_480p.mp4",
+                r"https://[a-z]\d+.erome.com/\d{3}/ORhX0FZz/LruobtMs_480p.mp4",
+                r"https://[a-z]\d+.erome.com/\d{3}/ORhX0FZz/TJNmSUU5_480p.mp4",
+                r"https://[a-z]\d+.erome.com/\d{3}/ORhX0FZz/X11Skh6Z_480p.mp4",
+                r"https://[a-z]\d+.erome.com/\d{3}/ORhX0FZz/bjlTkpn7_480p.mp4",
+            ),
+        ),
+    ),
+)
 def test_get_link(test_url: str, expected_urls: tuple[str]):
-    result = Erome._get_links(test_url)
-    assert set(result) == set(expected_urls)
+    result = Erome._get_links(test_url)
+    assert all([any([re.match(p, r) for r in result]) for p in expected_urls])
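The switch from exact URL equality to regex matching accommodates Erome's rotating CDN shards: the host (s11, s15, ...) and the numeric path segment vary between requests. For instance, both of these concrete URLs satisfy the first pattern above:

    import re
    pattern = r"https://[a-z]\d+.erome.com/\d{3}/vqtPuLXh/KH2qBT99_480p.mp4"
    assert re.match(pattern, "https://s11.erome.com/365/vqtPuLXh/KH2qBT99_480p.mp4")
    assert re.match(pattern, "https://s15.erome.com/355/vqtPuLXh/KH2qBT99_480p.mp4")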


 @pytest.mark.online
 @pytest.mark.slow
-@pytest.mark.parametrize(('test_url', 'expected_hashes'), (
-    ('https://www.erome.com/a/vqtPuLXh', {
-        '5da2a8d60d87bed279431fdec8e7d72f'
-    }),
-    ('https://www.erome.com/a/lGrcFxmb', {
-        '0e98f9f527a911dcedde4f846bb5b69f',
-        '25696ae364750a5303fc7d7dc78b35c1',
-        '63775689f438bd393cde7db6d46187de',
-        'a1abf398cfd4ef9cfaf093ceb10c746a',
-        'bd9e1a4ea5ef0d6ba47fb90e337c2d14'
-    }),
-))
-def test_download_resource(test_url: str, expected_hashes: tuple[str]):
+@pytest.mark.parametrize(
+    ("test_url", "expected_hashes_len"),
+    (
+        ("https://www.erome.com/a/vqtPuLXh", 1),
+        ("https://www.erome.com/a/4tP3KI6F", 1),
+    ),
+)
+def test_download_resource(test_url: str, expected_hashes_len: int):
+    # Can't compare hashes for this test, Erome doesn't return the exact same file from request to request so the hash
+    # will change back and forth randomly
     mock_submission = MagicMock()
     mock_submission.url = test_url
     test_site = Erome(mock_submission)
     resources = test_site.find_resources()
-    [res.download() for res in resources]
+    for res in resources:
+        res.download()
     resource_hashes = [res.hash.hexdigest() for res in resources]
-    assert len(resource_hashes) == len(expected_hashes)
+    assert len(resource_hashes) == expected_hashes_len
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 import praw
 import pytest

@@ -9,30 +9,39 @@ from bdfr.site_downloaders.gallery import Gallery

 @pytest.mark.online
-@pytest.mark.parametrize(('test_ids', 'expected'), (
-    ([
-        {'media_id': '18nzv9ch0hn61'},
-        {'media_id': 'jqkizcch0hn61'},
-        {'media_id': 'k0fnqzbh0hn61'},
-        {'media_id': 'm3gamzbh0hn61'},
-    ], {
-        'https://i.redd.it/18nzv9ch0hn61.jpg',
-        'https://i.redd.it/jqkizcch0hn61.jpg',
-        'https://i.redd.it/k0fnqzbh0hn61.jpg',
-        'https://i.redd.it/m3gamzbh0hn61.jpg'
-    }),
-    ([
-        {'media_id': '04vxj25uqih61'},
-        {'media_id': '0fnx83kpqih61'},
-        {'media_id': '7zkmr1wqqih61'},
-        {'media_id': 'u37k5gxrqih61'},
-    ], {
-        'https://i.redd.it/04vxj25uqih61.png',
-        'https://i.redd.it/0fnx83kpqih61.png',
-        'https://i.redd.it/7zkmr1wqqih61.png',
-        'https://i.redd.it/u37k5gxrqih61.png'
-    }),
-))
+@pytest.mark.parametrize(
+    ("test_ids", "expected"),
+    (
+        (
+            [
+                {"media_id": "18nzv9ch0hn61"},
+                {"media_id": "jqkizcch0hn61"},
+                {"media_id": "k0fnqzbh0hn61"},
+                {"media_id": "m3gamzbh0hn61"},
+            ],
+            {
+                "https://i.redd.it/18nzv9ch0hn61.jpg",
+                "https://i.redd.it/jqkizcch0hn61.jpg",
+                "https://i.redd.it/k0fnqzbh0hn61.jpg",
+                "https://i.redd.it/m3gamzbh0hn61.jpg",
+            },
+        ),
+        (
+            [
+                {"media_id": "04vxj25uqih61"},
+                {"media_id": "0fnx83kpqih61"},
+                {"media_id": "7zkmr1wqqih61"},
+                {"media_id": "u37k5gxrqih61"},
+            ],
+            {
+                "https://i.redd.it/04vxj25uqih61.png",
+                "https://i.redd.it/0fnx83kpqih61.png",
+                "https://i.redd.it/7zkmr1wqqih61.png",
+                "https://i.redd.it/u37k5gxrqih61.png",
+            },
+        ),
+    ),
+)
 def test_gallery_get_links(test_ids: list[dict], expected: set[str]):
     results = Gallery._get_links(test_ids)
     assert set(results) == expected

@@ -40,24 +49,47 @@ def test_gallery_get_links(test_ids: list[dict], expected: set[str]):

 @pytest.mark.online
 @pytest.mark.reddit
-@pytest.mark.parametrize(('test_submission_id', 'expected_hashes'), (
-    ('m6lvrh', {
-        '5c42b8341dd56eebef792e86f3981c6a',
-        '8f38d76da46f4057bf2773a778e725ca',
-        'f5776f8f90491c8b770b8e0a6bfa49b3',
-        'fa1a43c94da30026ad19a9813a0ed2c2',
-    }),
-    ('ljyy27', {
-        '359c203ec81d0bc00e675f1023673238',
-        '79262fd46bce5bfa550d878a3b898be4',
-        '808c35267f44acb523ce03bfa5687404',
-        'ec8b65bdb7f1279c4b3af0ea2bbb30c3',
-    }),
-    ('obkflw', {
-        '65163f685fb28c5b776e0e77122718be',
-        '2a337eb5b13c34d3ca3f51b5db7c13e9',
-    }),
-))
+@pytest.mark.parametrize(
+    ("test_submission_id", "expected_hashes"),
+    (
+        (
+            "m6lvrh",
+            {
+                "5c42b8341dd56eebef792e86f3981c6a",
+                "8f38d76da46f4057bf2773a778e725ca",
+                "f5776f8f90491c8b770b8e0a6bfa49b3",
+                "fa1a43c94da30026ad19a9813a0ed2c2",
+            },
+        ),
+        (
+            "ljyy27",
+            {
+                "359c203ec81d0bc00e675f1023673238",
+                "79262fd46bce5bfa550d878a3b898be4",
+                "808c35267f44acb523ce03bfa5687404",
+                "ec8b65bdb7f1279c4b3af0ea2bbb30c3",
+            },
+        ),
+        (
+            "obkflw",
+            {
+                "65163f685fb28c5b776e0e77122718be",
+                "2a337eb5b13c34d3ca3f51b5db7c13e9",
+            },
+        ),
+        (
+            "rb3ub6",
+            {  # patreon post
+                "748a976c6cedf7ea85b6f90e7cb685c7",
+                "839796d7745e88ced6355504e1f74508",
+                "bcdb740367d0f19f97a77e614b48a42d",
+                "0f230b8c4e5d103d35a773fab9814ec3",
+                "e5192d6cb4f84c4f4a658355310bf0f9",
+                "91cbe172cd8ccbcf049fcea4204eb979",
+            },
+        ),
+    ),
+)
 def test_gallery_download(test_submission_id: str, expected_hashes: set[str], reddit_instance: praw.Reddit):
     test_submission = reddit_instance.submission(id=test_submission_id)
     gallery = Gallery(test_submission)

@@ -67,10 +99,13 @@ def test_gallery_download(test_submission_id: str, expected_hashes: set[str], reddit_instance: praw.Reddit):
     assert set(hashes) == expected_hashes


-@pytest.mark.parametrize('test_id', (
-    'n0pyzp',
-    'nxyahw',
-))
+@pytest.mark.parametrize(
+    "test_id",
+    (
+        "n0pyzp",
+        "nxyahw",
+    ),
+)
 def test_gallery_download_raises_right_error(test_id: str, reddit_instance: praw.Reddit):
     test_submission = reddit_instance.submission(id=test_id)
     gallery = Gallery(test_submission)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 from unittest.mock import Mock

@@ -10,20 +10,35 @@ from bdfr.site_downloaders.gfycat import Gfycat

 @pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected_url'), (
-    ('https://gfycat.com/definitivecaninecrayfish', 'https://giant.gfycat.com/DefinitiveCanineCrayfish.mp4'),
-    ('https://gfycat.com/dazzlingsilkyiguana', 'https://giant.gfycat.com/DazzlingSilkyIguana.mp4'),
-))
+@pytest.mark.parametrize(
+    ("test_url", "expected_url"),
+    (
+        ("https://gfycat.com/definitivecaninecrayfish", "https://giant.gfycat.com/DefinitiveCanineCrayfish.mp4"),
+        ("https://gfycat.com/dazzlingsilkyiguana", "https://giant.gfycat.com/DazzlingSilkyIguana.mp4"),
+        ("https://gfycat.com/WearyComposedHairstreak", "https://thumbs4.redgifs.com/WearyComposedHairstreak.mp4"),
+        (
+            "https://thumbs.gfycat.com/ComposedWholeBullfrog-size_restricted.gif",
+            "https://thumbs4.redgifs.com/ComposedWholeBullfrog.mp4",
+        ),
+        ("https://giant.gfycat.com/ComposedWholeBullfrog.mp4", "https://thumbs4.redgifs.com/ComposedWholeBullfrog.mp4"),
+    ),
+)
 def test_get_link(test_url: str, expected_url: str):
     result = Gfycat._get_link(test_url)
-    assert result == expected_url
+    assert expected_url in result.pop()
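A short gloss on the changed assertion: _get_link evidently now returns a collection of candidate URLs rather than a single string, so the test pops one element and checks the expected URL as a substring, which also tolerates query parameters appended by the CDN. For example:

    # Passes even if the returned URL carries extra query parameters:
    returned = "https://thumbs4.redgifs.com/ComposedWholeBullfrog.mp4?expires=123"
    assert "https://thumbs4.redgifs.com/ComposedWholeBullfrog.mp4" in returned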


 @pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected_hash'), (
-    ('https://gfycat.com/definitivecaninecrayfish', '48f9bd4dbec1556d7838885612b13b39'),
-    ('https://gfycat.com/dazzlingsilkyiguana', '808941b48fc1e28713d36dd7ed9dc648'),
-))
+@pytest.mark.parametrize(
+    ("test_url", "expected_hash"),
+    (
+        ("https://gfycat.com/definitivecaninecrayfish", "48f9bd4dbec1556d7838885612b13b39"),
+        ("https://gfycat.com/dazzlingsilkyiguana", "808941b48fc1e28713d36dd7ed9dc648"),
+        ("https://gfycat.com/WearyComposedHairstreak", "5f82ba1ba23cc927c9fbb0c0421953a5"),
+        ("https://thumbs.gfycat.com/ComposedWholeBullfrog-size_restricted.gif", "5292343665a13b5369d889d911ae284d"),
+        ("https://giant.gfycat.com/ComposedWholeBullfrog.mp4", "5292343665a13b5369d889d911ae284d"),
+    ),
+)
 def test_download_resource(test_url: str, expected_hash: str):
     mock_submission = Mock()
     mock_submission.url = test_url
@@ -1,148 +1,57 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 from unittest.mock import Mock

 import pytest

 from bdfr.exceptions import SiteDownloaderError
 from bdfr.resource import Resource
 from bdfr.site_downloaders.imgur import Imgur


 @pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected_gen_dict', 'expected_image_dict'), (
-    (
-        'https://imgur.com/a/xWZsDDP',
-        {'num_images': '1', 'id': 'xWZsDDP', 'hash': 'xWZsDDP'},
-        [
-            {'hash': 'ypa8YfS', 'title': '', 'ext': '.png', 'animated': False}
-        ]
-    ),
-    (
-        'https://imgur.com/gallery/IjJJdlC',
-        {'num_images': 1, 'id': 384898055, 'hash': 'IjJJdlC'},
-        [
-            {'hash': 'CbbScDt',
-             'description': 'watch when he gets it',
-             'ext': '.gif',
-             'animated': True,
-             'has_sound': False
-             }
-        ],
-    ),
-    (
-        'https://imgur.com/a/dcc84Gt',
-        {'num_images': '4', 'id': 'dcc84Gt', 'hash': 'dcc84Gt'},
-        [
-            {'hash': 'ylx0Kle', 'ext': '.jpg', 'title': ''},
-            {'hash': 'TdYfKbK', 'ext': '.jpg', 'title': ''},
-            {'hash': 'pCxGbe8', 'ext': '.jpg', 'title': ''},
-            {'hash': 'TSAkikk', 'ext': '.jpg', 'title': ''},
-        ]
-    ),
-    (
-        'https://m.imgur.com/a/py3RW0j',
-        {'num_images': '1', 'id': 'py3RW0j', 'hash': 'py3RW0j', },
-        [
-            {'hash': 'K24eQmK', 'has_sound': False, 'ext': '.jpg'}
-        ],
-    ),
-))
-def test_get_data_album(test_url: str, expected_gen_dict: dict, expected_image_dict: list[dict]):
-    result = Imgur._get_data(test_url)
-    assert all([result.get(key) == expected_gen_dict[key] for key in expected_gen_dict.keys()])
-
-    # Check if all the keys from the test dict are correct in at least one of the album entries
-    assert any([all([image.get(key) == image_dict[key] for key in image_dict.keys()])
-                for image_dict in expected_image_dict for image in result['album_images']['images']])
-
-
-@pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected_image_dict'), (
-    (
-        'https://i.imgur.com/dLk3FGY.gifv',
-        {'hash': 'dLk3FGY', 'title': '', 'ext': '.mp4', 'animated': True}
-    ),
-    (
-        'https://imgur.com/65FqTpT.gifv',
-        {
-            'hash': '65FqTpT',
-            'title': '',
-            'description': '',
-            'animated': True,
-            'mimetype': 'video/mp4'
-        },
-    ),
-))
-def test_get_data_gif(test_url: str, expected_image_dict: dict):
-    result = Imgur._get_data(test_url)
-    assert all([result.get(key) == expected_image_dict[key] for key in expected_image_dict.keys()])
-
-
-@pytest.mark.parametrize('test_extension', (
-    '.gif',
-    '.png',
-    '.jpg',
-    '.mp4'
-))
-def test_imgur_extension_validation_good(test_extension: str):
-    result = Imgur._validate_extension(test_extension)
-    assert result == test_extension
-
-
-@pytest.mark.parametrize('test_extension', (
-    '.jpeg',
-    'bad',
-    '.avi',
-    '.test',
-    '.flac',
-))
-def test_imgur_extension_validation_bad(test_extension: str):
-    with pytest.raises(SiteDownloaderError):
-        Imgur._validate_extension(test_extension)
-
-
-@pytest.mark.online
-@pytest.mark.parametrize(('test_url', 'expected_hashes'), (
-    (
-        'https://imgur.com/a/xWZsDDP',
-        ('f551d6e6b0fef2ce909767338612e31b',)
-    ),
-    (
-        'https://imgur.com/gallery/IjJJdlC',
-        ('7227d4312a9779b74302724a0cfa9081',),
-    ),
-    (
-        'https://imgur.com/a/dcc84Gt',
-        (
-            'cf1158e1de5c3c8993461383b96610cf',
-            '28d6b791a2daef8aa363bf5a3198535d',
-            '248ef8f2a6d03eeb2a80d0123dbaf9b6',
-            '029c475ce01b58fdf1269d8771d33913',
-        ),
-    ),
-    (
-        'https://imgur.com/a/eemHCCK',
-        (
-            '9cb757fd8f055e7ef7aa88addc9d9fa5',
-            'b6cb6c918e2544e96fb7c07d828774b5',
-            'fb6c913d721c0bbb96aa65d7f560d385',
-        ),
-    ),
-    (
-        'https://i.imgur.com/lFJai6i.gifv',
-        ('01a6e79a30bec0e644e5da12365d5071',),
-    ),
-    (
-        'https://i.imgur.com/ywSyILa.gifv?',
-        ('56d4afc32d2966017c38d98568709b45',),
-    ),
-    (
-        'https://imgur.com/ubYwpbk.GIFV',
-        ('d4a774aac1667783f9ed3a1bd02fac0c',),
-    ),
-))
+@pytest.mark.parametrize(
+    ("test_url", "expected_hashes"),
+    (
+        ("https://imgur.com/a/xWZsDDP", ("f551d6e6b0fef2ce909767338612e31b",)),
+        ("https://imgur.com/gallery/IjJJdlC", ("740b006cf9ec9d6f734b6e8f5130bdab",)),
+        ("https://imgur.com/gallery/IjJJdlC/", ("740b006cf9ec9d6f734b6e8f5130bdab",)),
+        (
+            "https://imgur.com/a/dcc84Gt",
+            (
+                "cf1158e1de5c3c8993461383b96610cf",
+                "28d6b791a2daef8aa363bf5a3198535d",
+                "248ef8f2a6d03eeb2a80d0123dbaf9b6",
+                "029c475ce01b58fdf1269d8771d33913",
+            ),
+        ),
+        (
+            "https://imgur.com/a/eemHCCK",
+            (
+                "9cb757fd8f055e7ef7aa88addc9d9fa5",
+                "b6cb6c918e2544e96fb7c07d828774b5",
+                "fb6c913d721c0bbb96aa65d7f560d385",
+            ),
+        ),
+        ("https://o.imgur.com/jZw9gq2.jpg", ("6d6ea9aa1d98827a05425338afe675bc",)),
+        ("https://i.imgur.com/lFJai6i.gifv", ("01a6e79a30bec0e644e5da12365d5071",)),
+        ("https://i.imgur.com/ywSyILa.gifv?", ("56d4afc32d2966017c38d98568709b45",)),
+        ("https://imgur.com/ubYwpbk.GIFV", ("d4a774aac1667783f9ed3a1bd02fac0c",)),
+        ("https://i.imgur.com/j1CNCZY.gifv", ("ed63d7062bc32edaeea8b53f876a307c",)),
+        ("https://i.imgur.com/uTvtQsw.gifv", ("46c86533aa60fc0e09f2a758513e3ac2",)),
+        ("https://i.imgur.com/OGeVuAe.giff", ("77389679084d381336f168538793f218",)),
+        ("https://i.imgur.com/OGeVuAe.gift", ("77389679084d381336f168538793f218",)),
+        ("https://i.imgur.com/3SKrQfK.jpg?1", ("aa299e181b268578979cad176d1bd1d0",)),
+        ("https://i.imgur.com/cbivYRW.jpg?3", ("7ec6ceef5380cb163a1d498c359c51fd",)),
+        ("http://i.imgur.com/s9uXxlq.jpg?5.jpg", ("338de3c23ee21af056b3a7c154e2478f",)),
+        ("http://i.imgur.com/s9uXxlqb.jpg", ("338de3c23ee21af056b3a7c154e2478f",)),
+        ("https://i.imgur.com/2TtN68l_d.webp", ("6569ab9ad9fa68d93f6b408f112dd741",)),
+        ("https://imgur.com/a/1qzfWtY/gifv", ("65fbc7ba5c3ed0e3af47c4feef4d3735",)),
+        ("https://imgur.com/a/1qzfWtY/mp4", ("65fbc7ba5c3ed0e3af47c4feef4d3735",)),
+        ("https://imgur.com/a/1qzfWtY/spqr", ("65fbc7ba5c3ed0e3af47c4feef4d3735",)),
+        ("https://i.imgur.com/expO7Rc.gifv", ("e309f98158fc98072eb2ae68f947f421",)),
+    ),
+)
 def test_find_resources(test_url: str, expected_hashes: list[str]):
     mock_download = Mock()
     mock_download.url = test_url
@@ -1,20 +1,22 @@
 #!/usr/bin/env python3
-# coding=utf-8
+# -*- coding: utf-8 -*-

 from unittest.mock import MagicMock

 import pytest

+from bdfr.exceptions import SiteDownloaderError
 from bdfr.resource import Resource
 from bdfr.site_downloaders.pornhub import PornHub


 @pytest.mark.online
 @pytest.mark.slow
-@pytest.mark.parametrize(('test_url', 'expected_hash'), (
-    ('https://www.pornhub.com/view_video.php?viewkey=ph5a2ee0461a8d0', '5f5294b9b97dbb7cb9cf8df278515621'),
-))
-def test_find_resources_good(test_url: str, expected_hash: str):
+@pytest.mark.parametrize(
+    ("test_url", "expected_hash"),
+    (("https://www.pornhub.com/view_video.php?viewkey=ph5eafee2d174ff", "d15090cbbaa8ee90500a257c7899ff84"),),
+)
+def test_hash_resources_good(test_url: str, expected_hash: str):
     test_submission = MagicMock()
     test_submission.url = test_url
     downloader = PornHub(test_submission)

@@ -23,3 +25,13 @@ def test_find_resources_good(test_url: str, expected_hash: str):
     assert isinstance(resources[0], Resource)
     resources[0].download()
     assert resources[0].hash.hexdigest() == expected_hash
+
+
+@pytest.mark.online
+@pytest.mark.parametrize("test_url", ("https://www.pornhub.com/view_video.php?viewkey=ph5ede121f0d3f8",))
+def test_find_resources_good(test_url: str):
+    test_submission = MagicMock()
+    test_submission.url = test_url
+    downloader = PornHub(test_submission)
+    with pytest.raises(SiteDownloaderError):
+        downloader.find_resources()
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import re
from unittest.mock import Mock

import pytest

@@ -9,33 +10,115 @@ from bdfr.resource import Resource
from bdfr.site_downloaders.redgifs import Redgifs


@pytest.mark.online
@pytest.mark.parametrize(('test_url', 'expected'), (
    ('https://redgifs.com/watch/frighteningvictorioussalamander',
     'https://thumbs2.redgifs.com/FrighteningVictoriousSalamander.mp4'),
    ('https://redgifs.com/watch/springgreendecisivetaruca',
     'https://thumbs2.redgifs.com/SpringgreenDecisiveTaruca.mp4'),
    ('https://www.redgifs.com/watch/palegoldenrodrawhalibut',
     'https://thumbs2.redgifs.com/PalegoldenrodRawHalibut.mp4'),
))
def test_get_link(test_url: str, expected: str):
    result = Redgifs._get_link(test_url)
@pytest.mark.parametrize(
    ("test_url", "expected"),
    (
        ("https://redgifs.com/watch/frighteningvictorioussalamander", "frighteningvictorioussalamander"),
        ("https://www.redgifs.com/watch/genuineprivateguillemot/", "genuineprivateguillemot"),
        ("https://www.redgifs.com/watch/marriedcrushingcob?rel=u%3Akokiri.girl%3Bo%3Arecent", "marriedcrushingcob"),
        ("https://thumbs4.redgifs.com/DismalIgnorantDrongo.mp4", "dismalignorantdrongo"),
        ("https://thumbs4.redgifs.com/DismalIgnorantDrongo-mobile.mp4", "dismalignorantdrongo"),
        ("https://v3.redgifs.com/watch/newilliteratemeerkat#rel=user%3Atastynova", "newilliteratemeerkat"),
    ),
)
def test_get_id(test_url: str, expected: str):
    result = Redgifs._get_id(test_url)
    assert result == expected
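The new test_get_id cases pin down how a Redgifs ID is recovered from watch pages, thumbs URLs, query strings, and fragments. A minimal sketch consistent with those cases (the helper and regexes are illustrative, not the actual _get_id implementation):

import re


def extract_redgifs_id(url: str) -> str:
    # Drop the query string/fragment, take the last path segment, then strip
    # a "-mobile"-style suffix and any file extension before lowercasing.
    tail = re.sub(r"[?#].*$", "", url).rstrip("/").rsplit("/", 1)[-1]
    return re.sub(r"(-\w+)?\.\w+$", "", tail).lower()


assert extract_redgifs_id("https://thumbs4.redgifs.com/DismalIgnorantDrongo-mobile.mp4") == "dismalignorantdrongo"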
@pytest.mark.online
@pytest.mark.parametrize(('test_url', 'expected_hash'), (
    ('https://redgifs.com/watch/frighteningvictorioussalamander', '4007c35d9e1f4b67091b5f12cffda00a'),
    ('https://redgifs.com/watch/springgreendecisivetaruca', '8dac487ac49a1f18cc1b4dabe23f0869'),
    ('https://redgifs.com/watch/leafysaltydungbeetle', '076792c660b9c024c0471ef4759af8bd'),
    ('https://www.redgifs.com/watch/palegoldenrodrawhalibut', '46d5aa77fe80c6407de1ecc92801c10e'),
))
def test_download_resource(test_url: str, expected_hash: str):
@pytest.mark.parametrize(
    ("test_url", "expected"),
    (
        ("https://redgifs.com/watch/frighteningvictorioussalamander", {"FrighteningVictoriousSalamander.mp4"}),
        ("https://redgifs.com/watch/springgreendecisivetaruca", {"SpringgreenDecisiveTaruca.mp4"}),
        ("https://www.redgifs.com/watch/palegoldenrodrawhalibut", {"PalegoldenrodRawHalibut.mp4"}),
        ("https://redgifs.com/watch/hollowintentsnowyowl", {"HollowIntentSnowyowl-large.jpg"}),
        (
            "https://www.redgifs.com/watch/lustrousstickywaxwing",
            {
                "EntireEnchantingHypsilophodon-large.jpg",
                "FancyMagnificentAdamsstaghornedbeetle-large.jpg",
                "LustrousStickyWaxwing-large.jpg",
                "ParchedWindyArmyworm-large.jpg",
                "ThunderousColorlessErmine-large.jpg",
                "UnripeUnkemptWoodpecker-large.jpg",
            },
        ),
        ("https://www.redgifs.com/watch/genuineprivateguillemot/", {"GenuinePrivateGuillemot.mp4"}),
    ),
)
def test_get_link(test_url: str, expected: set[str]):
    result = Redgifs._get_link(test_url)
    result = list(result)
    patterns = [r"https://thumbs\d\.redgifs\.com/" + e + r".*" for e in expected]
    assert all([re.match(p, r) for p in patterns] for r in result)


@pytest.mark.online
@pytest.mark.parametrize(
    ("test_url", "expected_hashes"),
    (
        ("https://redgifs.com/watch/frighteningvictorioussalamander", {"4007c35d9e1f4b67091b5f12cffda00a"}),
        ("https://redgifs.com/watch/springgreendecisivetaruca", {"8dac487ac49a1f18cc1b4dabe23f0869"}),
        ("https://redgifs.com/watch/leafysaltydungbeetle", {"076792c660b9c024c0471ef4759af8bd"}),
        ("https://www.redgifs.com/watch/palegoldenrodrawhalibut", {"46d5aa77fe80c6407de1ecc92801c10e"}),
        ("https://redgifs.com/watch/hollowintentsnowyowl", {"5ee51fa15e0a58e98f11dea6a6cca771"}),
        (
            "https://www.redgifs.com/watch/lustrousstickywaxwing",
            {
                "b461e55664f07bed8d2f41d8586728fa",
                "30ba079a8ed7d7adf17929dc3064c10f",
                "0d4f149d170d29fc2f015c1121bab18b",
                "53987d99cfd77fd65b5fdade3718f9f1",
                "fb2e7d972846b83bf4016447d3060d60",
                "44fb28f72ec9a5cca63fa4369ab4f672",
            },
        ),
    ),
)
def test_download_resource(test_url: str, expected_hashes: set[str]):
    mock_submission = Mock()
    mock_submission.url = test_url
    test_site = Redgifs(mock_submission)
    resources = test_site.find_resources()
    assert len(resources) == 1
    assert isinstance(resources[0], Resource)
    resources[0].download()
    assert resources[0].hash.hexdigest() == expected_hash
    results = test_site.find_resources()
    assert all([isinstance(res, Resource) for res in results])
    [res.download() for res in results]
    hashes = set([res.hash.hexdigest() for res in results])
    assert hashes == set(expected_hashes)
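The rewritten download test compares digests as sets because an album yields several resources with no guaranteed ordering. The idea in isolation, with stand-in bytes rather than real downloads:

import hashlib

payloads = [b"first file", b"second file"]  # stand-ins for downloaded resources
hashes = {hashlib.md5(p).hexdigest() for p in payloads}
expected = {hashlib.md5(b"second file").hexdigest(), hashlib.md5(b"first file").hexdigest()}
assert hashes == expected  # holds regardless of download order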
@pytest.mark.online
@pytest.mark.parametrize(
    ("test_url", "expected_link", "expected_hash"),
    (
        (
            "https://redgifs.com/watch/flippantmemorablebaiji",
            {"FlippantMemorableBaiji-mobile.mp4"},
            {"41a5fb4865367ede9f65fc78736f497a"},
        ),
        (
            "https://redgifs.com/watch/thirstyunfortunatewaterdragons",
            {"thirstyunfortunatewaterdragons-mobile.mp4"},
            {"1a51dad8fedb594bdd84f027b3cbe8af"},
        ),
        (
            "https://redgifs.com/watch/conventionalplainxenopterygii",
            {"conventionalplainxenopterygii-mobile.mp4"},
            {"2e1786b3337da85b80b050e2c289daa4"},
        ),
    ),
)
def test_hd_soft_fail(test_url: str, expected_link: set[str], expected_hash: set[str]):
    link = Redgifs._get_link(test_url)
    link = list(link)
    patterns = [r"https://thumbs\d\.redgifs\.com/" + e + r".*" for e in expected_link]
    assert all([re.match(p, r) for p in patterns] for r in link)
    mock_submission = Mock()
    mock_submission.url = test_url
    test_site = Redgifs(mock_submission)
    results = test_site.find_resources()
    assert all([isinstance(res, Resource) for res in results])
    [res.download() for res in results]
    hashes = set([res.hash.hexdigest() for res in results])
    assert hashes == set(expected_hash)
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

import praw
import pytest

@@ -10,11 +10,14 @@ from bdfr.site_downloaders.self_post import SelfPost

@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.parametrize(('test_submission_id', 'expected_hash'), (
    ('ltmivt', '7d2c9e4e989e5cf2dca2e55a06b1c4f6'),
    ('ltoaan', '221606386b614d6780c2585a59bd333f'),
    ('d3sc8o', 'c1ff2b6bd3f6b91381dcd18dfc4ca35f'),
))
@pytest.mark.parametrize(
    ("test_submission_id", "expected_hash"),
    (
        ("ltmivt", "7d2c9e4e989e5cf2dca2e55a06b1c4f6"),
        ("ltoaan", "221606386b614d6780c2585a59bd333f"),
        ("d3sc8o", "c1ff2b6bd3f6b91381dcd18dfc4ca35f"),
    ),
)
def test_find_resource(test_submission_id: str, expected_hash: str, reddit_instance: praw.Reddit):
    submission = reddit_instance.submission(id=test_submission_id)
    downloader = SelfPost(submission)
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

from unittest.mock import Mock

import pytest

@@ -8,54 +9,83 @@ from bdfr.resource import Resource
from bdfr.site_downloaders.vidble import Vidble


@pytest.mark.parametrize(('test_url', 'expected'), (
    ('/RDFbznUvcN_med.jpg', '/RDFbznUvcN.jpg'),
))
@pytest.mark.parametrize(("test_url", "expected"), (("/RDFbznUvcN_med.jpg", "/RDFbznUvcN.jpg"),))
def test_change_med_url(test_url: str, expected: str):
    result = Vidble.change_med_url(test_url)
    assert result == expected
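The single parametrized case implies that change_med_url strips a "_med" thumbnail suffix while keeping the extension. One plausible implementation of that transformation (illustrative, not necessarily Vidble's exact code):

import re


def change_med_url(url: str) -> str:
    # "/RDFbznUvcN_med.jpg" -> "/RDFbznUvcN.jpg"
    return re.sub(r"_med(\.\w+)$", r"\1", url)


assert change_med_url("/RDFbznUvcN_med.jpg") == "/RDFbznUvcN.jpg"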
@pytest.mark.online
@pytest.mark.parametrize(('test_url', 'expected'), (
    ('https://www.vidble.com/show/UxsvAssYe5', {
        'https://www.vidble.com/UxsvAssYe5.gif',
    }),
    ('https://vidble.com/show/RDFbznUvcN', {
        'https://www.vidble.com/RDFbznUvcN.jpg',
    }),
    ('https://vidble.com/album/h0jTLs6B', {
        'https://www.vidble.com/XG4eAoJ5JZ.jpg',
        'https://www.vidble.com/IqF5UdH6Uq.jpg',
        'https://www.vidble.com/VWuNsnLJMD.jpg',
        'https://www.vidble.com/sMmM8O650W.jpg',
    }),
    ('https://vidble.com/watch?v=0q4nWakqM6kzQWxlePD8N62Dsflev0N9', {
        'https://www.vidble.com/0q4nWakqM6kzQWxlePD8N62Dsflev0N9.mp4',
    }),
))
@pytest.mark.parametrize(
    ("test_url", "expected"),
    (
        (
            "https://www.vidble.com/show/UxsvAssYe5",
            {
                "https://www.vidble.com/UxsvAssYe5.gif",
            },
        ),
        (
            "https://vidble.com/show/RDFbznUvcN",
            {
                "https://www.vidble.com/RDFbznUvcN.jpg",
            },
        ),
        (
            "https://vidble.com/album/h0jTLs6B",
            {
                "https://www.vidble.com/XG4eAoJ5JZ.jpg",
                "https://www.vidble.com/IqF5UdH6Uq.jpg",
                "https://www.vidble.com/VWuNsnLJMD.jpg",
                "https://www.vidble.com/sMmM8O650W.jpg",
            },
        ),
        (
            "https://www.vidble.com/pHuwWkOcEb",
            {
                "https://www.vidble.com/pHuwWkOcEb.jpg",
            },
        ),
    ),
)
def test_get_links(test_url: str, expected: set[str]):
    results = Vidble.get_links(test_url)
    assert results == expected


@pytest.mark.parametrize(('test_url', 'expected_hashes'), (
    ('https://www.vidble.com/show/UxsvAssYe5', {
        '0ef2f8e0e0b45936d2fb3e6fbdf67e28',
    }),
    ('https://vidble.com/show/RDFbznUvcN', {
        'c2dd30a71e32369c50eed86f86efff58',
    }),
    ('https://vidble.com/album/h0jTLs6B', {
        '3b3cba02e01c91f9858a95240b942c71',
        'dd6ecf5fc9e936f9fb614eb6a0537f99',
        'b31a942cd8cdda218ed547bbc04c3a27',
        '6f77c570b451eef4222804bd52267481',
    }),
    ('https://vidble.com/watch?v=0q4nWakqM6kzQWxlePD8N62Dsflev0N9', {
        'cebe9d5f24dba3b0443e5097f160ca83',
    }),
))
@pytest.mark.online
@pytest.mark.parametrize(
    ("test_url", "expected_hashes"),
    (
        (
            "https://www.vidble.com/show/UxsvAssYe5",
            {
                "0ef2f8e0e0b45936d2fb3e6fbdf67e28",
            },
        ),
        (
            "https://vidble.com/show/RDFbznUvcN",
            {
                "c2dd30a71e32369c50eed86f86efff58",
            },
        ),
        (
            "https://vidble.com/album/h0jTLs6B",
            {
                "3b3cba02e01c91f9858a95240b942c71",
                "dd6ecf5fc9e936f9fb614eb6a0537f99",
                "b31a942cd8cdda218ed547bbc04c3a27",
                "6f77c570b451eef4222804bd52267481",
            },
        ),
        (
            "https://www.vidble.com/pHuwWkOcEb",
            {
                "585f486dd0b2f23a57bddbd5bf185bc7",
            },
        ),
    ),
)
def test_find_resources(test_url: str, expected_hashes: set[str]):
    mock_download = Mock()
    mock_download.url = test_url
43  tests/site_downloaders/test_vreddit.py  Normal file

@@ -0,0 +1,43 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from unittest.mock import MagicMock

import pytest

from bdfr.exceptions import NotADownloadableLinkError
from bdfr.resource import Resource
from bdfr.site_downloaders.vreddit import VReddit


@pytest.mark.online
@pytest.mark.slow
@pytest.mark.parametrize(
    ("test_url", "expected_hash"),
    (("https://reddit.com/r/Unexpected/comments/z4xsuj/omg_thats_so_cute/", "1ffab5e5c0cc96db18108e4f37e8ca7f"),),
)
def test_find_resources_good(test_url: str, expected_hash: str):
    test_submission = MagicMock()
    test_submission.url = test_url
    downloader = VReddit(test_submission)
    resources = downloader.find_resources()
    assert len(resources) == 1
    assert isinstance(resources[0], Resource)
    resources[0].download()
    assert resources[0].hash.hexdigest() == expected_hash


@pytest.mark.online
@pytest.mark.parametrize(
    "test_url",
    (
        "https://www.polygon.com/disney-plus/2020/5/14/21249881/gargoyles-animated-series-disney-plus-greg-weisman"
        "-interview-oj-simpson-goliath-chronicles",
    ),
)
def test_find_resources_bad(test_url: str):
    test_submission = MagicMock()
    test_submission.url = test_url
    downloader = VReddit(test_submission)
    with pytest.raises(NotADownloadableLinkError):
        downloader.find_resources()
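A small detail in test_find_resources_bad above: the URL is written as two adjacent string literals, which Python concatenates at compile time, and the trailing comma keeps the one-element parametrize argument a tuple. Demonstrated in isolation:

# Adjacent literals fuse into one string; the trailing comma makes a tuple.
url = (
    "https://www.polygon.com/disney-plus/2020/5/14/21249881/gargoyles-animated-series-disney-plus-greg-weisman"
    "-interview-oj-simpson-goliath-chronicles"
)
assert url.endswith("goliath-chronicles")
assert ("a" "b",) == ("ab",)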
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

from unittest.mock import MagicMock

@@ -12,10 +12,13 @@ from bdfr.site_downloaders.youtube import Youtube

@pytest.mark.online
@pytest.mark.slow
@pytest.mark.parametrize(('test_url', 'expected_hash'), (
    ('https://www.youtube.com/watch?v=uSm2VDgRIUs', 'f70b704b4b78b9bb5cd032bfc26e4971'),
    ('https://www.youtube.com/watch?v=GcI7nxQj7HA', '2bfdbf434ed284623e46f3bf52c36166'),
))
@pytest.mark.parametrize(
    ("test_url", "expected_hash"),
    (
        ("https://www.youtube.com/watch?v=uSm2VDgRIUs", "2d60b54582df5b95ec72bb00b580d2ff"),
        ("https://www.youtube.com/watch?v=GcI7nxQj7HA", "5db0fc92a0a7fb9ac91e63505eea9cf0"),
    ),
)
def test_find_resources_good(test_url: str, expected_hash: str):
    test_submission = MagicMock()
    test_submission.url = test_url

@@ -28,10 +31,13 @@ def test_find_resources_good(test_url: str, expected_hash: str):

@pytest.mark.online
@pytest.mark.parametrize('test_url', (
    'https://www.polygon.com/disney-plus/2020/5/14/21249881/gargoyles-animated-series-disney-plus-greg-weisman'
    '-interview-oj-simpson-goliath-chronicles',
))
@pytest.mark.parametrize(
    "test_url",
    (
        "https://www.polygon.com/disney-plus/2020/5/14/21249881/gargoyles-animated-series-disney-plus-greg-weisman"
        "-interview-oj-simpson-goliath-chronicles",
    ),
)
def test_find_resources_bad(test_url: str):
    test_submission = MagicMock()
    test_submission.url = test_url
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

from pathlib import Path
from unittest.mock import MagicMock

@@ -12,15 +12,18 @@ from bdfr.archiver import Archiver

@pytest.mark.online
@pytest.mark.reddit
@pytest.mark.parametrize(('test_submission_id', 'test_format'), (
    ('m3reby', 'xml'),
    ('m3reby', 'json'),
    ('m3reby', 'yaml'),
))
@pytest.mark.parametrize(
    ("test_submission_id", "test_format"),
    (
        ("m3reby", "xml"),
        ("m3reby", "json"),
        ("m3reby", "yaml"),
    ),
)
def test_write_submission_json(test_submission_id: str, tmp_path: Path, test_format: str, reddit_instance: praw.Reddit):
    archiver_mock = MagicMock()
    archiver_mock.args.format = test_format
    test_path = Path(tmp_path, 'test')
    test_path = Path(tmp_path, "test")
    test_submission = reddit_instance.submission(id=test_submission_id)
    archiver_mock.file_name_formatter.format_path.return_value = test_path
    Archiver.write_entry(archiver_mock, test_submission)
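The archiver test pushes one submission through xml, json, and yaml serializers selected via archiver_mock.args.format. A sketch of that kind of format dispatch (names and structure are assumptions, not BDFR's internals; xml is omitted for brevity):

import json

import yaml  # assumes PyYAML is available, as the yaml format implies


def serialize_entry(entry: dict, fmt: str) -> str:
    if fmt == "json":
        return json.dumps(entry)
    if fmt == "yaml":
        return yaml.safe_dump(entry)
    raise ValueError(f"unsupported format: {fmt}")


print(serialize_entry({"id": "m3reby", "title": "example"}, "json"))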
54  tests/test_completion.py  Normal file

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
from pathlib import Path
from unittest.mock import patch

import pytest

from bdfr.completion import Completion


@pytest.mark.skipif(sys.platform == "win32", reason="Completions are not currently supported on Windows.")
def test_cli_completion_all(tmp_path: Path):
    tmp_path = str(tmp_path)
    with patch("appdirs.user_data_dir", return_value=tmp_path):
        Completion("all").install()
        assert Path(tmp_path + "/bash-completion/completions/bdfr").exists() == 1
        assert Path(tmp_path + "/fish/vendor_completions.d/bdfr.fish").exists() == 1
        assert Path(tmp_path + "/zsh/site-functions/_bdfr").exists() == 1
        Completion("all").uninstall()
        assert Path(tmp_path + "/bash-completion/completions/bdfr").exists() == 0
        assert Path(tmp_path + "/fish/vendor_completions.d/bdfr.fish").exists() == 0
        assert Path(tmp_path + "/zsh/site-functions/_bdfr").exists() == 0


@pytest.mark.skipif(sys.platform == "win32", reason="Completions are not currently supported on Windows.")
def test_cli_completion_bash(tmp_path: Path):
    tmp_path = str(tmp_path)
    with patch("appdirs.user_data_dir", return_value=tmp_path):
        Completion("bash").install()
        assert Path(tmp_path + "/bash-completion/completions/bdfr").exists() == 1
        Completion("bash").uninstall()
        assert Path(tmp_path + "/bash-completion/completions/bdfr").exists() == 0


@pytest.mark.skipif(sys.platform == "win32", reason="Completions are not currently supported on Windows.")
def test_cli_completion_fish(tmp_path: Path):
    tmp_path = str(tmp_path)
    with patch("appdirs.user_data_dir", return_value=tmp_path):
        Completion("fish").install()
        assert Path(tmp_path + "/fish/vendor_completions.d/bdfr.fish").exists() == 1
        Completion("fish").uninstall()
        assert Path(tmp_path + "/fish/vendor_completions.d/bdfr.fish").exists() == 0


@pytest.mark.skipif(sys.platform == "win32", reason="Completions are not currently supported on Windows.")
def test_cli_completion_zsh(tmp_path: Path):
    tmp_path = str(tmp_path)
    with patch("appdirs.user_data_dir", return_value=tmp_path):
        Completion("zsh").install()
        assert Path(tmp_path + "/zsh/site-functions/_bdfr").exists() == 1
        Completion("zsh").uninstall()
        assert Path(tmp_path + "/zsh/site-functions/_bdfr").exists() == 0
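Each completion test redirects the install location by patching appdirs.user_data_dir, so nothing touches the real user directory. The pattern in isolation:

from unittest.mock import patch

import appdirs  # the tests assume this dependency is present

with patch("appdirs.user_data_dir", return_value="/tmp/fake-data-dir"):
    # Any code calling appdirs.user_data_dir() inside the block gets the fake.
    assert appdirs.user_data_dir() == "/tmp/fake-data-dir"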
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-

from unittest.mock import MagicMock

@@ -8,13 +8,16 @@ import pytest
from bdfr.configuration import Configuration


@pytest.mark.parametrize('arg_dict', (
    {'directory': 'test_dir'},
    {
        'directory': 'test_dir',
        'no_dupes': True,
    },
))
@pytest.mark.parametrize(
    "arg_dict",
    (
        {"directory": "test_dir"},
        {
            "directory": "test_dir",
            "no_dupes": True,
        },
    ),
)
def test_process_click_context(arg_dict: dict):
    test_config = Configuration()
    test_context = MagicMock()

@@ -22,3 +25,12 @@ def test_process_click_context(arg_dict: dict):
    test_config.process_click_arguments(test_context)
    test_config = vars(test_config)
    assert all([test_config[arg] == arg_dict[arg] for arg in arg_dict.keys()])


def test_yaml_file_read():
    file = "./tests/yaml_test_configuration.yaml"
    test_config = Configuration()
    test_config.parse_yaml_options(file)
    assert test_config.subreddit == ["EarthPorn", "TwoXChromosomes", "Mindustry"]
    assert test_config.sort == "new"
    assert test_config.limit == 10
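test_yaml_file_read implies that parse_yaml_options loads a YAML mapping and copies each key onto the configuration object as an attribute. A hedged sketch of that behaviour (assumes PyYAML; validation and error handling in the real method are omitted):

import yaml


class Config:
    def parse_yaml_options(self, file_path: str) -> None:
        with open(file_path) as f:
            options = yaml.safe_load(f) or {}
        for key, value in options.items():
            # e.g. subreddit: [...], sort: new, limit: 10 from the test fixture
            setattr(self, key, value)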