diff --git a/README.md b/README.md
index 6d5d320..f3cec58 100644
--- a/README.md
+++ b/README.md
@@ -55,6 +55,8 @@ It should redirect to a page which shows your **imgur_client_id** and **imgur_cl
 ## Changes on *master*
 ### [23/07/2018](https://github.com/aliparlakci/bulk-downloader-for-reddit/tree/bcae177b1e2d4e951db0fad26863b956fa920132)
 - Split download() funtion
+- Remove exclude feature
+- Bug fix
 ### [22/07/2018](https://github.com/aliparlakci/bulk-downloader-for-reddit/tree/a67da461d2fcd70672effcb20c8179e3224091bb)
 - Put log files in a folder named "LOG_FILES"
 
diff --git a/docs/COMMAND_LINE_ARGUMENTS.md b/docs/COMMAND_LINE_ARGUMENTS.md
index e816cfb..5c15e09 100644
--- a/docs/COMMAND_LINE_ARGUMENTS.md
+++ b/docs/COMMAND_LINE_ARGUMENTS.md
@@ -40,8 +40,6 @@ optional arguments:
                         all
   --NoDownload          Just gets the posts and store them in a file for
                         downloading later
-  --exclude {imgur,gfycat,direct,self} [{imgur,gfycat,direct,self} ...]
-                        Do not download specified links
 ```
 
 # Examples
diff --git a/script.py b/script.py
index 6f36a09..76b1dcc 100644
--- a/script.py
+++ b/script.py
@@ -144,11 +144,6 @@ def parseArguments(arguments=[]):
                         action="store_true",
                         default=False)
 
-    parser.add_argument("--exclude",
-                        nargs="+",
-                        help="Do not download specified links",
-                        choices=["imgur","gfycat","direct","self"],
-                        type=str)
 
     if arguments == []:
         return parser.parse_args()
@@ -253,7 +248,8 @@ class PromptUser:
                 GLOBAL.arguments.subreddit = "+".join(GLOBAL.arguments.subreddit.split())
 
                 # DELETE THE PLUS (+) AT THE END
-                GLOBAL.arguments.subreddit = GLOBAL.arguments.subreddit[:-1]
+                if not subredditInput.lower() == "frontpage":
+                    GLOBAL.arguments.subreddit = GLOBAL.arguments.subreddit[:-1]
 
             print("\nselect sort type:")
             sortTypes = [
@@ -327,32 +323,6 @@ class PromptUser:
                 if Path(GLOBAL.arguments.log ).is_file():
                     break
 
-        GLOBAL.arguments.exclude = []
-
-        sites = ["imgur","gfycat","direct","self"]
-
-        excludeInput = input("exclude: ").lower()
-        if excludeInput in sites and excludeInput != "":
-            GLOBAL.arguments.exclude = [excludeInput]
-
-        while not excludeInput == "":
-            while True:
-                excludeInput = input("exclude: ").lower()
-                if not excludeInput in sites or excludeInput in GLOBAL.arguments.exclude:
-                    break
-                elif excludeInput == "":
-                    break
-                else:
-                    GLOBAL.arguments.exclude.append(excludeInput)
-
-        for i in range(len(GLOBAL.arguments.exclude)):
-            if " " in GLOBAL.arguments.exclude[i]:
-                inputWithWhitespace = GLOBAL.arguments.exclude[i]
-                del GLOBAL.arguments.exclude[i]
-                for siteInput in inputWithWhitespace.split():
-                    if siteInput in sites and siteInput not in GLOBAL.arguments.exclude:
-                        GLOBAL.arguments.exclude.append(siteInput)
-
         while True:
             try:
                 GLOBAL.arguments.limit = int(input("\nlimit (0 for none): "))
@@ -472,15 +442,14 @@ def postExists(POST):
     else:
         return False
 
-def downloadPost(SUBMISSION,EXCLUDE):
+def downloadPost(SUBMISSION):
     directory = GLOBAL.directory / SUBMISSION['postSubreddit']
 
     global lastRequestTime
 
     downloaders = {"imgur":Imgur,"gfycat":Gfycat,"direct":Direct,"self":Self}
 
-    if SUBMISSION['postType'] in downloaders and \
-       not SUBMISSION['postType'] in EXCLUDE:
+    if SUBMISSION['postType'] in downloaders:
 
         print(SUBMISSION['postType'].upper())
 
@@ -542,11 +511,6 @@ def download(submissions):
    downloadedCount = subsLenght
    duplicates = 0
 
-    if GLOBAL.arguments.exclude is not None:
-        DoNotDownload = GLOBAL.arguments.exclude
-    else:
-        DoNotDownload = []
-
    FAILED_FILE = createLogFile("FAILED")
 
    for i in range(subsLenght):
@@ -566,7 +530,7 @@
            continue
 
        try:
-            downloadPost(submissions[i],DoNotDownload)
+            downloadPost(submissions[i])
 
        except FileAlreadyExistsError:
            print("It already exists")
@@ -636,7 +600,7 @@ def main():
        logDir = Path(GLOBAL.arguments.log)
        download(postFromLog(logDir))
        sys.exit()
-    
+
    try:
        POSTS = getPosts(prepareAttributes())
    except InsufficientPermission:
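
A note on the terse "Bug fix" changelog entry: it refers to the `PromptUser` hunk above, which puts a guard around the trim under the "DELETE THE PLUS (+) AT THE END" comment. A minimal sketch of the failure mode follows; the sample strings are illustrative, and only the `[:-1]` trim and its new guard come from script.py:

```python
# Sketch of the off-by-one fixed in the PromptUser hunk above.

# Multi-subreddit input is joined with "+" and, per the code comment
# "DELETE THE PLUS (+) AT THE END", ends with a stray "+", so trimming
# the last character is correct there:
subreddit = "pics+funny+"
assert subreddit[:-1] == "pics+funny"

# The single keyword "frontpage" has no trailing "+", so the old
# unconditional trim ate its last letter:
subreddit = "frontpage"
assert subreddit[:-1] == "frontpag"

# The commit therefore guards the trim, as in the diff:
subredditInput = "frontpage"
if not subredditInput.lower() == "frontpage":
    subreddit = subreddit[:-1]
print(subreddit)  # still "frontpage"
```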
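Similarly, the `downloadPost()` hunks drop the `EXCLUDE` parameter, leaving the `downloaders` dict as the only gate on post types. A condensed sketch of that dispatch pattern, with the downloader classes stubbed out (the real `Imgur`, `Gfycat`, `Direct` and `Self` classes live elsewhere in the project, and the constructor signature here is assumed for illustration):

```python
# Condensed sketch of the simplified dispatch in downloadPost().
class _Stub:
    def __init__(self, directory, submission):
        print("downloading", submission["postType"], "into", directory)

downloaders = {"imgur": _Stub, "gfycat": _Stub, "direct": _Stub, "self": _Stub}

def downloadPost(submission, directory="downloads"):
    # With --exclude removed, membership in the dispatch table is the
    # only check; post types without a downloader simply fall through.
    if submission["postType"] in downloaders:
        downloaders[submission["postType"]](directory, submission)

downloadPost({"postType": "imgur", "postSubreddit": "pics"})
```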