Diffstat (limited to 'wikiget/wikiget.py')
-rw-r--r--   wikiget/wikiget.py   93
1 file changed, 52 insertions, 41 deletions
diff --git a/wikiget/wikiget.py b/wikiget/wikiget.py
index a78056c..bdd05a3 100644
--- a/wikiget/wikiget.py
+++ b/wikiget/wikiget.py
@@ -1,5 +1,5 @@
 # wikiget - CLI tool for downloading files from Wikimedia sites
-# Copyright (C) 2018, 2019 Cody Logan
+# Copyright (C) 2018, 2019, 2020 Cody Logan
 # SPDX-License-Identifier: GPL-3.0-or-later
 #
 # Wikiget is free software: you can redistribute it and/or modify
@@ -17,14 +17,6 @@
 
 """Main wikiget functions."""
 
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-from builtins import open
-
-from future import standard_library
-
-standard_library.install_aliases()
-
 import argparse
 import hashlib
 import logging
@@ -52,34 +44,42 @@ def main():
     """
     parser = argparse.ArgumentParser(description="""
-    A tool for downloading files from MediaWiki sites
-    using the file name or description page URL
+    A tool for downloading files from
+    MediaWiki sites using the file name or
+    description page URL
     """, epilog="""
-    Copyright (C) 2018, 2019 Cody Logan. License GPLv3+: GNU GPL
-    version 3 or later <http://www.gnu.org/licenses/gpl.html>.
-    This is free software; you are free to change and redistribute
-    it under certain conditions. There is NO WARRANTY, to the
+    Copyright (C) 2018, 2019, 2020 Cody Logan.
+    License GPLv3+: GNU GPL version 3 or later
+    <http://www.gnu.org/licenses/gpl.html>.
+    This is free software; you are free to
+    change and redistribute it under certain
+    conditions. There is NO WARRANTY, to the
     extent permitted by law.
     """)
     parser.add_argument('FILE', help="""
-    name of the file to download with the File: or Image: prefix,
-    or the URL of its file description page
+    name of the file to download with the File: or Image:
+    prefix, or the URL of its file description page
     """)
 
     parser.add_argument('-V', '--version', action='version',
                         version='%(prog)s {}'.format(__version__))
 
     output_options = parser.add_mutually_exclusive_group()
-    output_options.add_argument('-q', '--quiet', help='suppress warning messages',
+    output_options.add_argument('-q', '--quiet',
+                                help='suppress warning messages',
                                 action='store_true')
     output_options.add_argument('-v', '--verbose',
-                                help='print detailed information; use -vv for even more detail',
-                                action='count', default=0)
-    parser.add_argument('-f', '--force', help='force overwriting existing files',
+                                help='print detailed information; use -vv for '
+                                'even more detail', action='count', default=0)
+    parser.add_argument('-f', '--force',
+                        help='force overwriting existing files',
                         action='store_true')
     parser.add_argument('-s', '--site', default=DEFAULT_SITE,
-                        help='MediaWiki site to download from (default: %(default)s)')
+                        help='MediaWiki site to download from (default: '
+                        '%(default)s)')
     parser.add_argument('-o', '--output', help='write download to OUTPUT')
-    parser.add_argument('-a', '--batch', help='treat FILE as a textfile containing multiple files to download, one URL or filename per line',
+    parser.add_argument('-a', '--batch',
+                        help='treat FILE as a textfile containing multiple '
+                        'files to download, one URL or filename per line',
                         action='store_true')
 
     args = parser.parse_args()
@@ -98,7 +98,8 @@ def main():
         try:
             fd = open(input_file, 'r')
         except IOError as e:
-            print('File could not be read. The following error was encountered:')
+            print('File could not be read. The following error was '
+                  'encountered:')
             print(e)
             sys.exit(1)
         else:
@@ -120,7 +121,8 @@ def download(dl, args):
         site_name = url.netloc
         if args.site is not DEFAULT_SITE and not args.quiet:
             # this will work even if the user specifies 'commons.wikimedia.org'
-            print('Warning: target is a URL, ignoring site specified with --site')
+            print('Warning: target is a URL, ignoring site specified with '
+                  '--site')
     else:
         filename = dl
         site_name = args.site
@@ -130,7 +132,8 @@ def download(dl, args):
 
     # check for valid site parameter
     if not site_match:
-        print('Only Wikimedia sites (wikipedia.org and wikimedia.org) are currently supported.')
+        print('Only Wikimedia sites (wikipedia.org and wikimedia.org) are '
+              'currently supported.')
        sys.exit(1)
 
     # check if this is a valid file
@@ -139,10 +142,12 @@ def download(dl, args):
         filename = file_match.group(2)
     else:
         # no file extension and/or prefix, probably an article
-        print('Downloading Wikipedia articles is not currently supported.', end='')
+        print('Downloading Wikipedia articles is not currently supported.',
+              end='')
         if file_match and not file_match.group(1):
             # file extension detected, but no prefix
-            # TODO: no longer possible to get to this point since file_match is None with no prefix
+            # TODO: no longer possible to get to this point since file_match is
+            # None with no prefix
             print(" If this is a file, please add the 'File:' prefix.")
         else:
             print('\n', end='')
@@ -159,7 +164,8 @@ def download(dl, args):
     try:
         site = Site(site_name, clients_useragent=USER_AGENT)
     except ConnectionError:
-        # usually this means there is no such site, or there's no network connection
+        # usually this means there is no such site, or there's no network
+        # connection
         print("Error: couldn't connect to specified site.")
         sys.exit(1)
     except InvalidResponse as e:
@@ -178,7 +184,8 @@ def download(dl, args):
 
         if args.verbose >= 1:
             print("Info: downloading '{}' "
-                  "({} bytes) from {}".format(filename, file_size, site.host), end='')
+                  '({} bytes) from {}'.format(filename, file_size, site.host),
+                  end='')
             if args.output:
                 print(" to '{}'".format(dest))
             else:
@@ -186,12 +193,14 @@ def download(dl, args):
                 print('Info: {}'.format(file_url))
 
         if os.path.isfile(dest) and not args.force:
-            print("File '{}' already exists, skipping download (use -f to ignore)".format(dest))
+            print("File '{}' already exists, skipping download (use -f to "
+                  "ignore)".format(dest))
         else:
             try:
                 fd = open(dest, 'wb')
             except IOError as e:
-                print('File could not be written. The following error was encountered:')
+                print('File could not be written. The following error was '
+                      'encountered:')
                 print(e)
                 sys.exit(1)
             else:
@@ -221,15 +230,16 @@ def download(dl, args):
     else:
         # no file information returned
-        print("Target '{}' does not appear to be a valid file.".format(filename))
+        print("Target '{}' does not appear to be a valid file."
+              .format(filename))
         sys.exit(1)
 
 
 def valid_file(search_string):
     """
-    Determines if the given string contains a valid file name, defined as a string
-    ending with a '.' and at least one character, beginning with 'File:' or
-    'Image:', the standard file prefixes in MediaWiki.
+    Determines if the given string contains a valid file name, defined as a
+    string ending with a '.' and at least one character, beginning with 'File:'
+    or 'Image:', the standard file prefixes in MediaWiki.
 
     :param search_string: string to validate
     :returns: a regex Match object if there's a match or None otherwise
     """
@@ -241,10 +251,10 @@ def valid_file(search_string):
 def valid_site(search_string):
     """
-    Determines if the given string contains a valid site name, defined as a string
-    ending with 'wikipedia.org' or 'wikimedia.org'. This covers all subdomains of
-    those domains. Eventually, it should be possible to support any MediaWiki site,
-    regardless of domain name.
+    Determines if the given string contains a valid site name, defined as a
+    string ending with 'wikipedia.org' or 'wikimedia.org'. This covers all
+    subdomains of those domains. Eventually, it should be possible to support
+    any MediaWiki site, regardless of domain name.
 
     :param search_string: string to validate
     :returns: a regex Match object if there's a match or None otherwise
     """
@@ -254,7 +264,8 @@ def valid_site(search_string):
 def verify_hash(filename):
     """
-    Calculates the SHA1 hash of the given file for comparison with a known value.
+    Calculates the SHA1 hash of the given file for comparison with a known
+    value.
 
     :param filename: name of the file to calculate a hash for
     :return: hash digest
     """
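Note: the regular expressions behind valid_file() and valid_site() are outside the hunks shown above. Based on their docstrings and on the way download() reads file_match.group(1) and file_match.group(2), they could look roughly like the sketch below; the patterns themselves are assumptions for illustration, not the repository's exact code.

import re


def valid_file(search_string):
    # group(1) captures the 'File:' or 'Image:' prefix, group(2) the file name
    # with its extension, mirroring how download() uses the match groups
    return re.search(r'(File:|Image:)(.+\..+)', search_string)


def valid_site(search_string):
    # accepts wikipedia.org, wikimedia.org, and any of their subdomains
    return re.search(r'wiki[mp]edia\.org$', search_string)

With these definitions, valid_file('File:Example.jpg').group(2) would return 'Example.jpg', which matches how download() extracts the bare filename before querying the site.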
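Note: verify_hash() is likewise only touched in its docstring here. Going by that docstring (a SHA1 digest of a local file, compared elsewhere against a known value, presumably the SHA1 the MediaWiki API reports for the remote file), a minimal implementation could look like the following sketch; the chunked read loop and the 65536-byte chunk size are assumptions, not code from this commit.

import hashlib


def verify_hash(filename):
    # hash the file in fixed-size chunks so large downloads need not fit in memory
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            sha1.update(chunk)
    return sha1.hexdigest()

A mismatch between this digest and the value reported by the site would indicate a corrupted or incomplete download.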