[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
This commit is contained in:
pre-commit-ci[bot] 2024-04-13 00:00:18 +00:00
parent 72ad6dc953
commit f4cd1ba0d6
813 changed files with 66015 additions and 58839 deletions

View file

@ -1,43 +1,53 @@
"""PyPI and direct package downloading"""
import sys
import os
import re
import io
import shutil
import socket
from __future__ import annotations
import base64
import hashlib
import itertools
import warnings
import configparser
import hashlib
import html
import http.client
import io
import itertools
import os
import re
import shutil
import socket
import sys
import urllib.error
import urllib.parse
import urllib.request
import urllib.error
import warnings
from fnmatch import translate
from functools import wraps
import setuptools
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
)
from distutils import log
from distutils.errors import DistutilsError
from fnmatch import translate
from setuptools.wheel import Wheel
from pkg_resources import BINARY_DIST
from pkg_resources import CHECKOUT_DIST
from pkg_resources import DEVELOP_DIST
from pkg_resources import Distribution
from pkg_resources import EGG_DIST
from pkg_resources import Environment
from pkg_resources import find_distributions
from pkg_resources import normalize_path
from pkg_resources import Requirement
from pkg_resources import safe_name
from pkg_resources import safe_version
from pkg_resources import SOURCE_DIST
from pkg_resources import to_filename
from setuptools.extern.more_itertools import unique_everseen
from setuptools.wheel import Wheel
# Matches an "egg=name-version" URL fragment used to name a checkout.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
# Case-insensitive matcher for href attributes in scraped HTML pages.
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
# Legacy PyPI markup: a download link followed by an "(md5)" hash link.
PYPI_MD5 = re.compile(
    r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
    r'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\)',
)
# Matches a URL scheme prefix such as "https:"; distinguishes URLs from paths.
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
# Recognized source-distribution archive extensions.
EXTENSIONS = '.tar.gz .tar.bz2 .tar .zip .tgz'.split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
@ -46,9 +56,10 @@ __all__ = [
# Timeout (seconds) applied to index/download socket connections.
_SOCKET_TIMEOUT = 15

# User-Agent template; filled in below with the running setuptools and
# Python versions so servers can identify this client.
_tmpl = 'setuptools/{setuptools.__version__} Python-urllib/{py_major}'
user_agent = _tmpl.format(
    py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools,
)
def parse_requirement_arg(spec):
@ -56,7 +67,7 @@ def parse_requirement_arg(spec):
return Requirement.parse(spec)
except ValueError as e:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
'Not a URL, existing file, or requirement spec: {!r}'.format(spec),
) from e
@ -98,15 +109,13 @@ def egg_info_for_url(url):
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    # Distributions derivable from the URL's path/filename alone.
    yield from distros_for_location(url, base, metadata)
    if fragment:
        # An "#egg=name-version" fragment explicitly names a checkout.
        match = EGG_FRAGMENT.match(fragment)
        if match:
            yield from interpret_distro_name(
                url, match.group(1), metadata, precedence=CHECKOUT_DIST,
            )
def distros_for_location(location, basename, metadata=None):
@ -120,18 +129,20 @@ def distros_for_location(location, basename, metadata=None):
wheel = Wheel(basename)
if not wheel.is_compatible():
return []
return [Distribution(
location=location,
project_name=wheel.project_name,
version=wheel.version,
# Increase priority over eggs.
precedence=EGG_DIST + 1,
)]
return [
Distribution(
location=location,
project_name=wheel.project_name,
version=wheel.version,
# Increase priority over eggs.
precedence=EGG_DIST + 1,
),
]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
location, win_base, metadata, py_ver, BINARY_DIST, platform,
)
# Try source distro extensions (.zip, .tgz, etc.)
#
@ -145,13 +156,13 @@ def distros_for_location(location, basename, metadata=None):
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    return distros_for_location(
        normalize_path(filename), os.path.basename(filename), metadata,
    )
def interpret_distro_name(
location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
platform=None
platform=None,
):
"""Generate alternative interpretations of a source distro name
@ -180,7 +191,7 @@ def interpret_distro_name(
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence=precedence,
platform=platform
platform=platform,
)
@ -212,7 +223,7 @@ def find_external_links(url, page):
for match in HREF.finditer(tag):
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
for tag in ('<th>Home Page', '<th>Download URL'):
pos = page.find(tag)
if pos != -1:
match = HREF.search(page, pos)
@ -248,7 +259,7 @@ class ContentChecker:
class HashChecker(ContentChecker):
pattern = re.compile(
r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
r'(?P<expected>[a-f0-9]+)'
r'(?P<expected>[a-f0-9]+)',
)
def __init__(self, hash_name, expected):
@ -258,7 +269,7 @@ class HashChecker(ContentChecker):
@classmethod
def from_url(cls, url):
"Construct a (possibly null) ContentChecker from a URL"
'Construct a (possibly null) ContentChecker from a URL'
fragment = urllib.parse.urlparse(url)[-1]
if not fragment:
return ContentChecker()
@ -282,11 +293,11 @@ class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.org/simple/", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
self, index_url='https://pypi.org/simple/', hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw,
):
Environment.__init__(self, *args, **kw)
self.index_url = index_url + "/" [:not index_url.endswith('/')]
self.index_url = index_url + '/' [:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
@ -308,7 +319,7 @@ class PackageIndex(Environment):
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
self.debug('Found link: %s', url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
@ -318,14 +329,14 @@ class PackageIndex(Environment):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.info('Reading %s', url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
tmpl = 'Download error on %s: %%s -- Some packages may not be found!'
f = self.open_url(url, tmpl % url)
if f is None:
return
if isinstance(f, urllib.error.HTTPError) and f.code == 401:
self.info("Authentication error: %s" % f.msg)
self.info('Authentication error: %s' % f.msg)
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
@ -340,7 +351,7 @@ class PackageIndex(Environment):
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
page = page.decode(charset, 'ignore')
f.close()
for match in HREF.finditer(page):
link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
@ -351,7 +362,7 @@ class PackageIndex(Environment):
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
self.warn('Not found: %s', fn)
return
if os.path.isdir(fn) and not nested:
@ -361,7 +372,7 @@ class PackageIndex(Environment):
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
self.debug('Found: %s', fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
@ -370,8 +381,9 @@ class PackageIndex(Environment):
if is_file or self.allows(urllib.parse.urlparse(url)[1]):
return True
msg = (
"\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/2hrImnY for details).\n")
'\nNote: Bypassing %s (disallowed host; see '
'http://bit.ly/2hrImnY for details).\n'
)
if fatal:
raise DistutilsError(msg % url)
else:
@ -409,9 +421,11 @@ class PackageIndex(Environment):
if not link.startswith(self.index_url):
return NO_MATCH_SENTINEL
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
parts = list(
map(
urllib.parse.unquote, link[len(self.index_url):].split('/'),
),
)
if len(parts) != 2 or '#' in parts[1]:
return NO_MATCH_SENTINEL
@ -433,7 +447,7 @@ class PackageIndex(Environment):
pkg, ver = self._scan(url) # ensure this page is in the page index
if not pkg:
return "" # no sense double-scanning non-package pages
return '' # no sense double-scanning non-package pages
# process individual package page
for new_url in find_external_links(url, page):
@ -441,19 +455,19 @@ class PackageIndex(Environment):
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
new_url += '#egg={}-{}'.format(pkg, ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page,
)
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
'Page at %s links to .py file(s) without version info; an index '
'scan is required.', url,
)
def scan_all(self, msg=None, *args):
@ -461,7 +475,7 @@ class PackageIndex(Environment):
if msg:
self.warn(msg, *args)
self.info(
"Scanning index of all packages (this may take a while)"
'Scanning index of all packages (this may take a while)',
)
self.scan_url(self.index_url)
@ -486,8 +500,8 @@ class PackageIndex(Environment):
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
self.debug('%s does not match %s', requirement, dist)
return super().obtain(requirement, installer)
def check_hash(self, checker, filename, tfp):
"""
@ -495,24 +509,25 @@ class PackageIndex(Environment):
"""
checker.report(
self.debug,
"Validating %%s checksum for %s" % filename)
'Validating %%s checksum for %s' % filename,
)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
'%s validation failed for %s; '
'possible download problem?'
% (checker.hash.name, os.path.basename(filename)),
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
self.to_scan is None or # if we have already "gone online"
not URL_SCHEME(url) or # or it's a local file/directory
url.startswith('file:') or
list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
@ -532,7 +547,8 @@ class PackageIndex(Environment):
else: # no distros seen for this name, might be misspelled
meth, msg = (
self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
"Couldn't find index page for %r (maybe misspelled?)",
)
meth(msg, requirement.unsafe_name)
self.scan_all()
@ -572,7 +588,8 @@ class PackageIndex(Environment):
def fetch_distribution( # noqa: C901 # is too complex (14) # FIXME
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None):
develop_ok=False, local_index=None,
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
@ -590,7 +607,7 @@ class PackageIndex(Environment):
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
self.info('Searching for %s', requirement)
skipped = {}
dist = None
@ -604,14 +621,14 @@ class PackageIndex(Environment):
if dist.precedence == DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn(
"Skipping development or system egg: %s", dist,
'Skipping development or system egg: %s', dist,
)
skipped[dist] = 1
continue
test = (
dist in req
and (dist.precedence <= SOURCE_DIST or not source)
dist in req and
(dist.precedence <= SOURCE_DIST or not source)
)
if test:
loc = self.download(dist.location, tmpdir)
@ -638,12 +655,12 @@ class PackageIndex(Environment):
if dist is None:
self.warn(
"No local packages or working download links found for %s%s",
(source and "a source distribution of " or ""),
'No local packages or working download links found for %s%s',
(source and 'a source distribution of ' or ''),
requirement,
)
else:
self.info("Best match: %s", dist)
self.info('Best match: %s', dist)
return dist.clone(location=dist.download_location)
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
@ -679,31 +696,31 @@ class PackageIndex(Environment):
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
'from setuptools import setup\n'
'setup(name=%r, version=%r, py_modules=[%r])\n'
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
os.path.splitext(basename)[0],
),
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
'any dashes in the name or version should be escaped using '
'underscores. %r' % (fragment, dists),
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
' suffix to enable automatic setup script generation.',
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
self.info('Downloading %s', url)
# Download the file
fp = None
try:
@ -711,13 +728,13 @@ class PackageIndex(Environment):
fp = self.open_url(url)
if isinstance(fp, urllib.error.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code, fp.msg)
"Can't download {}: {} {}".format(url, fp.code, fp.msg),
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
if 'content-length' in headers:
# Some servers return multiple Content-Length headers :(
sizes = headers.get_all('Content-Length')
size = max(map(int, sizes))
@ -752,15 +769,17 @@ class PackageIndex(Environment):
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg)) from v
raise DistutilsError('{} {}'.format(url, msg)) from v
except urllib.error.HTTPError as v:
return v
except urllib.error.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason)) from v
raise DistutilsError(
'Download error for %s: %s'
% (url, v.reason),
) from v
except http.client.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
@ -768,14 +787,16 @@ class PackageIndex(Environment):
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
(url, v.line),
) from v
except (http.client.HTTPException, socket.error) as v:
except (http.client.HTTPException, OSError) as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v)) from v
raise DistutilsError(
'Download error for %s: %s'
% (url, v),
) from v
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
@ -785,7 +806,7 @@ class PackageIndex(Environment):
while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
else:
name = "__downloaded__" # default if URL has no path contents
name = '__downloaded__' # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
@ -829,10 +850,10 @@ class PackageIndex(Environment):
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
raise DistutilsError('Unexpected HTML page found at ' + url)
def _download_svn(self, url, filename):
warnings.warn("SVN download support is deprecated", UserWarning)
warnings.warn('SVN download support is deprecated', UserWarning)
url = url.split('#', 1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
@ -843,14 +864,14 @@ class PackageIndex(Environment):
if auth:
if ':' in auth:
user, pw = auth.split(':', 1)
creds = " --username=%s --password=%s" % (user, pw)
creds = ' --username={} --password={}'.format(user, pw)
else:
creds = " --username=" + auth
creds = ' --username=' + auth
netloc = host
parts = scheme, netloc, url, p, q, f
url = urllib.parse.urlunparse(parts)
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
self.info('Doing subversion checkout from %s to %s', url, filename)
os.system('svn checkout{} -q {} {}'.format(creds, url, filename))
return filename
@staticmethod
@ -875,15 +896,17 @@ class PackageIndex(Environment):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
self.info('Doing git clone from %s to %s', url, filename)
os.system('git clone --quiet {} {}'.format(url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("git -C %s checkout --quiet %s" % (
filename,
rev,
))
self.info('Checking out %s', rev)
os.system(
'git -C {} checkout --quiet {}'.format(
filename,
rev,
),
)
return filename
@ -891,15 +914,17 @@ class PackageIndex(Environment):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
self.info('Doing hg clone from %s to %s', url, filename)
os.system('hg clone --quiet {} {}'.format(url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("hg --cwd %s up -C -r %s -q" % (
filename,
rev,
))
self.info('Updating to %s', rev)
os.system(
'hg --cwd {} up -C -r {} -q'.format(
filename,
rev,
),
)
return filename
@ -1050,11 +1075,11 @@ def open_with_auth(url, opener=urllib.request.urlopen):
log.info('Authenticating as %s for %s (from .pypirc)', *info)
if auth:
auth = "Basic " + _encode_auth(auth)
auth = 'Basic ' + _encode_auth(auth)
parts = scheme, address, path, params, query, frag
new_url = urllib.parse.urlunparse(parts)
request = urllib.request.Request(new_url)
request.add_header("Authorization", auth)
request.add_header('Authorization', auth)
else:
request = urllib.request.Request(url)
@ -1099,7 +1124,7 @@ def local_open(url):
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
with open(filepath) as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
@ -1107,12 +1132,13 @@ def local_open(url):
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = (
"<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
'<html><head><title>{url}</title>'
'</head><body>{files}</body></html>'
)
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
status, message = 200, 'OK'
else:
status, message, body = 404, "Path not found", "Not found"
status, message, body = 404, 'Path not found', 'Not found'
headers = {'content-type': 'text/html'}
body_stream = io.StringIO(body)