[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
pre-commit-ci[bot] 2024-04-13 00:00:18 +00:00
parent 72ad6dc953
commit f4cd1ba0d6
813 changed files with 66015 additions and 58839 deletions
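Most of the automated rewrites visible below fall into a few mechanical categories: double-quoted string literals become single-quoted, combined "from typing import ..." lines are split into one import per line, trailing commas are added to multi-line calls, and Optional[X] / Tuple[...] annotations become X | None / tuple[...] after a "from __future__ import annotations" line is inserted at the top of each module. Rewrites of this shape are typically produced by hooks such as reorder-python-imports, pyupgrade, add-trailing-comma, and double-quote-string-fixer, though the repository's actual hook configuration is not shown in this diff. The X | None rewrite is safe on interpreters older than Python 3.10 only because of that future import (PEP 563 stores annotations as strings instead of evaluating them). A minimal sketch using a hypothetical function, not taken from the diff:

from __future__ import annotations


def find_interpreter(path: str | None) -> str | None:
    # With PEP 563 deferred evaluation, this annotation is stored as the
    # plain string 'str | None' and never evaluated, so it works on
    # Python 3.8/3.9 even though the X | Y union syntax itself is a
    # runtime feature of Python 3.10+.
    return path


if __name__ == "__main__":
    print(find_interpreter.__annotations__)
    # -> {'path': 'str | None', 'return': 'str | None'}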


@ -1,15 +1,16 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__title__ = 'packaging'
__summary__ = 'Core utilities for Python packages'
__uri__ = 'https://github.com/pypa/packaging'
__version__ = "24.0"
__version__ = '24.0'
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__author__ = 'Donald Stufft and individual contributors'
__email__ = 'donald@stufft.io'
__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014 %s" % __author__
__license__ = 'BSD-2-Clause or Apache-2.0'
__copyright__ = '2014 %s' % __author__


@ -7,11 +7,14 @@ interface to ``ZipFile``. Only the read interface is implemented.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
from __future__ import annotations
import enum
import os
import struct
from typing import IO, Optional, Tuple
from typing import IO
from typing import Optional
from typing import Tuple
class ELFInvalid(ValueError):
@ -45,12 +48,12 @@ class ELFFile:
self._f = f
try:
ident = self._read("16B")
ident = self._read('16B')
except struct.error:
raise ELFInvalid("unable to parse identification")
raise ELFInvalid('unable to parse identification')
magic = bytes(ident[:4])
if magic != b"\x7fELF":
raise ELFInvalid(f"invalid magic: {magic!r}")
if magic != b'\x7fELF':
raise ELFInvalid(f'invalid magic: {magic!r}')
self.capacity = ident[4] # Format for program header (bitness).
self.encoding = ident[5] # Data structure encoding (endianness).
@ -60,15 +63,15 @@ class ELFFile:
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, self._p_fmt, self._p_idx = {
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
(1, 1): ('<HHIIIIIHHH', '<IIIIIIII', (0, 1, 4)), # 32-bit LSB.
(1, 2): ('>HHIIIIIHHH', '>IIIIIIII', (0, 1, 4)), # 32-bit MSB.
(2, 1): ('<HHIQQQIHHH', '<IIQQQQQQ', (0, 2, 5)), # 64-bit LSB.
(2, 2): ('>HHIQQQIHHH', '>IIQQQQQQ', (0, 2, 5)), # 64-bit MSB.
}[(self.capacity, self.encoding)]
except KeyError:
raise ELFInvalid(
f"unrecognized capacity ({self.capacity}) or "
f"encoding ({self.encoding})"
f'unrecognized capacity ({self.capacity}) or '
f'encoding ({self.encoding})',
)
try:
@ -85,13 +88,13 @@ class ELFFile:
self._e_phnum, # Number of sections.
) = self._read(e_fmt)
except struct.error as e:
raise ELFInvalid("unable to parse machine and section information") from e
raise ELFInvalid('unable to parse machine and section information') from e
def _read(self, fmt: str) -> Tuple[int, ...]:
def _read(self, fmt: str) -> tuple[int, ...]:
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
@property
def interpreter(self) -> Optional[str]:
def interpreter(self) -> str | None:
"""
The path recorded in the ``PT_INTERP`` section header.
"""
@ -104,5 +107,5 @@ class ELFFile:
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
continue
self._f.seek(data[self._p_idx[1]])
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip('\0')
return None


@ -1,3 +1,5 @@
from __future__ import annotations
import collections
import contextlib
import functools
@ -5,9 +7,18 @@ import os
import re
import sys
import warnings
from typing import Dict, Generator, Iterator, NamedTuple, Optional, Sequence, Tuple
from typing import Dict
from typing import Generator
from typing import Iterator
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Tuple
from ._elffile import EIClass, EIData, ELFFile, EMachine
from ._elffile import EIClass
from ._elffile import EIData
from ._elffile import ELFFile
from ._elffile import EMachine
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
@ -17,9 +28,9 @@ EF_ARM_ABI_FLOAT_HARD = 0x00000400
# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
# as the type for `path` until then.
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]:
try:
with open(path, "rb") as f:
with open(path, 'rb') as f:
yield ELFFile(f)
except (OSError, TypeError, ValueError):
yield None
@ -31,38 +42,38 @@ def _is_linux_armhf(executable: str) -> bool:
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
with _parse_elf(executable) as f:
return (
f is not None
and f.capacity == EIClass.C32
and f.encoding == EIData.Lsb
and f.machine == EMachine.Arm
and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
f is not None and
f.capacity == EIClass.C32 and
f.encoding == EIData.Lsb and
f.machine == EMachine.Arm and
f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 and
f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
)
def _is_linux_i686(executable: str) -> bool:
with _parse_elf(executable) as f:
return (
f is not None
and f.capacity == EIClass.C32
and f.encoding == EIData.Lsb
and f.machine == EMachine.I386
f is not None and
f.capacity == EIClass.C32 and
f.encoding == EIData.Lsb and
f.machine == EMachine.I386
)
def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool:
if "armv7l" in archs:
if 'armv7l' in archs:
return _is_linux_armhf(executable)
if "i686" in archs:
if 'i686' in archs:
return _is_linux_i686(executable)
allowed_archs = {
"x86_64",
"aarch64",
"ppc64",
"ppc64le",
"s390x",
"loongarch64",
"riscv64",
'x86_64',
'aarch64',
'ppc64',
'ppc64le',
's390x',
'loongarch64',
'riscv64',
}
return any(arch in allowed_archs for arch in archs)
@ -72,7 +83,7 @@ def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool:
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50)
class _GLibCVersion(NamedTuple):
@ -80,7 +91,7 @@ class _GLibCVersion(NamedTuple):
minor: int
def _glibc_version_string_confstr() -> Optional[str]:
def _glibc_version_string_confstr() -> str | None:
"""
Primary implementation of glibc_version_string using os.confstr.
"""
@ -90,7 +101,7 @@ def _glibc_version_string_confstr() -> Optional[str]:
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try:
# Should be a string like "glibc 2.17".
version_string: Optional[str] = os.confstr("CS_GNU_LIBC_VERSION")
version_string: str | None = os.confstr('CS_GNU_LIBC_VERSION')
assert version_string is not None
_, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError):
@ -99,7 +110,7 @@ def _glibc_version_string_confstr() -> Optional[str]:
return version
def _glibc_version_string_ctypes() -> Optional[str]:
def _glibc_version_string_ctypes() -> str | None:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
@ -138,17 +149,17 @@ def _glibc_version_string_ctypes() -> Optional[str]:
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
version_str = version_str.decode('ascii')
return version_str
def _glibc_version_string() -> Optional[str]:
def _glibc_version_string() -> str | None:
"""Returns glibc version string, or None if not using glibc."""
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
def _parse_glibc_version(version_str: str) -> tuple[int, int]:
"""Parse glibc version.
We use a regexp instead of str.split because we want to discard any
@ -156,19 +167,19 @@ def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
in patched/forked versions of glibc (e.g. Linaro's version of glibc
uses version strings like "2.20-2014.11"). See gh-3588.
"""
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
m = re.match(r'(?P<major>[0-9]+)\.(?P<minor>[0-9]+)', version_str)
if not m:
warnings.warn(
f"Expected glibc version with 2 components major.minor,"
f" got: {version_str}",
f'Expected glibc version with 2 components major.minor,'
f' got: {version_str}',
RuntimeWarning,
)
return -1, -1
return int(m.group("major")), int(m.group("minor"))
return int(m.group('major')), int(m.group('minor'))
@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
@functools.lru_cache
def _get_glibc_version() -> tuple[int, int]:
version_str = _glibc_version_string()
if version_str is None:
return (-1, -1)
@ -185,30 +196,30 @@ def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
import _manylinux
except ImportError:
return True
if hasattr(_manylinux, "manylinux_compatible"):
if hasattr(_manylinux, 'manylinux_compatible'):
result = _manylinux.manylinux_compatible(version[0], version[1], arch)
if result is not None:
return bool(result)
return True
if version == _GLibCVersion(2, 5):
if hasattr(_manylinux, "manylinux1_compatible"):
if hasattr(_manylinux, 'manylinux1_compatible'):
return bool(_manylinux.manylinux1_compatible)
if version == _GLibCVersion(2, 12):
if hasattr(_manylinux, "manylinux2010_compatible"):
if hasattr(_manylinux, 'manylinux2010_compatible'):
return bool(_manylinux.manylinux2010_compatible)
if version == _GLibCVersion(2, 17):
if hasattr(_manylinux, "manylinux2014_compatible"):
if hasattr(_manylinux, 'manylinux2014_compatible'):
return bool(_manylinux.manylinux2014_compatible)
return True
_LEGACY_MANYLINUX_MAP = {
# CentOS 7 w/ glibc 2.17 (PEP 599)
(2, 17): "manylinux2014",
(2, 17): 'manylinux2014',
# CentOS 6 w/ glibc 2.12 (PEP 571)
(2, 12): "manylinux2010",
(2, 12): 'manylinux2010',
# CentOS 5 w/ glibc 2.5 (PEP 513)
(2, 5): "manylinux1",
(2, 5): 'manylinux1',
}
@ -227,7 +238,7 @@ def platform_tags(archs: Sequence[str]) -> Iterator[str]:
return
# Oldest glibc to be supported regardless of architecture is (2, 17).
too_old_glibc2 = _GLibCVersion(2, 16)
if set(archs) & {"x86_64", "i686"}:
if set(archs) & {'x86_64', 'i686'}:
# On x86/i686 also oldest glibc to be supported is (2, 5).
too_old_glibc2 = _GLibCVersion(2, 4)
current_glibc = _GLibCVersion(*_get_glibc_version())
@ -250,11 +261,11 @@ def platform_tags(archs: Sequence[str]) -> Iterator[str]:
min_minor = -1
for glibc_minor in range(glibc_max.minor, min_minor, -1):
glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
tag = "manylinux_{}_{}".format(*glibc_version)
tag = 'manylinux_{}_{}'.format(*glibc_version)
if _is_compatible(arch, glibc_version):
yield f"{tag}_{arch}"
yield f'{tag}_{arch}'
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
if glibc_version in _LEGACY_MANYLINUX_MAP:
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
if _is_compatible(arch, glibc_version):
yield f"{legacy_tag}_{arch}"
yield f'{legacy_tag}_{arch}'


@ -3,12 +3,16 @@
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
from __future__ import annotations
import functools
import re
import subprocess
import sys
from typing import Iterator, NamedTuple, Optional, Sequence
from typing import Iterator
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from ._elffile import ELFFile
@ -18,18 +22,18 @@ class _MuslVersion(NamedTuple):
minor: int
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
def _parse_musl_version(output: str) -> _MuslVersion | None:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
if len(lines) < 2 or lines[0][:4] != 'musl':
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
m = re.match(r'Version (\d+)\.(\d+)', lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
@functools.lru_cache
def _get_musl_version(executable: str) -> _MuslVersion | None:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
@ -41,11 +45,11 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
Dynamic Program Loader
"""
try:
with open(executable, "rb") as f:
with open(executable, 'rb') as f:
ld = ELFFile(f).interpreter
except (OSError, TypeError, ValueError):
return None
if ld is None or "musl" not in ld:
if ld is None or 'musl' not in ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)
return _parse_musl_version(proc.stderr)
@ -67,17 +71,17 @@ def platform_tags(archs: Sequence[str]) -> Iterator[str]:
return
for arch in archs:
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
yield f'musllinux_{sys_musl.major}_{minor}_{arch}'
if __name__ == "__main__": # pragma: no cover
if __name__ == '__main__': # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
assert plat.startswith('linux-'), 'not linux'
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
print('plat:', plat)
print('musl:', _get_musl_version(sys.executable))
print('tags:', end=' ')
for t in platform_tags(re.sub(r'[.-]', '_', plat.split('-', 1)[-1])):
print(t, end='\n ')


@ -3,11 +3,18 @@
The docstring for each __parse_* function contains EBNF-inspired grammar representing
the implementation.
"""
from __future__ import annotations
import ast
from typing import Any, List, NamedTuple, Optional, Tuple, Union
from typing import Any
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
from ._tokenizer import DEFAULT_RULES
from ._tokenizer import Tokenizer
class Node:
@ -52,9 +59,9 @@ MarkerList = List[Any]
class ParsedRequirement(NamedTuple):
name: str
url: str
extras: List[str]
extras: list[str]
specifier: str
marker: Optional[MarkerList]
marker: MarkerList | None
# --------------------------------------------------------------------------------------
@ -68,68 +75,68 @@ def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
tokenizer.consume('WS')
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
'IDENTIFIER', expected='package name at the start of dependency specifier',
)
name = name_token.text
tokenizer.consume("WS")
tokenizer.consume('WS')
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
tokenizer.expect('END', expected='end of dependency specifier')
return ParsedRequirement(name, url, extras, specifier, marker)
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
) -> tuple[str, str, MarkerList | None]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
specifier = ''
url = ''
marker = None
if tokenizer.check("AT"):
if tokenizer.check('AT'):
tokenizer.read()
tokenizer.consume("WS")
tokenizer.consume('WS')
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
url = tokenizer.expect('URL', expected='URL after @').text
if tokenizer.check('END', peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
tokenizer.expect('WS', expected='whitespace after URL')
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
if tokenizer.check('END', peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer, span_start=url_start, after="URL and whitespace"
tokenizer, span_start=url_start, after='URL and whitespace',
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
if tokenizer.check("END", peek=True):
if tokenizer.check('END', peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
after=(
"version specifier"
'version specifier'
if specifier
else "name and no valid version specifier"
else 'name and no valid version specifier'
),
)
@ -137,66 +144,66 @@ def _parse_requirement_details(
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, after: str
tokenizer: Tokenizer, *, span_start: int, after: str,
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
if not tokenizer.check('SEMICOLON'):
tokenizer.raise_syntax_error(
f"Expected end or semicolon (after {after})",
f'Expected end or semicolon (after {after})',
span_start=span_start,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
return marker
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
def _parse_extras(tokenizer: Tokenizer) -> list[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
if not tokenizer.check('LEFT_BRACKET', peek=True):
return []
with tokenizer.enclosing_tokens(
"LEFT_BRACKET",
"RIGHT_BRACKET",
around="extras",
'LEFT_BRACKET',
'RIGHT_BRACKET',
around='extras',
):
tokenizer.consume("WS")
tokenizer.consume('WS')
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
def _parse_extras_list(tokenizer: Tokenizer) -> list[str]:
"""
extras_list = identifier (wsp* ',' wsp* identifier)*
"""
extras: List[str] = []
extras: list[str] = []
if not tokenizer.check("IDENTIFIER"):
if not tokenizer.check('IDENTIFIER'):
return extras
extras.append(tokenizer.read().text)
while True:
tokenizer.consume("WS")
if tokenizer.check("IDENTIFIER", peek=True):
tokenizer.raise_syntax_error("Expected comma between extra names")
elif not tokenizer.check("COMMA"):
tokenizer.consume('WS')
if tokenizer.check('IDENTIFIER', peek=True):
tokenizer.raise_syntax_error('Expected comma between extra names')
elif not tokenizer.check('COMMA'):
break
tokenizer.read()
tokenizer.consume("WS")
tokenizer.consume('WS')
extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
extra_token = tokenizer.expect('IDENTIFIER', expected='extra name after comma')
extras.append(extra_token.text)
return extras
@ -208,13 +215,13 @@ def _parse_specifier(tokenizer: Tokenizer) -> str:
| WS? version_many WS?
"""
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="version specifier",
'LEFT_PARENTHESIS',
'RIGHT_PARENTHESIS',
around='version specifier',
):
tokenizer.consume("WS")
tokenizer.consume('WS')
parsed_specifiers = _parse_version_many(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
return parsed_specifiers
@ -223,27 +230,27 @@ def _parse_version_many(tokenizer: Tokenizer) -> str:
"""
version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
"""
parsed_specifiers = ""
while tokenizer.check("SPECIFIER"):
parsed_specifiers = ''
while tokenizer.check('SPECIFIER'):
span_start = tokenizer.position
parsed_specifiers += tokenizer.read().text
if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
if tokenizer.check('VERSION_PREFIX_TRAIL', peek=True):
tokenizer.raise_syntax_error(
".* suffix can only be used with `==` or `!=` operators",
'.* suffix can only be used with `==` or `!=` operators',
span_start=span_start,
span_end=tokenizer.position + 1,
)
if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
if tokenizer.check('VERSION_LOCAL_LABEL_TRAIL', peek=True):
tokenizer.raise_syntax_error(
"Local version label can only be used with `==` or `!=` operators",
'Local version label can only be used with `==` or `!=` operators',
span_start=span_start,
span_end=tokenizer.position,
)
tokenizer.consume("WS")
if not tokenizer.check("COMMA"):
tokenizer.consume('WS')
if not tokenizer.check('COMMA'):
break
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
tokenizer.consume('WS')
return parsed_specifiers
@ -257,7 +264,7 @@ def parse_marker(source: str) -> MarkerList:
def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
retval = _parse_marker(tokenizer)
tokenizer.expect("END", expected="end of marker expression")
tokenizer.expect('END', expected='end of marker expression')
return retval
@ -266,7 +273,7 @@ def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
marker = marker_atom (BOOLOP marker_atom)+
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
while tokenizer.check('BOOLOP'):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
@ -279,19 +286,19 @@ def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
| WS? marker_item WS?
"""
tokenizer.consume("WS")
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
tokenizer.consume('WS')
if tokenizer.check('LEFT_PARENTHESIS', peek=True):
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="marker expression",
'LEFT_PARENTHESIS',
'RIGHT_PARENTHESIS',
around='marker expression',
):
tokenizer.consume("WS")
tokenizer.consume('WS')
marker: MarkerAtom = _parse_marker(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
else:
marker = _parse_marker_item(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
return marker
@ -299,13 +306,13 @@ def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
"""
marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
"""
tokenizer.consume("WS")
tokenizer.consume('WS')
marker_var_left = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
marker_op = _parse_marker_op(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
marker_var_right = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
tokenizer.consume('WS')
return (marker_var_left, marker_op, marker_var_right)
@ -313,19 +320,19 @@ def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
"""
marker_var = VARIABLE | QUOTED_STRING
"""
if tokenizer.check("VARIABLE"):
return process_env_var(tokenizer.read().text.replace(".", "_"))
elif tokenizer.check("QUOTED_STRING"):
if tokenizer.check('VARIABLE'):
return process_env_var(tokenizer.read().text.replace('.', '_'))
elif tokenizer.check('QUOTED_STRING'):
return process_python_str(tokenizer.read().text)
else:
tokenizer.raise_syntax_error(
message="Expected a marker variable or quoted string"
message='Expected a marker variable or quoted string',
)
def process_env_var(env_var: str) -> Variable:
if env_var in ("platform_python_implementation", "python_implementation"):
return Variable("platform_python_implementation")
if env_var in ('platform_python_implementation', 'python_implementation'):
return Variable('platform_python_implementation')
else:
return Variable(env_var)
@ -339,18 +346,18 @@ def _parse_marker_op(tokenizer: Tokenizer) -> Op:
"""
marker_op = IN | NOT IN | OP
"""
if tokenizer.check("IN"):
if tokenizer.check('IN'):
tokenizer.read()
return Op("in")
elif tokenizer.check("NOT"):
return Op('in')
elif tokenizer.check('NOT'):
tokenizer.read()
tokenizer.expect("WS", expected="whitespace after 'not'")
tokenizer.expect("IN", expected="'in' after 'not'")
return Op("not in")
elif tokenizer.check("OP"):
tokenizer.expect('WS', expected="whitespace after 'not'")
tokenizer.expect('IN', expected="'in' after 'not'")
return Op('not in')
elif tokenizer.check('OP'):
return Op(tokenizer.read().text)
else:
return tokenizer.raise_syntax_error(
"Expected marker operator, one of "
"<=, <, !=, ==, >=, >, ~=, ===, in, not in"
'Expected marker operator, one of '
'<=, <, !=, ==, >=, >, ~=, ===, in, not in',
)


@ -1,11 +1,12 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
class InfinityType:
def __repr__(self) -> str:
return "Infinity"
return 'Infinity'
def __hash__(self) -> int:
return hash(repr(self))
@ -25,7 +26,7 @@ class InfinityType:
def __ge__(self, other: object) -> bool:
return True
def __neg__(self: object) -> "NegativeInfinityType":
def __neg__(self: object) -> NegativeInfinityType:
return NegativeInfinity
@ -34,7 +35,7 @@ Infinity = InfinityType()
class NegativeInfinityType:
def __repr__(self) -> str:
return "-Infinity"
return '-Infinity'
def __hash__(self) -> int:
return hash(repr(self))


@ -1,7 +1,14 @@
from __future__ import annotations
import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
from typing import Dict
from typing import Iterator
from typing import NoReturn
from typing import Optional
from typing import Tuple
from typing import Union
from .specifiers import Specifier
@ -21,7 +28,7 @@ class ParserSyntaxError(Exception):
message: str,
*,
source: str,
span: Tuple[int, int],
span: tuple[int, int],
) -> None:
self.span = span
self.message = message
@ -30,18 +37,18 @@ class ParserSyntaxError(Exception):
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return "\n ".join([self.message, self.source, marker])
marker = ' ' * self.span[0] + '~' * (self.span[1] - self.span[0]) + '^'
return '\n '.join([self.message, self.source, marker])
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
DEFAULT_RULES: Dict[str, Union[str, re.Pattern[str]]] = {
'LEFT_PARENTHESIS': r'\(',
'RIGHT_PARENTHESIS': r'\)',
'LEFT_BRACKET': r'\[',
'RIGHT_BRACKET': r'\]',
'SEMICOLON': r';',
'COMMA': r',',
'QUOTED_STRING': re.compile(
r"""
(
('[^']*')
@ -51,11 +58,11 @@ DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
'OP': r'(===|==|~=|!=|<=|>=|<|>)',
'BOOLOP': r'\b(or|and)\b',
'IN': r'\bin\b',
'NOT': r'\bnot\b',
'VARIABLE': re.compile(
r"""
\b(
python_version
@ -71,17 +78,17 @@ DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
'SPECIFIER': re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
'AT': r'\@',
'URL': r'[^ \t]+',
'IDENTIFIER': r'\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b',
'VERSION_PREFIX_TRAIL': r'\.\*',
'VERSION_LOCAL_LABEL_TRAIL': r'\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*',
'WS': r'[ \t]+',
'END': r'$',
}
@ -96,13 +103,13 @@ class Tokenizer:
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
rules: Dict[str, Union[str, re.Pattern[str]]],
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
self.rules: dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.next_token: Token | None = None
self.position = 0
def consume(self, name: str) -> None:
@ -119,8 +126,8 @@ class Tokenizer:
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
), f'Cannot check for {name!r}, already have {self.next_token!r}'
assert name in self.rules, f'Unknown token name: {name!r}'
expression = self.rules[name]
@ -137,7 +144,7 @@ class Tokenizer:
The token is *not* read.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
raise self.raise_syntax_error(f'Expected {expected}')
return self.read()
def read(self) -> Token:
@ -154,8 +161,8 @@ class Tokenizer:
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
span_start: int | None = None,
span_end: int | None = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
@ -170,7 +177,7 @@ class Tokenizer:
@contextlib.contextmanager
def enclosing_tokens(
self, open_token: str, close_token: str, *, around: str
self, open_token: str, close_token: str, *, around: str,
) -> Iterator[None]:
if self.check(open_token):
open_position = self.position
@ -185,7 +192,7 @@ class Tokenizer:
if not self.check(close_token):
self.raise_syntax_error(
f"Expected matching {close_token} for {open_token}, after {around}",
f'Expected matching {close_token} for {open_token}, after {around}',
span_start=open_position,
)


@ -1,31 +1,37 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from ._parser import (
MarkerAtom,
MarkerList,
Op,
Value,
Variable,
parse_marker as _parse_marker,
)
from ._parser import MarkerAtom
from ._parser import MarkerList
from ._parser import Op
from ._parser import parse_marker as _parse_marker
from ._parser import Value
from ._parser import Variable
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .specifiers import InvalidSpecifier
from .specifiers import Specifier
from .utils import canonicalize_name
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
'InvalidMarker',
'UndefinedComparison',
'UndefinedEnvironmentName',
'Marker',
'default_environment',
]
Operator = Callable[[str, str], bool]
@ -56,10 +62,10 @@ def _normalize_extra_values(results: Any) -> Any:
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
if isinstance(lhs, Variable) and lhs.value == 'extra':
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
elif isinstance(rhs, Variable) and rhs.value == 'extra':
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
@ -67,7 +73,7 @@ def _normalize_extra_values(results: Any) -> Any:
def _format_marker(
marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
marker: list[str] | MarkerAtom | str, first: bool | None = True,
) -> str:
assert isinstance(marker, (list, tuple, str))
@ -77,65 +83,65 @@ def _format_marker(
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
isinstance(marker, list) and
len(marker) == 1 and
isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
return ' '.join(inner)
else:
return "(" + " ".join(inner) + ")"
return '(' + ' '.join(inner) + ')'
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
return ' '.join([m.serialize() for m in marker])
else:
return marker
_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
_operators: dict[str, Operator] = {
'in': lambda lhs, rhs: lhs in rhs,
'not in': lambda lhs, rhs: lhs not in rhs,
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
spec = Specifier(''.join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs, prereleases=True)
oper: Optional[Operator] = _operators.get(op.serialize())
oper: Operator | None = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
raise UndefinedComparison(f'Undefined {op!r} on {lhs!r} and {rhs!r}.')
return oper(lhs, rhs)
def _normalize(*values: str, key: str) -> Tuple[str, ...]:
def _normalize(*values: str, key: str) -> tuple[str, ...]:
# PEP 685 Comparison of extra names for optional distribution dependencies
# https://peps.python.org/pep-0685/
# > When comparing extra names, tools MUST normalize the names being
# > compared using the semantics outlined in PEP 503 for names
if key == "extra":
if key == 'extra':
return tuple(canonicalize_name(v) for v in values)
# other environment markers don't have such standards
return values
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
def _evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool:
groups: list[list[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
@ -157,36 +163,36 @@ def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
assert marker in ['and', 'or']
if marker == 'or':
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info: "sys._version_info") -> str:
version = "{0.major}.{0.minor}.{0.micro}".format(info)
def format_full_version(info: sys._version_info) -> str:
version = '{0.major}.{0.minor}.{0.micro}'.format(info)
kind = info.releaselevel
if kind != "final":
if kind != 'final':
version += kind[0] + str(info.serial)
return version
def default_environment() -> Dict[str, str]:
def default_environment() -> dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
'implementation_name': implementation_name,
'implementation_version': iver,
'os_name': os.name,
'platform_machine': platform.machine(),
'platform_release': platform.release(),
'platform_system': platform.system(),
'platform_version': platform.version(),
'python_full_version': platform.python_version(),
'platform_python_implementation': platform.python_implementation(),
'python_version': '.'.join(platform.python_version_tuple()[:2]),
'sys_platform': sys.platform,
}
@ -231,7 +237,7 @@ class Marker:
return str(self) == str(other)
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
def evaluate(self, environment: dict[str, str] | None = None) -> bool:
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
@ -241,12 +247,12 @@ class Marker:
The environment is determined from the current Python process.
"""
current_environment = default_environment()
current_environment["extra"] = ""
current_environment['extra'] = ''
if environment is not None:
current_environment.update(environment)
# The API used to allow setting extra to None. We need to handle this
# case for backwards compatibility.
if current_environment["extra"] is None:
current_environment["extra"] = ""
if current_environment['extra'] is None:
current_environment['extra'] = ''
return _evaluate_markers(self._markers, current_environment)


@ -1,3 +1,5 @@
from __future__ import annotations
import email.feedparser
import email.header
import email.message
@ -5,22 +7,23 @@ import email.parser
import email.policy
import sys
import typing
from typing import (
Any,
Callable,
Dict,
Generic,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from . import requirements, specifiers, utils, version as version_module
from . import requirements
from . import specifiers
from . import utils
from . import version as version_module
T = typing.TypeVar("T")
T = typing.TypeVar('T')
if sys.version_info[:2] >= (3, 8): # pragma: no cover
from typing import Literal, TypedDict
else: # pragma: no cover
@ -52,14 +55,14 @@ except NameError: # pragma: no cover
"""
message: str
exceptions: List[Exception]
exceptions: list[Exception]
def __init__(self, message: str, exceptions: List[Exception]) -> None:
def __init__(self, message: str, exceptions: list[Exception]) -> None:
self.message = message
self.exceptions = exceptions
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})"
return f'{self.__class__.__name__}({self.message!r}, {self.exceptions!r})'
else: # pragma: no cover
ExceptionGroup = ExceptionGroup
@ -100,32 +103,32 @@ class RawMetadata(TypedDict, total=False):
metadata_version: str
name: str
version: str
platforms: List[str]
platforms: list[str]
summary: str
description: str
keywords: List[str]
keywords: list[str]
home_page: str
author: str
author_email: str
license: str
# Metadata 1.1 - PEP 314
supported_platforms: List[str]
supported_platforms: list[str]
download_url: str
classifiers: List[str]
requires: List[str]
provides: List[str]
obsoletes: List[str]
classifiers: list[str]
requires: list[str]
provides: list[str]
obsoletes: list[str]
# Metadata 1.2 - PEP 345
maintainer: str
maintainer_email: str
requires_dist: List[str]
provides_dist: List[str]
obsoletes_dist: List[str]
requires_dist: list[str]
provides_dist: list[str]
obsoletes_dist: list[str]
requires_python: str
requires_external: List[str]
project_urls: Dict[str, str]
requires_external: list[str]
project_urls: dict[str, str]
# Metadata 2.0
# PEP 426 attempted to completely revamp the metadata format
@ -138,10 +141,10 @@ class RawMetadata(TypedDict, total=False):
# Metadata 2.1 - PEP 566
description_content_type: str
provides_extra: List[str]
provides_extra: list[str]
# Metadata 2.2 - PEP 643
dynamic: List[str]
dynamic: list[str]
# Metadata 2.3 - PEP 685
# No new fields were added in PEP 685, just some edge case were
@ -149,48 +152,48 @@ class RawMetadata(TypedDict, total=False):
_STRING_FIELDS = {
"author",
"author_email",
"description",
"description_content_type",
"download_url",
"home_page",
"license",
"maintainer",
"maintainer_email",
"metadata_version",
"name",
"requires_python",
"summary",
"version",
'author',
'author_email',
'description',
'description_content_type',
'download_url',
'home_page',
'license',
'maintainer',
'maintainer_email',
'metadata_version',
'name',
'requires_python',
'summary',
'version',
}
_LIST_FIELDS = {
"classifiers",
"dynamic",
"obsoletes",
"obsoletes_dist",
"platforms",
"provides",
"provides_dist",
"provides_extra",
"requires",
"requires_dist",
"requires_external",
"supported_platforms",
'classifiers',
'dynamic',
'obsoletes',
'obsoletes_dist',
'platforms',
'provides',
'provides_dist',
'provides_extra',
'requires',
'requires_dist',
'requires_external',
'supported_platforms',
}
_DICT_FIELDS = {
"project_urls",
'project_urls',
}
def _parse_keywords(data: str) -> List[str]:
def _parse_keywords(data: str) -> list[str]:
"""Split a string of comma-separate keyboards into a list of keywords."""
return [k.strip() for k in data.split(",")]
return [k.strip() for k in data.split(',')]
def _parse_project_urls(data: List[str]) -> Dict[str, str]:
def _parse_project_urls(data: list[str]) -> dict[str, str]:
"""Parse a list of label/URL string pairings separated by a comma."""
urls = {}
for pair in data:
@ -211,8 +214,8 @@ def _parse_project_urls(data: List[str]) -> Dict[str, str]:
# answer with what to do in that case. As such, we'll do the only
# thing we can, which is treat the field as unparseable and add it
# to our list of unparsed fields.
parts = [p.strip() for p in pair.split(",", 1)]
parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items
parts = [p.strip() for p in pair.split(',', 1)]
parts.extend([''] * (max(0, 2 - len(parts)))) # Ensure 2 items
# TODO: The spec doesn't say anything about if the keys should be
# considered case sensitive or not... logically they should
@ -224,13 +227,13 @@ def _parse_project_urls(data: List[str]) -> Dict[str, str]:
# The label already exists in our set of urls, so this field
# is unparseable, and we can just add the whole thing to our
# unparseable data and stop processing it.
raise KeyError("duplicate labels in project urls")
raise KeyError('duplicate labels in project urls')
urls[label] = url
return urls
def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
def _get_payload(msg: email.message.Message, source: bytes | str) -> str:
"""Get the body of the message."""
# If our source is a str, then our caller has managed encodings for us,
# and we don't need to deal with it.
@ -242,9 +245,9 @@ def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
else:
bpayload: bytes = msg.get_payload(decode=True)
try:
return bpayload.decode("utf8", "strict")
return bpayload.decode('utf8', 'strict')
except UnicodeDecodeError:
raise ValueError("payload in an invalid encoding")
raise ValueError('payload in an invalid encoding')
# The various parse_FORMAT functions here are intended to be as lenient as
@ -260,39 +263,39 @@ def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
"author": "author",
"author-email": "author_email",
"classifier": "classifiers",
"description": "description",
"description-content-type": "description_content_type",
"download-url": "download_url",
"dynamic": "dynamic",
"home-page": "home_page",
"keywords": "keywords",
"license": "license",
"maintainer": "maintainer",
"maintainer-email": "maintainer_email",
"metadata-version": "metadata_version",
"name": "name",
"obsoletes": "obsoletes",
"obsoletes-dist": "obsoletes_dist",
"platform": "platforms",
"project-url": "project_urls",
"provides": "provides",
"provides-dist": "provides_dist",
"provides-extra": "provides_extra",
"requires": "requires",
"requires-dist": "requires_dist",
"requires-external": "requires_external",
"requires-python": "requires_python",
"summary": "summary",
"supported-platform": "supported_platforms",
"version": "version",
'author': 'author',
'author-email': 'author_email',
'classifier': 'classifiers',
'description': 'description',
'description-content-type': 'description_content_type',
'download-url': 'download_url',
'dynamic': 'dynamic',
'home-page': 'home_page',
'keywords': 'keywords',
'license': 'license',
'maintainer': 'maintainer',
'maintainer-email': 'maintainer_email',
'metadata-version': 'metadata_version',
'name': 'name',
'obsoletes': 'obsoletes',
'obsoletes-dist': 'obsoletes_dist',
'platform': 'platforms',
'project-url': 'project_urls',
'provides': 'provides',
'provides-dist': 'provides_dist',
'provides-extra': 'provides_extra',
'requires': 'requires',
'requires-dist': 'requires_dist',
'requires-external': 'requires_external',
'requires-python': 'requires_python',
'summary': 'summary',
'supported-platform': 'supported_platforms',
'version': 'version',
}
_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()}
def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]:
"""Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).
This function returns a two-item tuple of dicts. The first dict is of
@ -308,8 +311,8 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st
included in this dict.
"""
raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
unparsed: Dict[str, List[str]] = {}
raw: dict[str, str | list[str] | dict[str, str]] = {}
unparsed: dict[str, list[str]] = {}
if isinstance(data, str):
parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
@ -357,16 +360,16 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st
# The Header object stores its data as chunks, and each chunk
# can be independently encoded, so we'll need to check each
# of them.
chunks: List[Tuple[bytes, Optional[str]]] = []
chunks: list[tuple[bytes, str | None]] = []
for bin, encoding in email.header.decode_header(h):
try:
bin.decode("utf8", "strict")
bin.decode('utf8', 'strict')
except UnicodeDecodeError:
# Enable mojibake.
encoding = "latin1"
encoding = 'latin1'
valid_encoding = False
else:
encoding = "utf8"
encoding = 'utf8'
chunks.append((bin, encoding))
# Turn our chunks back into a Header object, then let that
@ -416,7 +419,7 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st
# but it conceptually is a list of strings, and is serialized using
# ", ".join(keywords), so we'll do some light data massaging to turn
# this into what it logically is.
elif raw_name == "keywords" and len(value) == 1:
elif raw_name == 'keywords' and len(value) == 1:
raw[raw_name] = _parse_keywords(value[0])
# Special Case: Project-URL
# The project urls is implemented in the metadata spec as a list of
@ -427,7 +430,7 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st
#
# We will do a little light data massaging to turn this into a map as
# it logically should be.
elif raw_name == "project_urls":
elif raw_name == 'project_urls':
try:
raw[raw_name] = _parse_project_urls(value)
except KeyError:
@ -444,22 +447,22 @@ def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[st
try:
payload = _get_payload(parsed, data)
except ValueError:
unparsed.setdefault("description", []).append(
parsed.get_payload(decode=isinstance(data, bytes))
unparsed.setdefault('description', []).append(
parsed.get_payload(decode=isinstance(data, bytes)),
)
else:
if payload:
# Check to see if we've already got a description, if so then both
# it, and this body move to unparseable.
if "description" in raw:
description_header = cast(str, raw.pop("description"))
unparsed.setdefault("description", []).extend(
[description_header, payload]
if 'description' in raw:
description_header = cast(str, raw.pop('description'))
unparsed.setdefault('description', []).extend(
[description_header, payload],
)
elif "description" in unparsed:
unparsed["description"].append(payload)
elif 'description' in unparsed:
unparsed['description'].append(payload)
else:
raw["description"] = payload
raw['description'] = payload
# We need to cast our `raw` to a metadata, because a TypedDict only supports
# literal key names, but we're computing our key names on purpose, but the
@ -472,10 +475,10 @@ _NOT_FOUND = object()
# Keep the two values in sync.
_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
_VALID_METADATA_VERSIONS = ['1.0', '1.1', '1.2', '2.1', '2.2', '2.3']
_MetadataVersion = Literal['1.0', '1.1', '1.2', '2.1', '2.2', '2.3']
_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
_REQUIRED_ATTRS = frozenset(['metadata_version', 'name', 'version'])
class _Validator(Generic[T]):
@ -495,15 +498,15 @@ class _Validator(Generic[T]):
def __init__(
self,
*,
added: _MetadataVersion = "1.0",
added: _MetadataVersion = '1.0',
) -> None:
self.added = added
def __set_name__(self, _owner: "Metadata", name: str) -> None:
def __set_name__(self, _owner: Metadata, name: str) -> None:
self.name = name
self.raw_name = _RAW_TO_EMAIL_MAPPING[name]
def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T:
def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:
# With Python 3.8, the caching can be replaced with functools.cached_property().
# No need to check the cache as attribute lookup will resolve into the
# instance's __dict__ before __get__ is called.
@ -516,7 +519,7 @@ class _Validator(Generic[T]):
# converters never have to deal with the None union.
if self.name in _REQUIRED_ATTRS or value is not None:
try:
converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}")
converter: Callable[[Any], T] = getattr(self, f'_process_{self.name}')
except AttributeError:
pass
else:
@ -531,10 +534,10 @@ class _Validator(Generic[T]):
return cast(T, value)
def _invalid_metadata(
self, msg: str, cause: Optional[Exception] = None
self, msg: str, cause: Exception | None = None,
) -> InvalidMetadata:
exc = InvalidMetadata(
self.raw_name, msg.format_map({"field": repr(self.raw_name)})
self.raw_name, msg.format_map({'field': repr(self.raw_name)}),
)
exc.__cause__ = cause
return exc
@ -542,91 +545,91 @@ class _Validator(Generic[T]):
def _process_metadata_version(self, value: str) -> _MetadataVersion:
# Implicitly makes Metadata-Version required.
if value not in _VALID_METADATA_VERSIONS:
raise self._invalid_metadata(f"{value!r} is not a valid metadata version")
raise self._invalid_metadata(f'{value!r} is not a valid metadata version')
return cast(_MetadataVersion, value)
def _process_name(self, value: str) -> str:
if not value:
raise self._invalid_metadata("{field} is a required field")
raise self._invalid_metadata('{field} is a required field')
# Validate the name as a side-effect.
try:
utils.canonicalize_name(value, validate=True)
except utils.InvalidName as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
f'{value!r} is invalid for {{field}}', cause=exc,
)
else:
return value
def _process_version(self, value: str) -> version_module.Version:
if not value:
raise self._invalid_metadata("{field} is a required field")
raise self._invalid_metadata('{field} is a required field')
try:
return version_module.parse(value)
except version_module.InvalidVersion as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
f'{value!r} is invalid for {{field}}', cause=exc,
)
def _process_summary(self, value: str) -> str:
"""Check the field contains no newlines."""
if "\n" in value:
raise self._invalid_metadata("{field} must be a single line")
if '\n' in value:
raise self._invalid_metadata('{field} must be a single line')
return value
def _process_description_content_type(self, value: str) -> str:
content_types = {"text/plain", "text/x-rst", "text/markdown"}
content_types = {'text/plain', 'text/x-rst', 'text/markdown'}
message = email.message.EmailMessage()
message["content-type"] = value
message['content-type'] = value
content_type, parameters = (
# Defaults to `text/plain` if parsing failed.
message.get_content_type().lower(),
message["content-type"].params,
message['content-type'].params,
)
# Check if content-type is valid or defaulted to `text/plain` and thus was
# not parseable.
if content_type not in content_types or content_type not in value.lower():
raise self._invalid_metadata(
f"{{field}} must be one of {list(content_types)}, not {value!r}"
f'{{field}} must be one of {list(content_types)}, not {value!r}',
)
charset = parameters.get("charset", "UTF-8")
if charset != "UTF-8":
charset = parameters.get('charset', 'UTF-8')
if charset != 'UTF-8':
raise self._invalid_metadata(
f"{{field}} can only specify the UTF-8 charset, not {list(charset)}"
f'{{field}} can only specify the UTF-8 charset, not {list(charset)}',
)
markdown_variants = {"GFM", "CommonMark"}
variant = parameters.get("variant", "GFM") # Use an acceptable default.
if content_type == "text/markdown" and variant not in markdown_variants:
markdown_variants = {'GFM', 'CommonMark'}
variant = parameters.get('variant', 'GFM') # Use an acceptable default.
if content_type == 'text/markdown' and variant not in markdown_variants:
raise self._invalid_metadata(
f"valid Markdown variants for {{field}} are {list(markdown_variants)}, "
f"not {variant!r}",
f'valid Markdown variants for {{field}} are {list(markdown_variants)}, '
f'not {variant!r}',
)
return value
def _process_dynamic(self, value: List[str]) -> List[str]:
def _process_dynamic(self, value: list[str]) -> list[str]:
for dynamic_field in map(str.lower, value):
if dynamic_field in {"name", "version", "metadata-version"}:
if dynamic_field in {'name', 'version', 'metadata-version'}:
raise self._invalid_metadata(
f"{value!r} is not allowed as a dynamic field"
f'{value!r} is not allowed as a dynamic field',
)
elif dynamic_field not in _EMAIL_TO_RAW_MAPPING:
raise self._invalid_metadata(f"{value!r} is not a valid dynamic field")
raise self._invalid_metadata(f'{value!r} is not a valid dynamic field')
return list(map(str.lower, value))
def _process_provides_extra(
self,
value: List[str],
) -> List[utils.NormalizedName]:
value: list[str],
) -> list[utils.NormalizedName]:
normalized_names = []
try:
for name in value:
normalized_names.append(utils.canonicalize_name(name, validate=True))
except utils.InvalidName as exc:
raise self._invalid_metadata(
f"{name!r} is invalid for {{field}}", cause=exc
f'{name!r} is invalid for {{field}}', cause=exc,
)
else:
return normalized_names
@ -636,19 +639,19 @@ class _Validator(Generic[T]):
return specifiers.SpecifierSet(value)
except specifiers.InvalidSpecifier as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
f'{value!r} is invalid for {{field}}', cause=exc,
)
def _process_requires_dist(
self,
value: List[str],
) -> List[requirements.Requirement]:
value: list[str],
) -> list[requirements.Requirement]:
reqs = []
try:
for req in value:
reqs.append(requirements.Requirement(req))
except requirements.InvalidRequirement as exc:
raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc)
raise self._invalid_metadata(f'{req!r} is invalid for {{field}}', cause=exc)
else:
return reqs
@ -665,7 +668,7 @@ class Metadata:
_raw: RawMetadata
@classmethod
def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata":
def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata:
"""Create an instance from :class:`RawMetadata`.
If *validate* is true, all metadata will be validated. All exceptions
@ -675,7 +678,7 @@ class Metadata:
ins._raw = data.copy() # Mutations occur due to caching enriched values.
if validate:
exceptions: List[Exception] = []
exceptions: list[Exception] = []
try:
metadata_version = ins.metadata_version
metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version)
@ -687,7 +690,7 @@ class Metadata:
# fields (so their absence can be reported).
fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS
# Remove fields that have already been checked.
fields_to_check -= {"metadata_version"}
fields_to_check -= {'metadata_version'}
for key in fields_to_check:
try:
@ -697,18 +700,18 @@ class Metadata:
try:
field_metadata_version = cls.__dict__[key].added
except KeyError:
exc = InvalidMetadata(key, f"unrecognized field: {key!r}")
exc = InvalidMetadata(key, f'unrecognized field: {key!r}')
exceptions.append(exc)
continue
field_age = _VALID_METADATA_VERSIONS.index(
field_metadata_version
field_metadata_version,
)
if field_age > metadata_age:
field = _RAW_TO_EMAIL_MAPPING[key]
exc = InvalidMetadata(
field,
"{field} introduced in metadata version "
"{field_metadata_version}, not {metadata_version}",
'{field} introduced in metadata version '
'{field_metadata_version}, not {metadata_version}',
)
exceptions.append(exc)
continue
@@ -717,14 +720,14 @@ class Metadata:
exceptions.append(exc)
if exceptions:
raise ExceptionGroup("invalid metadata", exceptions)
raise ExceptionGroup('invalid metadata', exceptions)
return ins
@classmethod
def from_email(
cls, data: Union[bytes, str], *, validate: bool = True
) -> "Metadata":
cls, data: bytes | str, *, validate: bool = True,
) -> Metadata:
"""Parse metadata from email headers.
If *validate* is true, the metadata will be validated. All exceptions
@@ -736,19 +739,19 @@ class Metadata:
exceptions: list[Exception] = []
for unparsed_key in unparsed:
if unparsed_key in _EMAIL_TO_RAW_MAPPING:
message = f"{unparsed_key!r} has invalid data"
message = f'{unparsed_key!r} has invalid data'
else:
message = f"unrecognized field: {unparsed_key!r}"
message = f'unrecognized field: {unparsed_key!r}'
exceptions.append(InvalidMetadata(unparsed_key, message))
if exceptions:
raise ExceptionGroup("unparsed", exceptions)
raise ExceptionGroup('unparsed', exceptions)
try:
return cls.from_raw(raw, validate=validate)
except ExceptionGroup as exc_group:
raise ExceptionGroup(
"invalid or unparsed metadata", exc_group.exceptions
'invalid or unparsed metadata', exc_group.exceptions,
) from None
metadata_version: _Validator[_MetadataVersion] = _Validator()
@@ -760,66 +763,66 @@ class Metadata:
*validate* parameter)"""
version: _Validator[version_module.Version] = _Validator()
""":external:ref:`core-metadata-version` (required)"""
dynamic: _Validator[Optional[List[str]]] = _Validator(
added="2.2",
dynamic: _Validator[list[str] | None] = _Validator(
added='2.2',
)
""":external:ref:`core-metadata-dynamic`
(validated against core metadata field names and lowercased)"""
platforms: _Validator[Optional[List[str]]] = _Validator()
platforms: _Validator[list[str] | None] = _Validator()
""":external:ref:`core-metadata-platform`"""
supported_platforms: _Validator[Optional[List[str]]] = _Validator(added="1.1")
supported_platforms: _Validator[list[str] | None] = _Validator(added='1.1')
""":external:ref:`core-metadata-supported-platform`"""
summary: _Validator[Optional[str]] = _Validator()
summary: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-summary` (validated to contain no newlines)"""
description: _Validator[Optional[str]] = _Validator() # TODO 2.1: can be in body
description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body
""":external:ref:`core-metadata-description`"""
description_content_type: _Validator[Optional[str]] = _Validator(added="2.1")
description_content_type: _Validator[str | None] = _Validator(added='2.1')
""":external:ref:`core-metadata-description-content-type` (validated)"""
keywords: _Validator[Optional[List[str]]] = _Validator()
keywords: _Validator[list[str] | None] = _Validator()
""":external:ref:`core-metadata-keywords`"""
home_page: _Validator[Optional[str]] = _Validator()
home_page: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-home-page`"""
download_url: _Validator[Optional[str]] = _Validator(added="1.1")
download_url: _Validator[str | None] = _Validator(added='1.1')
""":external:ref:`core-metadata-download-url`"""
author: _Validator[Optional[str]] = _Validator()
author: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-author`"""
author_email: _Validator[Optional[str]] = _Validator()
author_email: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-author-email`"""
maintainer: _Validator[Optional[str]] = _Validator(added="1.2")
maintainer: _Validator[str | None] = _Validator(added='1.2')
""":external:ref:`core-metadata-maintainer`"""
maintainer_email: _Validator[Optional[str]] = _Validator(added="1.2")
maintainer_email: _Validator[str | None] = _Validator(added='1.2')
""":external:ref:`core-metadata-maintainer-email`"""
license: _Validator[Optional[str]] = _Validator()
license: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-license`"""
classifiers: _Validator[Optional[List[str]]] = _Validator(added="1.1")
classifiers: _Validator[list[str] | None] = _Validator(added='1.1')
""":external:ref:`core-metadata-classifier`"""
requires_dist: _Validator[Optional[List[requirements.Requirement]]] = _Validator(
added="1.2"
requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator(
added='1.2',
)
""":external:ref:`core-metadata-requires-dist`"""
requires_python: _Validator[Optional[specifiers.SpecifierSet]] = _Validator(
added="1.2"
requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator(
added='1.2',
)
""":external:ref:`core-metadata-requires-python`"""
# Because `Requires-External` allows for non-PEP 440 version specifiers, we
# don't do any processing on the values.
requires_external: _Validator[Optional[List[str]]] = _Validator(added="1.2")
requires_external: _Validator[list[str] | None] = _Validator(added='1.2')
""":external:ref:`core-metadata-requires-external`"""
project_urls: _Validator[Optional[Dict[str, str]]] = _Validator(added="1.2")
project_urls: _Validator[dict[str, str] | None] = _Validator(added='1.2')
""":external:ref:`core-metadata-project-url`"""
# PEP 685 lets us raise an error if an extra doesn't pass `Name` validation
# regardless of metadata version.
provides_extra: _Validator[Optional[List[utils.NormalizedName]]] = _Validator(
added="2.1",
provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator(
added='2.1',
)
""":external:ref:`core-metadata-provides-extra`"""
provides_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2")
provides_dist: _Validator[list[str] | None] = _Validator(added='1.2')
""":external:ref:`core-metadata-provides-dist`"""
obsoletes_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2")
obsoletes_dist: _Validator[list[str] | None] = _Validator(added='1.2')
""":external:ref:`core-metadata-obsoletes-dist`"""
requires: _Validator[Optional[List[str]]] = _Validator(added="1.1")
requires: _Validator[list[str] | None] = _Validator(added='1.1')
"""``Requires`` (deprecated)"""
provides: _Validator[Optional[List[str]]] = _Validator(added="1.1")
provides: _Validator[list[str] | None] = _Validator(added='1.1')
"""``Provides`` (deprecated)"""
obsoletes: _Validator[Optional[List[str]]] = _Validator(added="1.1")
obsoletes: _Validator[list[str] | None] = _Validator(added='1.1')
"""``Obsoletes`` (deprecated)"""

View file

@@ -1,12 +1,17 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
from typing import Any, Iterator, Optional, Set
from typing import Any
from typing import Iterator
from typing import Optional
from typing import Set
from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .markers import _normalize_extra_values
from .markers import Marker
from .specifiers import SpecifierSet
from .utils import canonicalize_name
@@ -37,10 +42,10 @@ class Requirement:
raise InvalidRequirement(str(e)) from e
self.name: str = parsed.name
self.url: Optional[str] = parsed.url or None
self.extras: Set[str] = set(parsed.extras or [])
self.url: str | None = parsed.url or None
self.extras: set[str] = set(parsed.extras or [])
self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
self.marker: Optional[Marker] = None
self.marker: Marker | None = None
if parsed.marker is not None:
self.marker = Marker.__new__(Marker)
self.marker._markers = _normalize_extra_values(parsed.marker)
@@ -49,22 +54,22 @@ class Requirement:
yield name
if self.extras:
formatted_extras = ",".join(sorted(self.extras))
yield f"[{formatted_extras}]"
formatted_extras = ','.join(sorted(self.extras))
yield f'[{formatted_extras}]'
if self.specifier:
yield str(self.specifier)
if self.url:
yield f"@ {self.url}"
yield f'@ {self.url}'
if self.marker:
yield " "
yield ' '
if self.marker:
yield f"; {self.marker}"
yield f'; {self.marker}'
def __str__(self) -> str:
return "".join(self._iter_parts(self.name))
return ''.join(self._iter_parts(self.name))
def __repr__(self) -> str:
return f"<Requirement('{self}')>"
@@ -74,7 +79,7 @@ class Requirement:
(
self.__class__.__name__,
*self._iter_parts(canonicalize_name(self.name)),
)
),
)
def __eq__(self, other: Any) -> bool:
@@ -82,9 +87,9 @@ class Requirement:
return NotImplemented
return (
canonicalize_name(self.name) == canonicalize_name(other.name)
and self.extras == other.extras
and self.specifier == other.specifier
and self.url == other.url
and self.marker == other.marker
canonicalize_name(self.name) == canonicalize_name(other.name) and
self.extras == other.extras and
self.specifier == other.specifier and
self.url == other.url and
self.marker == other.marker
)
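For context, a small usage sketch of the Requirement class whose string formatting changes above (the requirement string is invented; a recent packaging release is assumed):

    from packaging.requirements import Requirement

    req = Requirement('requests[security]>=2.8.1,==2.8.*; python_version < "3.11"')
    print(req.name)            # requests
    print(sorted(req.extras))  # ['security']
    print(req.specifier)       # ==2.8.*,>=2.8.1
    print(req.marker)          # python_version < "3.11"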

View file

@@ -7,17 +7,25 @@
from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
from packaging.version import Version
"""
from __future__ import annotations
import abc
import itertools
import re
from typing import Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union
from typing import Callable
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import TypeVar
from typing import Union
from .utils import canonicalize_version
from .version import Version
UnparsedVersion = Union[Version, str]
UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
UnparsedVersionVar = TypeVar('UnparsedVersionVar', bound=UnparsedVersion)
CallableOperator = Callable[[Version, str], bool]
@@ -64,7 +72,7 @@ class BaseSpecifier(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def prereleases(self) -> Optional[bool]:
def prereleases(self) -> bool | None:
"""Whether or not pre-releases as a whole are allowed.
This can be set to either ``True`` or ``False`` to explicitly enable or disable
@@ -79,14 +87,14 @@ class BaseSpecifier(metaclass=abc.ABCMeta):
"""
@abc.abstractmethod
def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
def contains(self, item: str, prereleases: bool | None = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None,
) -> Iterator[UnparsedVersionVar]:
"""
Takes an iterable of items and filters them so that only items which
@@ -202,22 +210,22 @@ class Specifier(BaseSpecifier):
"""
_regex = re.compile(
r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
r'^\s*' + _operator_regex_str + _version_regex_str + r'\s*$',
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
'~=': 'compatible',
'==': 'equal',
'!=': 'not_equal',
'<=': 'less_than_equal',
'>=': 'greater_than_equal',
'<': 'less_than',
'>': 'greater_than',
'===': 'arbitrary',
}
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
def __init__(self, spec: str = '', prereleases: bool | None = None) -> None:
"""Initialize a Specifier instance.
:param spec:
@@ -234,9 +242,9 @@ class Specifier(BaseSpecifier):
if not match:
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
self._spec: tuple[str, str] = (
match.group('operator').strip(),
match.group('version').strip(),
)
# Store whether or not this Specifier should accept prereleases
@@ -254,10 +262,10 @@ class Specifier(BaseSpecifier):
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
if operator in ['==', '>=', '<=', '~=', '===']:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
if operator == '==' and version.endswith('.*'):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
@@ -300,12 +308,12 @@ class Specifier(BaseSpecifier):
<Specifier('>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
f', prereleases={self.prereleases!r}'
if self._prereleases is not None
else ""
else ''
)
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
return f'<{self.__class__.__name__}({str(self)!r}{pre})>'
def __str__(self) -> str:
"""A string representation of the Specifier that can be round-tripped.
@@ -315,13 +323,13 @@ class Specifier(BaseSpecifier):
>>> str(Specifier('>=1.0.0', prereleases=False))
'>=1.0.0'
"""
return "{}{}".format(*self._spec)
return '{}{}'.format(*self._spec)
@property
def _canonical_spec(self) -> Tuple[str, str]:
def _canonical_spec(self) -> tuple[str, str]:
canonical_version = canonicalize_version(
self._spec[1],
strip_trailing_zero=(self._spec[0] != "~="),
strip_trailing_zero=(self._spec[0] != '~='),
)
return self._spec[0], canonical_version
@@ -359,7 +367,7 @@ class Specifier(BaseSpecifier):
def _get_operator(self, op: str) -> CallableOperator:
operator_callable: CallableOperator = getattr(
self, f"_compare_{self._operators[op]}"
self, f'_compare_{self._operators[op]}',
)
return operator_callable
@@ -374,23 +382,23 @@ class Specifier(BaseSpecifier):
# We want everything but the last item in the version, but we want to
# ignore suffix segments.
prefix = _version_join(
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1],
)
# Add the prefix notation to the end of our string
prefix += ".*"
prefix += '.*'
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
return self._get_operator('>=')(prospective, spec) and self._get_operator('==')(
prospective, prefix,
)
def _compare_equal(self, prospective: Version, spec: str) -> bool:
# We need special logic to handle prefix matching
if spec.endswith(".*"):
if spec.endswith('.*'):
# In the case of prefix matching we want to ignore local segment.
normalized_prospective = canonicalize_version(
prospective.public, strip_trailing_zero=False
prospective.public, strip_trailing_zero=False,
)
# Get the normalized version string ignoring the trailing .*
normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
@@ -501,7 +509,7 @@ class Specifier(BaseSpecifier):
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
return str(prospective).lower() == str(spec).lower()
def __contains__(self, item: Union[str, Version]) -> bool:
def __contains__(self, item: str | Version) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
@@ -523,7 +531,7 @@ class Specifier(BaseSpecifier):
return self.contains(item)
def contains(
self, item: UnparsedVersion, prereleases: Optional[bool] = None
self, item: UnparsedVersion, prereleases: bool | None = None,
) -> bool:
"""Return whether or not the item is contained in this specifier.
@@ -569,7 +577,7 @@ class Specifier(BaseSpecifier):
return operator_callable(normalized_item, self.version)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None,
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifier.
@@ -601,7 +609,7 @@ class Specifier(BaseSpecifier):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
kw = {'prereleases': prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
@@ -630,10 +638,10 @@ class Specifier(BaseSpecifier):
yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
_prefix_regex = re.compile(r'^([0-9]+)((?:a|b|c|rc)[0-9]+)$')
def _version_split(version: str) -> List[str]:
def _version_split(version: str) -> list[str]:
"""Split version into components.
The split components are intended for version comparison. The logic does
@@ -641,12 +649,12 @@ def _version_split(version: str) -> List[str]:
components back with :func:`_version_join` may not produce the original
version string.
"""
result: List[str] = []
result: list[str] = []
epoch, _, rest = version.rpartition("!")
result.append(epoch or "0")
epoch, _, rest = version.rpartition('!')
result.append(epoch or '0')
for item in rest.split("."):
for item in rest.split('.'):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
@@ -655,7 +663,7 @@ def _version_split(version: str) -> List[str]:
return result
def _version_join(components: List[str]) -> str:
def _version_join(components: list[str]) -> str:
"""Join split version components into a version string.
This function assumes the input came from :func:`_version_split`, where the
@@ -668,11 +676,11 @@ def _version_join(components: List[str]) -> str:
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
segment.startswith(prefix) for prefix in ('dev', 'a', 'b', 'rc', 'post')
)
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
@@ -680,12 +688,12 @@ def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
left_split.insert(1, ['0'] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ['0'] * max(0, len(left_split[0]) - len(right_split[0])))
return (
list(itertools.chain.from_iterable(left_split)),
@@ -701,7 +709,7 @@ class SpecifierSet(BaseSpecifier):
"""
def __init__(
self, specifiers: str = "", prereleases: Optional[bool] = None
self, specifiers: str = '', prereleases: bool | None = None,
) -> None:
"""Initialize a SpecifierSet instance.
@@ -720,7 +728,7 @@ class SpecifierSet(BaseSpecifier):
# Split on `,` to break each individual specifier into it's own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
split_specifiers = [s.strip() for s in specifiers.split(',') if s.strip()]
# Make each individual specifier a Specifier and save in a frozen set for later.
self._specs = frozenset(map(Specifier, split_specifiers))
@@ -730,7 +738,7 @@ class SpecifierSet(BaseSpecifier):
self._prereleases = prereleases
@property
def prereleases(self) -> Optional[bool]:
def prereleases(self) -> bool | None:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
@@ -764,12 +772,12 @@ class SpecifierSet(BaseSpecifier):
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
f', prereleases={self.prereleases!r}'
if self._prereleases is not None
else ""
else ''
)
return f"<SpecifierSet({str(self)!r}{pre})>"
return f'<SpecifierSet({str(self)!r}{pre})>'
def __str__(self) -> str:
"""A string representation of the specifier set that can be round-tripped.
@@ -782,12 +790,12 @@ class SpecifierSet(BaseSpecifier):
>>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
'!=1.0.1,>=1.0.0'
"""
return ",".join(sorted(str(s) for s in self._specs))
return ','.join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
"""Return a SpecifierSet which is a combination of the two sets.
:param other: The other object to combine with.
@@ -813,8 +821,8 @@ class SpecifierSet(BaseSpecifier):
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
'Cannot combine SpecifierSets with True and False prerelease '
'overrides.',
)
return specifier
@@ -883,8 +891,8 @@ class SpecifierSet(BaseSpecifier):
def contains(
self,
item: UnparsedVersion,
prereleases: Optional[bool] = None,
installed: Optional[bool] = None,
prereleases: bool | None = None,
installed: bool | None = None,
) -> bool:
"""Return whether or not the item is contained in this SpecifierSet.
@@ -938,7 +946,7 @@ class SpecifierSet(BaseSpecifier):
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None,
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifiers in this set.
@@ -995,8 +1003,8 @@ class SpecifierSet(BaseSpecifier):
# which will filter out any pre-releases, unless there are no final
# releases.
else:
filtered: List[UnparsedVersionVar] = []
found_prereleases: List[UnparsedVersionVar] = []
filtered: list[UnparsedVersionVar] = []
found_prereleases: list[UnparsedVersionVar] = []
for item in iterable:
parsed_version = _coerce_version(item)
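For context, an illustrative sketch of the Specifier/SpecifierSet behaviour touched above (the version strings are examples; a recent packaging release is assumed):

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1.0,!=1.3")
    print("1.3" in spec)                                          # False
    print(list(spec.filter(["1.0", "1.3", "2.0a1"])))             # ['1.0']
    print(list(spec.filter(["1.0", "2.0a1"], prereleases=True)))  # ['1.0', '2.0a1']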

View file

@@ -1,6 +1,7 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
import logging
import platform
@@ -10,36 +11,35 @@ import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from typing import cast
from typing import Dict
from typing import FrozenSet
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
from . import _manylinux, _musllinux
from . import _manylinux
from . import _musllinux
logger = logging.getLogger(__name__)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
INTERPRETER_SHORT_NAMES: Dict[str, str] = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
INTERPRETER_SHORT_NAMES: dict[str, str] = {
'python': 'py', # Generic.
'cpython': 'cp',
'pypy': 'pp',
'ironpython': 'ip',
'jython': 'jy',
}
_32_BIT_INTERPRETER = struct.calcsize("P") == 4
_32_BIT_INTERPRETER = struct.calcsize('P') == 4
class Tag:
@@ -50,7 +50,7 @@ class Tag:
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
__slots__ = ['_interpreter', '_abi', '_platform', '_hash']
def __init__(self, interpreter: str, abi: str, platform: str) -> None:
self._interpreter = interpreter.lower()
@@ -80,23 +80,23 @@ class Tag:
return NotImplemented
return (
(self._hash == other._hash) # Short-circuit ASAP for perf reasons.
and (self._platform == other._platform)
and (self._abi == other._abi)
and (self._interpreter == other._interpreter)
(self._hash == other._hash) and # Short-circuit ASAP for perf reasons.
(self._platform == other._platform) and
(self._abi == other._abi) and
(self._interpreter == other._interpreter)
)
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f"{self._interpreter}-{self._abi}-{self._platform}"
return f'{self._interpreter}-{self._abi}-{self._platform}'
def __repr__(self) -> str:
return f"<{self} @ {id(self)}>"
return f'<{self} @ {id(self)}>'
def parse_tag(tag: str) -> FrozenSet[Tag]:
def parse_tag(tag: str) -> frozenset[Tag]:
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
@@ -104,28 +104,28 @@ def parse_tag(tag: str) -> FrozenSet[Tag]:
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
interpreters, abis, platforms = tag.split('-')
for interpreter in interpreters.split('.'):
for abi in abis.split('.'):
for platform_ in platforms.split('.'):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
value: Union[int, str, None] = sysconfig.get_config_var(name)
def _get_config_var(name: str, warn: bool = False) -> int | str | None:
value: int | str | None = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
"Config variable '%s' is unset, Python ABI tag may be incorrect", name,
)
return value
def _normalize_string(string: str) -> str:
return string.replace(".", "_").replace("-", "_").replace(" ", "_")
return string.replace('.', '_').replace('-', '_').replace(' ', '_')
def _is_threaded_cpython(abis: List[str]) -> bool:
def _is_threaded_cpython(abis: list[str]) -> bool:
"""
Determine if the ABI corresponds to a threaded (`--disable-gil`) build.
@@ -134,11 +134,11 @@ def _is_threaded_cpython(abis: List[str]) -> bool:
if len(abis) == 0:
return False
# expect e.g., cp313
m = re.match(r"cp\d+(.*)", abis[0])
m = re.match(r'cp\d+(.*)', abis[0])
if not m:
return False
abiflags = m.group(1)
return "t" in abiflags
return 't' in abiflags
def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool:
@@ -151,43 +151,43 @@ def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool:
return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]:
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = _version_nodot(py_version[:2])
threading = debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
threading = debug = pymalloc = ucs4 = ''
with_debug = _get_config_var('Py_DEBUG', warn)
has_refcount = hasattr(sys, 'gettotalrefcount')
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
has_ext = '_d.pyd' in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):
threading = "t"
debug = 'd'
if py_version >= (3, 13) and _get_config_var('Py_GIL_DISABLED', warn):
threading = 't'
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
with_pymalloc = _get_config_var('WITH_PYMALLOC', warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
pymalloc = 'm'
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
unicode_size = _get_config_var('Py_UNICODE_SIZE', warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
ucs4 = 'u'
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append(f"cp{version}{threading}")
abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")
abis.append(f'cp{version}{threading}')
abis.insert(0, f'cp{version}{threading}{debug}{pymalloc}{ucs4}')
return abis
def cpython_tags(
python_version: Optional[PythonVersion] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
python_version: PythonVersion | None = None,
abis: Iterable[str] | None = None,
platforms: Iterable[str] | None = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
@@ -209,7 +209,7 @@ def cpython_tags(
if not python_version:
python_version = sys.version_info[:2]
interpreter = f"cp{_version_nodot(python_version[:2])}"
interpreter = f'cp{_version_nodot(python_version[:2])}'
if abis is None:
if len(python_version) > 1:
@@ -218,7 +218,7 @@ def cpython_tags(
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
for explicit_abi in ('abi3', 'none'):
try:
abis.remove(explicit_abi)
except ValueError:
@@ -232,19 +232,19 @@ def cpython_tags(
threading = _is_threaded_cpython(abis)
use_abi3 = _abi3_applies(python_version, threading)
if use_abi3:
yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
yield from (Tag(interpreter, 'abi3', platform_) for platform_ in platforms)
yield from (Tag(interpreter, 'none', platform_) for platform_ in platforms)
if use_abi3:
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
interpreter = 'cp{version}'.format(
version=_version_nodot((python_version[0], minor_version)),
)
yield Tag(interpreter, "abi3", platform_)
yield Tag(interpreter, 'abi3', platform_)
def _generic_abi() -> List[str]:
def _generic_abi() -> list[str]:
"""
Return the ABI tag based on EXT_SUFFIX.
"""
@@ -259,24 +259,24 @@ def _generic_abi() -> List[str]:
# - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
# => graalpy_38_native
ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
ext_suffix = _get_config_var('EXT_SUFFIX', warn=True)
if not isinstance(ext_suffix, str) or ext_suffix[0] != '.':
raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
parts = ext_suffix.split(".")
parts = ext_suffix.split('.')
if len(parts) < 3:
# CPython3.7 and earlier uses ".pyd" on Windows.
return _cpython_abis(sys.version_info[:2])
soabi = parts[1]
if soabi.startswith("cpython"):
if soabi.startswith('cpython'):
# non-windows
abi = "cp" + soabi.split("-")[1]
elif soabi.startswith("cp"):
abi = 'cp' + soabi.split('-')[1]
elif soabi.startswith('cp'):
# windows
abi = soabi.split("-")[0]
elif soabi.startswith("pypy"):
abi = "-".join(soabi.split("-")[:2])
elif soabi.startswith("graalpy"):
abi = "-".join(soabi.split("-")[:3])
abi = soabi.split('-')[0]
elif soabi.startswith('pypy'):
abi = '-'.join(soabi.split('-')[:2])
elif soabi.startswith('graalpy'):
abi = '-'.join(soabi.split('-')[:3])
elif soabi:
# pyston, ironpython, others?
abi = soabi
@@ -286,9 +286,9 @@ def _generic_abi() -> List[str]:
def generic_tags(
interpreter: Optional[str] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
interpreter: str | None = None,
abis: Iterable[str] | None = None,
platforms: Iterable[str] | None = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
@@ -303,14 +303,14 @@ def generic_tags(
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
interpreter = ''.join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
else:
abis = list(abis)
platforms = list(platforms or platform_tags())
if "none" not in abis:
abis.append("none")
if 'none' not in abis:
abis.append('none')
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
@@ -324,17 +324,17 @@ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
all previous versions of that major version.
"""
if len(py_version) > 1:
yield f"py{_version_nodot(py_version[:2])}"
yield f"py{py_version[0]}"
yield f'py{_version_nodot(py_version[:2])}'
yield f'py{py_version[0]}'
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield f"py{_version_nodot((py_version[0], minor))}"
yield f'py{_version_nodot((py_version[0], minor))}'
def compatible_tags(
python_version: Optional[PythonVersion] = None,
interpreter: Optional[str] = None,
platforms: Optional[Iterable[str]] = None,
python_version: PythonVersion | None = None,
interpreter: str | None = None,
platforms: Iterable[str] | None = None,
) -> Iterator[Tag]:
"""
Yields the sequence of tags that are compatible with a specific version of Python.
@@ -349,57 +349,57 @@ def compatible_tags(
platforms = list(platforms or platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
yield Tag(version, 'none', platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
yield Tag(interpreter, 'none', 'any')
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
yield Tag(version, 'none', 'any')
def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
if arch.startswith('ppc'):
return 'ppc'
return "i386"
return 'i386'
def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> list[str]:
formats = [cpu_arch]
if cpu_arch == "x86_64":
if cpu_arch == 'x86_64':
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
formats.extend(['intel', 'fat64', 'fat32'])
elif cpu_arch == "i386":
elif cpu_arch == 'i386':
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
formats.extend(['intel', 'fat32', 'fat'])
elif cpu_arch == "ppc64":
elif cpu_arch == 'ppc64':
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
formats.append('fat64')
elif cpu_arch == "ppc":
elif cpu_arch == 'ppc':
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
formats.extend(['fat32', 'fat'])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {'arm64', 'x86_64'}:
formats.append('universal2')
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
if cpu_arch in {'x86_64', 'i386', 'ppc64', 'ppc', 'intel'}:
formats.append('universal')
return formats
def mac_platforms(
version: Optional[MacVersion] = None, arch: Optional[str] = None
version: MacVersion | None = None, arch: str | None = None,
) -> Iterator[str]:
"""
Yields the platform tags for a macOS system.
@@ -411,23 +411,23 @@ def mac_platforms(
"""
version_str, _, cpu_arch = platform.mac_ver()
if version is None:
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
version = cast('MacVersion', tuple(map(int, version_str.split('.')[:2])))
if version == (10, 16):
# When built against an older macOS SDK, Python will report macOS 10.16
# instead of the real version.
version_str = subprocess.run(
[
sys.executable,
"-sS",
"-c",
"import platform; print(platform.mac_ver()[0])",
'-sS',
'-c',
'import platform; print(platform.mac_ver()[0])',
],
check=True,
env={"SYSTEM_VERSION_COMPAT": "0"},
env={'SYSTEM_VERSION_COMPAT': '0'},
stdout=subprocess.PIPE,
text=True,
).stdout
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
version = cast('MacVersion', tuple(map(int, version_str.split('.')[:2])))
else:
version = version
if arch is None:
@@ -442,8 +442,8 @@ def mac_platforms(
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=10, minor=minor_version, binary_format=binary_format
yield 'macosx_{major}_{minor}_{binary_format}'.format(
major=10, minor=minor_version, binary_format=binary_format,
)
if version >= (11, 0):
@@ -453,8 +453,8 @@ def mac_platforms(
compat_version = major_version, 0
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=major_version, minor=0, binary_format=binary_format
yield 'macosx_{major}_{minor}_{binary_format}'.format(
major=major_version, minor=0, binary_format=binary_format,
)
if version >= (11, 0):
@@ -465,12 +465,12 @@ def mac_platforms(
# However, the "universal2" binary format can have a
# macOS version earlier than 11.0 when the x86_64 part of the binary supports
# that version of macOS.
if arch == "x86_64":
if arch == 'x86_64':
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
yield 'macosx_{major}_{minor}_{binary_format}'.format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
@@ -478,8 +478,8 @@ def mac_platforms(
else:
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_format = "universal2"
yield "macosx_{major}_{minor}_{binary_format}".format(
binary_format = 'universal2'
yield 'macosx_{major}_{minor}_{binary_format}'.format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
@ -488,21 +488,21 @@ def mac_platforms(
def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
linux = _normalize_string(sysconfig.get_platform())
if not linux.startswith("linux_"):
if not linux.startswith('linux_'):
# we should never be here, just yield the sysconfig one and return
yield linux
return
if is_32bit:
if linux == "linux_x86_64":
linux = "linux_i686"
elif linux == "linux_aarch64":
linux = "linux_armv8l"
_, arch = linux.split("_", 1)
archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch])
if linux == 'linux_x86_64':
linux = 'linux_i686'
elif linux == 'linux_aarch64':
linux = 'linux_armv8l'
_, arch = linux.split('_', 1)
archs = {'armv8l': ['armv8l', 'armv7l']}.get(arch, [arch])
yield from _manylinux.platform_tags(archs)
yield from _musllinux.platform_tags(archs)
for arch in archs:
yield f"linux_{arch}"
yield f'linux_{arch}'
def _generic_platforms() -> Iterator[str]:
@@ -513,9 +513,9 @@ def platform_tags() -> Iterator[str]:
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
if platform.system() == 'Darwin':
return mac_platforms()
elif platform.system() == "Linux":
elif platform.system() == 'Linux':
return _linux_platforms()
else:
return _generic_platforms()
@@ -536,7 +536,7 @@ def interpreter_version(*, warn: bool = False) -> str:
"""
Returns the version of the running interpreter.
"""
version = _get_config_var("py_version_nodot", warn=warn)
version = _get_config_var('py_version_nodot', warn=warn)
if version:
version = str(version)
else:
@@ -545,7 +545,7 @@ def interpreter_version(*, warn: bool = False) -> str:
def _version_nodot(version: PythonVersion) -> str:
return "".join(map(str, version))
return ''.join(map(str, version))
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
@@ -557,15 +557,15 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
"""
interp_name = interpreter_name()
if interp_name == "cp":
if interp_name == 'cp':
yield from cpython_tags(warn=warn)
else:
yield from generic_tags()
if interp_name == "pp":
interp = "pp3"
elif interp_name == "cp":
interp = "cp" + interpreter_version(warn=warn)
if interp_name == 'pp':
interp = 'pp3'
elif interp_name == 'cp':
interp = 'cp' + interpreter_version(warn=warn)
else:
interp = None
yield from compatible_tags(interpreter=interp)
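For context, a short sketch of the tag helpers reformatted above (the compressed tag string is an example; the output of sys_tags() depends on the running interpreter):

    from packaging.tags import Tag, parse_tag, sys_tags

    tags = parse_tag("py2.py3-none-any")   # expands a compressed tag set
    print(sorted(str(t) for t in tags))    # ['py2-none-any', 'py3-none-any']

    best = next(sys_tags())                # most specific tag for this interpreter
    print(isinstance(best, Tag), best.interpreter, best.abi, best.platform)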

View file

@@ -1,15 +1,22 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
import re
from typing import FrozenSet, NewType, Tuple, Union, cast
from typing import cast
from typing import FrozenSet
from typing import NewType
from typing import Tuple
from typing import Union
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
from .tags import parse_tag
from .tags import Tag
from .version import InvalidVersion
from .version import Version
BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)
NormalizedName = NewType('NormalizedName', str)
class InvalidName(ValueError):
@@ -32,19 +39,19 @@ class InvalidSdistFilename(ValueError):
# Core metadata spec for `Name`
_validate_regex = re.compile(
r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
r'^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$', re.IGNORECASE,
)
_canonicalize_regex = re.compile(r"[-_.]+")
_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
_canonicalize_regex = re.compile(r'[-_.]+')
_normalized_regex = re.compile(r'^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$')
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
_build_tag_regex = re.compile(r'(\d+)(.*)')
def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
if validate and not _validate_regex.match(name):
raise InvalidName(f"name is invalid: {name!r}")
raise InvalidName(f'name is invalid: {name!r}')
# This is taken from PEP 503.
value = _canonicalize_regex.sub("-", name).lower()
value = _canonicalize_regex.sub('-', name).lower()
return cast(NormalizedName, value)
@@ -53,7 +60,7 @@ def is_normalized_name(name: str) -> bool:
def canonicalize_version(
version: Union[Version, str], *, strip_trailing_zero: bool = True
version: Version | str, *, strip_trailing_zero: bool = True,
) -> str:
"""
This is very similar to Version.__str__, but has one subtle difference
@@ -72,61 +79,61 @@ def canonicalize_version(
# Epoch
if parsed.epoch != 0:
parts.append(f"{parsed.epoch}!")
parts.append(f'{parsed.epoch}!')
# Release segment
release_segment = ".".join(str(x) for x in parsed.release)
release_segment = '.'.join(str(x) for x in parsed.release)
if strip_trailing_zero:
# NB: This strips trailing '.0's to normalize
release_segment = re.sub(r"(\.0)+$", "", release_segment)
release_segment = re.sub(r'(\.0)+$', '', release_segment)
parts.append(release_segment)
# Pre-release
if parsed.pre is not None:
parts.append("".join(str(x) for x in parsed.pre))
parts.append(''.join(str(x) for x in parsed.pre))
# Post-release
if parsed.post is not None:
parts.append(f".post{parsed.post}")
parts.append(f'.post{parsed.post}')
# Development release
if parsed.dev is not None:
parts.append(f".dev{parsed.dev}")
parts.append(f'.dev{parsed.dev}')
# Local version segment
if parsed.local is not None:
parts.append(f"+{parsed.local}")
parts.append(f'+{parsed.local}')
return "".join(parts)
return ''.join(parts)
def parse_wheel_filename(
filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
if not filename.endswith(".whl"):
) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]:
if not filename.endswith('.whl'):
raise InvalidWheelFilename(
f"Invalid wheel filename (extension must be '.whl'): {filename}"
f"Invalid wheel filename (extension must be '.whl'): {filename}",
)
filename = filename[:-4]
dashes = filename.count("-")
dashes = filename.count('-')
if dashes not in (4, 5):
raise InvalidWheelFilename(
f"Invalid wheel filename (wrong number of parts): {filename}"
f'Invalid wheel filename (wrong number of parts): {filename}',
)
parts = filename.split("-", dashes - 2)
parts = filename.split('-', dashes - 2)
name_part = parts[0]
# See PEP 427 for the rules on escaping the project name.
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
raise InvalidWheelFilename(f"Invalid project name: {filename}")
if '__' in name_part or re.match(r'^[\w\d._]*$', name_part, re.UNICODE) is None:
raise InvalidWheelFilename(f'Invalid project name: {filename}')
name = canonicalize_name(name_part)
try:
version = Version(parts[1])
except InvalidVersion as e:
raise InvalidWheelFilename(
f"Invalid wheel filename (invalid version): {filename}"
f'Invalid wheel filename (invalid version): {filename}',
) from e
if dashes == 5:
@@ -134,7 +141,7 @@ def parse_wheel_filename(
build_match = _build_tag_regex.match(build_part)
if build_match is None:
raise InvalidWheelFilename(
f"Invalid build number: {build_part} in '{filename}'"
f"Invalid build number: {build_part} in '{filename}'",
)
build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
else:
@@ -143,22 +150,22 @@ def parse_wheel_filename(
return (name, version, build, tags)
def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
if filename.endswith(".tar.gz"):
file_stem = filename[: -len(".tar.gz")]
elif filename.endswith(".zip"):
file_stem = filename[: -len(".zip")]
def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]:
if filename.endswith('.tar.gz'):
file_stem = filename[: -len('.tar.gz')]
elif filename.endswith('.zip'):
file_stem = filename[: -len('.zip')]
else:
raise InvalidSdistFilename(
f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
f" {filename}"
f' {filename}',
)
# We are requiring a PEP 440 version, which cannot contain dashes,
# so we split on the last dash.
name_part, sep, version_part = file_stem.rpartition("-")
name_part, sep, version_part = file_stem.rpartition('-')
if not sep:
raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
raise InvalidSdistFilename(f'Invalid sdist filename: {filename}')
name = canonicalize_name(name_part)
@@ -166,7 +173,7 @@ def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
version = Version(version_part)
except InvalidVersion as e:
raise InvalidSdistFilename(
f"Invalid sdist filename (invalid version): {filename}"
f'Invalid sdist filename (invalid version): {filename}',
) from e
return (name, version)
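For context, a quick sketch of the name and filename helpers diffed above (the filenames are invented; a recent packaging release is assumed):

    from packaging.utils import canonicalize_name, parse_sdist_filename, parse_wheel_filename

    print(canonicalize_name("Foo._-Bar"))           # foo-bar
    name, version, build, tags = parse_wheel_filename("pip-24.0-py3-none-any.whl")
    print(name, version, build)                     # pip 24.0 ()
    print(parse_sdist_filename("pip-24.0.tar.gz"))  # ('pip', <Version('24.0')>)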

View file

@@ -6,14 +6,24 @@
from packaging.version import parse, Version
"""
from __future__ import annotations
import itertools
import re
from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
from typing import SupportsInt
from typing import Tuple
from typing import Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
from ._structures import Infinity
from ._structures import InfinityType
from ._structures import NegativeInfinity
from ._structures import NegativeInfinityType
__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
__all__ = ['VERSION_PATTERN', 'parse', 'Version', 'InvalidVersion']
LocalType = Tuple[Union[int, str], ...]
@@ -35,14 +45,14 @@ VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
class _Version(NamedTuple):
epoch: int
release: Tuple[int, ...]
dev: Optional[Tuple[str, int]]
pre: Optional[Tuple[str, int]]
post: Optional[Tuple[str, int]]
local: Optional[LocalType]
release: tuple[int, ...]
dev: tuple[str, int] | None
pre: tuple[str, int] | None
post: tuple[str, int] | None
local: LocalType | None
def parse(version: str) -> "Version":
def parse(version: str) -> Version:
"""Parse the given version string.
>>> parse('1.0.dev1')
@@ -65,7 +75,7 @@ class InvalidVersion(ValueError):
class _BaseVersion:
_key: Tuple[Any, ...]
_key: tuple[Any, ...]
def __hash__(self) -> int:
return hash(self._key)
@@ -73,13 +83,13 @@ class _BaseVersion:
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: "_BaseVersion") -> bool:
def __lt__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: "_BaseVersion") -> bool:
def __le__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
@@ -91,13 +101,13 @@ class _BaseVersion:
return self._key == other._key
def __ge__(self, other: "_BaseVersion") -> bool:
def __ge__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: "_BaseVersion") -> bool:
def __gt__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
@@ -180,7 +190,7 @@ class Version(_BaseVersion):
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
_regex = re.compile(r'^\s*' + VERSION_PATTERN + r'\s*$', re.VERBOSE | re.IGNORECASE)
_key: CmpKey
def __init__(self, version: str) -> None:
@@ -201,14 +211,14 @@ class Version(_BaseVersion):
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
epoch=int(match.group('epoch')) if match.group('epoch') else 0,
release=tuple(int(i) for i in match.group('release').split('.')),
pre=_parse_letter_version(match.group('pre_l'), match.group('pre_n')),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
match.group('post_l'), match.group('post_n1') or match.group('post_n2'),
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
dev=_parse_letter_version(match.group('dev_l'), match.group('dev_n')),
local=_parse_local_version(match.group('local')),
)
# Generate a key which will be used for sorting
@@ -239,28 +249,28 @@ class Version(_BaseVersion):
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
parts.append(f'{self.epoch}!')
# Release segment
parts.append(".".join(str(x) for x in self.release))
parts.append('.'.join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
parts.append(''.join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
parts.append(f'.post{self.post}')
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
parts.append(f'.dev{self.dev}')
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
parts.append(f'+{self.local}')
return "".join(parts)
return ''.join(parts)
@property
def epoch(self) -> int:
@@ -274,7 +284,7 @@ class Version(_BaseVersion):
return self._version.epoch
@property
def release(self) -> Tuple[int, ...]:
def release(self) -> tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
@@ -290,7 +300,7 @@ class Version(_BaseVersion):
return self._version.release
@property
def pre(self) -> Optional[Tuple[str, int]]:
def pre(self) -> tuple[str, int] | None:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
@@ -305,7 +315,7 @@ class Version(_BaseVersion):
return self._version.pre
@property
def post(self) -> Optional[int]:
def post(self) -> int | None:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
@@ -316,7 +326,7 @@ class Version(_BaseVersion):
return self._version.post[1] if self._version.post else None
@property
def dev(self) -> Optional[int]:
def dev(self) -> int | None:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
@@ -327,7 +337,7 @@ class Version(_BaseVersion):
return self._version.dev[1] if self._version.dev else None
@property
def local(self) -> Optional[str]:
def local(self) -> str | None:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
@@ -336,7 +346,7 @@ class Version(_BaseVersion):
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
return '.'.join(str(x) for x in self._version.local)
else:
return None
@@ -351,7 +361,7 @@ class Version(_BaseVersion):
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
return str(self).split('+', 1)[0]
@property
def base_version(self) -> str:
@@ -371,12 +381,12 @@ class Version(_BaseVersion):
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
parts.append(f'{self.epoch}!')
# Release segment
parts.append(".".join(str(x) for x in self.release))
parts.append('.'.join(str(x) for x in self.release))
return "".join(parts)
return ''.join(parts)
@property
def is_prerelease(self) -> bool:
@@ -450,8 +460,8 @@ class Version(_BaseVersion):
def _parse_letter_version(
letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
) -> Optional[Tuple[str, int]]:
letter: str | None, number: str | bytes | SupportsInt | None,
) -> tuple[str, int] | None:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
@@ -465,30 +475,30 @@ def _parse_letter_version(
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
if letter == 'alpha':
letter = 'a'
elif letter == 'beta':
letter = 'b'
elif letter in ['c', 'pre', 'preview']:
letter = 'rc'
elif letter in ['rev', 'r']:
letter = 'post'
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
letter = 'post'
return letter, int(number)
return None
_local_version_separators = re.compile(r"[\._-]")
_local_version_separators = re.compile(r'[\._-]')
def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
def _parse_local_version(local: str | None) -> LocalType | None:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
@@ -502,11 +512,11 @@ def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
def _cmpkey(
epoch: int,
release: Tuple[int, ...],
pre: Optional[Tuple[str, int]],
post: Optional[Tuple[str, int]],
dev: Optional[Tuple[str, int]],
local: Optional[LocalType],
release: tuple[int, ...],
pre: tuple[str, int] | None,
post: tuple[str, int] | None,
dev: tuple[str, int] | None,
local: LocalType | None,
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
@@ -515,7 +525,7 @@ def _cmpkey(
# re-reverse it back into the correct order and make it a tuple and use
# that for our sorting key.
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))),
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
@@ -557,7 +567,7 @@ def _cmpkey(
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
(i, '') if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
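Finally, a small sketch of the Version ordering that _cmpkey above implements (the version strings are examples; a recent packaging release is assumed):

    from packaging.version import Version, parse

    v = parse("1.2.3rc1+abc.dev1")
    print(v.release, v.pre, v.local)   # (1, 2, 3) ('rc', 1) abc.dev1
    print(v.public, v.base_version)    # 1.2.3rc1 1.2.3
    print(Version("1.0.dev0") < Version("1.0a1") < Version("1.0"))  # True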