[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
This commit is contained in:
pre-commit-ci[bot] 2024-04-13 00:00:18 +00:00
parent 72ad6dc953
commit f4cd1ba0d6
813 changed files with 66015 additions and 58839 deletions

View file

@@ -1,7 +1,14 @@
from __future__ import annotations
import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
from typing import Dict
from typing import Iterator
from typing import NoReturn
from typing import Optional
from typing import Tuple
from typing import Union
from .specifiers import Specifier
@@ -21,7 +28,7 @@ class ParserSyntaxError(Exception):
message: str,
*,
source: str,
span: Tuple[int, int],
span: tuple[int, int],
) -> None:
self.span = span
self.message = message
@@ -30,18 +37,18 @@ class ParserSyntaxError(Exception):
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return "\n ".join([self.message, self.source, marker])
marker = ' ' * self.span[0] + '~' * (self.span[1] - self.span[0]) + '^'
return '\n '.join([self.message, self.source, marker])
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
DEFAULT_RULES: Dict[str, Union[str, re.Pattern[str]]] = {
'LEFT_PARENTHESIS': r'\(',
'RIGHT_PARENTHESIS': r'\)',
'LEFT_BRACKET': r'\[',
'RIGHT_BRACKET': r'\]',
'SEMICOLON': r';',
'COMMA': r',',
'QUOTED_STRING': re.compile(
r"""
(
('[^']*')
@@ -51,11 +58,11 @@ DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
'OP': r'(===|==|~=|!=|<=|>=|<|>)',
'BOOLOP': r'\b(or|and)\b',
'IN': r'\bin\b',
'NOT': r'\bnot\b',
'VARIABLE': re.compile(
r"""
\b(
python_version
@@ -71,17 +78,17 @@ DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
'SPECIFIER': re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
'AT': r'\@',
'URL': r'[^ \t]+',
'IDENTIFIER': r'\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b',
'VERSION_PREFIX_TRAIL': r'\.\*',
'VERSION_LOCAL_LABEL_TRAIL': r'\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*',
'WS': r'[ \t]+',
'END': r'$',
}
@@ -96,13 +103,13 @@ class Tokenizer:
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
rules: Dict[str, Union[str, re.Pattern[str]]],
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
self.rules: dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.next_token: Token | None = None
self.position = 0
def consume(self, name: str) -> None:
@@ -119,8 +126,8 @@ class Tokenizer:
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
), f'Cannot check for {name!r}, already have {self.next_token!r}'
assert name in self.rules, f'Unknown token name: {name!r}'
expression = self.rules[name]
@@ -137,7 +144,7 @@ class Tokenizer:
The token is *not* read.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
raise self.raise_syntax_error(f'Expected {expected}')
return self.read()
def read(self) -> Token:
@@ -154,8 +161,8 @@ class Tokenizer:
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
span_start: int | None = None,
span_end: int | None = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
@@ -170,7 +177,7 @@ class Tokenizer:
@contextlib.contextmanager
def enclosing_tokens(
self, open_token: str, close_token: str, *, around: str
self, open_token: str, close_token: str, *, around: str,
) -> Iterator[None]:
if self.check(open_token):
open_position = self.position
@@ -185,7 +192,7 @@ class Tokenizer:
if not self.check(close_token):
self.raise_syntax_error(
f"Expected matching {close_token} for {open_token}, after {around}",
f'Expected matching {close_token} for {open_token}, after {around}',
span_start=open_position,
)