mirror of
https://github.com/pre-commit/pre-commit-hooks.git
synced 2026-04-07 20:26:54 +00:00
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
parent 72ad6dc953
commit f4cd1ba0d6
813 changed files with 66015 additions and 58839 deletions
@@ -3,11 +3,18 @@
 The docstring for each __parse_* function contains ENBF-inspired grammar representing
 the implementation.
 """
+from __future__ import annotations
+
 import ast
-from typing import Any, List, NamedTuple, Optional, Tuple, Union
+from typing import Any
+from typing import List
+from typing import NamedTuple
+from typing import Optional
+from typing import Tuple
+from typing import Union
 
-from ._tokenizer import DEFAULT_RULES, Tokenizer
+from ._tokenizer import DEFAULT_RULES
+from ._tokenizer import Tokenizer
 
 
 class Node:
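A minimal sketch (not part of this diff; find_marker is a made-up name) of why the added __future__ import matters here: under PEP 563 annotations are stored as strings, so the list[str] and X | None forms introduced in the hunks below parse even on interpreters that predate that syntax, while runtime type aliases keep their typing spellings.

# Sketch only: demonstrates the annotation/runtime split, not packaging code.
from __future__ import annotations

from typing import Any, List

MarkerList = List[Any]  # evaluated at runtime, so the typing form stays


def find_marker(name: str) -> MarkerList | None:  # string annotation; never evaluated
    return None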
@@ -52,9 +59,9 @@ MarkerList = List[Any]
 class ParsedRequirement(NamedTuple):
     name: str
     url: str
-    extras: List[str]
+    extras: list[str]
     specifier: str
-    marker: Optional[MarkerList]
+    marker: MarkerList | None
 
 
 # --------------------------------------------------------------------------------------
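For illustration, a standalone usage sketch of the updated NamedTuple (the class is redefined locally so the snippet runs on its own; the field values are invented, not taken from the diff):

from __future__ import annotations

from typing import Any, List, NamedTuple

MarkerList = List[Any]


class ParsedRequirement(NamedTuple):
    name: str
    url: str
    extras: list[str]
    specifier: str
    marker: MarkerList | None


req = ParsedRequirement('requests', '', ['socks'], '>=2.31', None)
print(req.name, req.extras)  # requests ['socks']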
@@ -68,68 +75,68 @@ def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
     """
     requirement = WS? IDENTIFIER WS? extras WS? requirement_details
     """
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
 
     name_token = tokenizer.expect(
-        "IDENTIFIER", expected="package name at the start of dependency specifier"
+        'IDENTIFIER', expected='package name at the start of dependency specifier',
     )
     name = name_token.text
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
 
     extras = _parse_extras(tokenizer)
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
 
     url, specifier, marker = _parse_requirement_details(tokenizer)
-    tokenizer.expect("END", expected="end of dependency specifier")
+    tokenizer.expect('END', expected='end of dependency specifier')
 
     return ParsedRequirement(name, url, extras, specifier, marker)
 
 
 def _parse_requirement_details(
     tokenizer: Tokenizer,
-) -> Tuple[str, str, Optional[MarkerList]]:
+) -> tuple[str, str, MarkerList | None]:
     """
     requirement_details = AT URL (WS requirement_marker?)?
                         | specifier WS? (requirement_marker)?
     """
 
-    specifier = ""
-    url = ""
+    specifier = ''
+    url = ''
     marker = None
 
-    if tokenizer.check("AT"):
+    if tokenizer.check('AT'):
         tokenizer.read()
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
 
         url_start = tokenizer.position
-        url = tokenizer.expect("URL", expected="URL after @").text
-        if tokenizer.check("END", peek=True):
+        url = tokenizer.expect('URL', expected='URL after @').text
+        if tokenizer.check('END', peek=True):
             return (url, specifier, marker)
 
-        tokenizer.expect("WS", expected="whitespace after URL")
+        tokenizer.expect('WS', expected='whitespace after URL')
 
         # The input might end after whitespace.
-        if tokenizer.check("END", peek=True):
+        if tokenizer.check('END', peek=True):
             return (url, specifier, marker)
 
         marker = _parse_requirement_marker(
-            tokenizer, span_start=url_start, after="URL and whitespace"
+            tokenizer, span_start=url_start, after='URL and whitespace',
         )
     else:
         specifier_start = tokenizer.position
         specifier = _parse_specifier(tokenizer)
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
 
-        if tokenizer.check("END", peek=True):
+        if tokenizer.check('END', peek=True):
             return (url, specifier, marker)
 
         marker = _parse_requirement_marker(
             tokenizer,
             span_start=specifier_start,
             after=(
-                "version specifier"
+                'version specifier'
                 if specifier
-                else "name and no valid version specifier"
+                else 'name and no valid version specifier'
             ),
         )
 
 
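As a usage sketch (through packaging's public API rather than the private function above, and not part of the diff), this is the kind of input _parse_requirement accepts: a name, optional extras, a specifier list, and an optional marker.

from packaging.requirements import Requirement

req = Requirement("name[quux, strange] >=1.0, <2.0 ; python_version >= '3.8'")
print(req.name, sorted(req.extras), str(req.specifier), req.marker)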
@@ -137,66 +144,66 @@ def _parse_requirement_details(
 
 
 def _parse_requirement_marker(
-    tokenizer: Tokenizer, *, span_start: int, after: str
+    tokenizer: Tokenizer, *, span_start: int, after: str,
 ) -> MarkerList:
     """
     requirement_marker = SEMICOLON marker WS?
     """
 
-    if not tokenizer.check("SEMICOLON"):
+    if not tokenizer.check('SEMICOLON'):
         tokenizer.raise_syntax_error(
-            f"Expected end or semicolon (after {after})",
+            f'Expected end or semicolon (after {after})',
             span_start=span_start,
         )
     tokenizer.read()
 
     marker = _parse_marker(tokenizer)
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
 
     return marker
 
 
-def _parse_extras(tokenizer: Tokenizer) -> List[str]:
+def _parse_extras(tokenizer: Tokenizer) -> list[str]:
     """
     extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
     """
-    if not tokenizer.check("LEFT_BRACKET", peek=True):
+    if not tokenizer.check('LEFT_BRACKET', peek=True):
         return []
 
     with tokenizer.enclosing_tokens(
-        "LEFT_BRACKET",
-        "RIGHT_BRACKET",
-        around="extras",
+        'LEFT_BRACKET',
+        'RIGHT_BRACKET',
+        around='extras',
     ):
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
         extras = _parse_extras_list(tokenizer)
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
 
     return extras
 
 
-def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
+def _parse_extras_list(tokenizer: Tokenizer) -> list[str]:
     """
     extras_list = identifier (wsp* ',' wsp* identifier)*
     """
-    extras: List[str] = []
+    extras: list[str] = []
 
-    if not tokenizer.check("IDENTIFIER"):
+    if not tokenizer.check('IDENTIFIER'):
         return extras
 
     extras.append(tokenizer.read().text)
 
     while True:
-        tokenizer.consume("WS")
-        if tokenizer.check("IDENTIFIER", peek=True):
-            tokenizer.raise_syntax_error("Expected comma between extra names")
-        elif not tokenizer.check("COMMA"):
+        tokenizer.consume('WS')
+        if tokenizer.check('IDENTIFIER', peek=True):
+            tokenizer.raise_syntax_error('Expected comma between extra names')
+        elif not tokenizer.check('COMMA'):
             break
 
         tokenizer.read()
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
 
-        extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
+        extra_token = tokenizer.expect('IDENTIFIER', expected='extra name after comma')
         extras.append(extra_token.text)
 
     return extras
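A quick check (again through the public API, not the private helpers, and not part of the diff) of the comma rule that _parse_extras_list enforces:

from packaging.requirements import InvalidRequirement, Requirement

print(sorted(Requirement('pkg[extra1, extra2]').extras))  # ['extra1', 'extra2']
try:
    Requirement('pkg[extra1 extra2]')  # missing comma
except InvalidRequirement as exc:
    print(exc)  # error text mentions the expected comma between extra names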
@@ -208,13 +215,13 @@ def _parse_specifier(tokenizer: Tokenizer) -> str:
               | WS? version_many WS?
     """
     with tokenizer.enclosing_tokens(
-        "LEFT_PARENTHESIS",
-        "RIGHT_PARENTHESIS",
-        around="version specifier",
+        'LEFT_PARENTHESIS',
+        'RIGHT_PARENTHESIS',
+        around='version specifier',
     ):
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
         parsed_specifiers = _parse_version_many(tokenizer)
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
 
     return parsed_specifiers
 
 
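The grammar above makes the parentheses around a version specifier optional, which a small public-API check (not part of the diff) illustrates:

from packaging.requirements import Requirement

# Both spellings should parse to the same specifier set.
assert str(Requirement('name (>=1.0)').specifier) == str(Requirement('name >=1.0').specifier)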
@@ -223,27 +230,27 @@ def _parse_version_many(tokenizer: Tokenizer) -> str:
     """
     version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
     """
-    parsed_specifiers = ""
-    while tokenizer.check("SPECIFIER"):
+    parsed_specifiers = ''
+    while tokenizer.check('SPECIFIER'):
         span_start = tokenizer.position
         parsed_specifiers += tokenizer.read().text
-        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
+        if tokenizer.check('VERSION_PREFIX_TRAIL', peek=True):
             tokenizer.raise_syntax_error(
-                ".* suffix can only be used with `==` or `!=` operators",
+                '.* suffix can only be used with `==` or `!=` operators',
                 span_start=span_start,
                 span_end=tokenizer.position + 1,
             )
-        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
+        if tokenizer.check('VERSION_LOCAL_LABEL_TRAIL', peek=True):
             tokenizer.raise_syntax_error(
-                "Local version label can only be used with `==` or `!=` operators",
+                'Local version label can only be used with `==` or `!=` operators',
                 span_start=span_start,
                 span_end=tokenizer.position,
             )
-        tokenizer.consume("WS")
-        if not tokenizer.check("COMMA"):
+        tokenizer.consume('WS')
+        if not tokenizer.check('COMMA'):
             break
         parsed_specifiers += tokenizer.read().text
-        tokenizer.consume("WS")
+        tokenizer.consume('WS')
 
     return parsed_specifiers
 
 
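The VERSION_PREFIX_TRAIL check above is observable through the public parser; a sketch (not from the diff):

from packaging.requirements import InvalidRequirement, Requirement

print(Requirement('pkg==1.0.*').specifier)  # a `.*` suffix is fine with ==/!=
try:
    Requirement('pkg>=1.0.*')  # but not with ordering operators
except InvalidRequirement as exc:
    print(exc)  # .* suffix can only be used with `==` or `!=` operators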
@@ -257,7 +264,7 @@ def parse_marker(source: str) -> MarkerList:
 
 def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
     retval = _parse_marker(tokenizer)
-    tokenizer.expect("END", expected="end of marker expression")
+    tokenizer.expect('END', expected='end of marker expression')
     return retval
 
 
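parse_marker itself is private, but the same code path is reachable through packaging.markers.Marker, which also evaluates the parsed result; a brief sketch (not part of the diff):

from packaging.markers import Marker

marker = Marker("python_version >= '3.8' and sys_platform != 'win32'")
print(marker.evaluate())  # True or False depending on the running interpreter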
@@ -266,7 +273,7 @@ def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
     marker = marker_atom (BOOLOP marker_atom)+
     """
     expression = [_parse_marker_atom(tokenizer)]
-    while tokenizer.check("BOOLOP"):
+    while tokenizer.check('BOOLOP'):
         token = tokenizer.read()
         expr_right = _parse_marker_atom(tokenizer)
         expression.extend((token.text, expr_right))
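A toy sketch in plain Python (not packaging's API) of what the BOOLOP loop above produces: operator text and atoms alternate in one flat list.

# Stand-ins for parsed marker atoms; the real code appends MarkerAtom values.
expression = ['atom_left']
for op_text, right_atom in [('and', 'atom_mid'), ('or', 'atom_right')]:
    expression.extend((op_text, right_atom))
print(expression)  # ['atom_left', 'and', 'atom_mid', 'or', 'atom_right']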
@@ -279,19 +286,19 @@ def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
                 | WS? marker_item WS?
     """
 
-    tokenizer.consume("WS")
-    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
+    tokenizer.consume('WS')
+    if tokenizer.check('LEFT_PARENTHESIS', peek=True):
         with tokenizer.enclosing_tokens(
-            "LEFT_PARENTHESIS",
-            "RIGHT_PARENTHESIS",
-            around="marker expression",
+            'LEFT_PARENTHESIS',
+            'RIGHT_PARENTHESIS',
+            around='marker expression',
         ):
-            tokenizer.consume("WS")
+            tokenizer.consume('WS')
             marker: MarkerAtom = _parse_marker(tokenizer)
-            tokenizer.consume("WS")
+            tokenizer.consume('WS')
     else:
         marker = _parse_marker_item(tokenizer)
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
     return marker
 
 
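The parenthesized branch above is what allows grouping in markers; a public-API sketch (not from the diff):

from packaging.markers import Marker

# The parentheses group the `or` under the `and`.
m = Marker("python_version >= '3.8' and (sys_platform == 'linux' or sys_platform == 'darwin')")
print(m.evaluate())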
@@ -299,13 +306,13 @@ def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
     """
    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
     """
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
     marker_var_left = _parse_marker_var(tokenizer)
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
     marker_op = _parse_marker_op(tokenizer)
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
     marker_var_right = _parse_marker_var(tokenizer)
-    tokenizer.consume("WS")
+    tokenizer.consume('WS')
     return (marker_var_left, marker_op, marker_var_right)
 
 
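Since marker_var (next hunk) admits either a variable or a quoted string on each side of the operator, a reversed comparison is legal; a sketch via the public API (not part of the diff):

from packaging.markers import Marker

# Quoted string on the left, environment variable on the right.
print(Marker("'linux' == sys_platform").evaluate())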
@@ -313,19 +320,19 @@ def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
     """
     marker_var = VARIABLE | QUOTED_STRING
     """
-    if tokenizer.check("VARIABLE"):
-        return process_env_var(tokenizer.read().text.replace(".", "_"))
-    elif tokenizer.check("QUOTED_STRING"):
+    if tokenizer.check('VARIABLE'):
+        return process_env_var(tokenizer.read().text.replace('.', '_'))
+    elif tokenizer.check('QUOTED_STRING'):
         return process_python_str(tokenizer.read().text)
     else:
         tokenizer.raise_syntax_error(
-            message="Expected a marker variable or quoted string"
+            message='Expected a marker variable or quoted string',
         )
 
 
 def process_env_var(env_var: str) -> Variable:
-    if env_var in ("platform_python_implementation", "python_implementation"):
-        return Variable("platform_python_implementation")
+    if env_var in ('platform_python_implementation', 'python_implementation'):
+        return Variable('platform_python_implementation')
     else:
         return Variable(env_var)
 
 
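The normalization in process_env_var means the legacy spelling and the canonical one behave identically; a public-API sketch (not from the diff):

from packaging.markers import Marker

legacy = Marker("python_implementation == 'CPython'")
canonical = Marker("platform_python_implementation == 'CPython'")
assert legacy.evaluate() == canonical.evaluate()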
@@ -339,18 +346,18 @@ def _parse_marker_op(tokenizer: Tokenizer) -> Op:
     """
     marker_op = IN | NOT IN | OP
     """
-    if tokenizer.check("IN"):
+    if tokenizer.check('IN'):
         tokenizer.read()
-        return Op("in")
-    elif tokenizer.check("NOT"):
+        return Op('in')
+    elif tokenizer.check('NOT'):
         tokenizer.read()
-        tokenizer.expect("WS", expected="whitespace after 'not'")
-        tokenizer.expect("IN", expected="'in' after 'not'")
-        return Op("not in")
-    elif tokenizer.check("OP"):
+        tokenizer.expect('WS', expected="whitespace after 'not'")
+        tokenizer.expect('IN', expected="'in' after 'not'")
+        return Op('not in')
+    elif tokenizer.check('OP'):
         return Op(tokenizer.read().text)
     else:
         return tokenizer.raise_syntax_error(
-            "Expected marker operator, one of "
-            "<=, <, !=, ==, >=, >, ~=, ===, in, not in"
+            'Expected marker operator, one of '
+            '<=, <, !=, ==, >=, >, ~=, ===, in, not in',
         )
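Finally, a sketch (not part of the diff) exercising the NOT IN branch above through the public API:

from packaging.markers import Marker

m = Marker("'arm' not in platform_machine")
print(m.evaluate())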