[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
This commit is contained in:
pre-commit-ci[bot] 2024-04-13 00:00:18 +00:00
parent 72ad6dc953
commit f4cd1ba0d6
813 changed files with 66015 additions and 58839 deletions

View file

@@ -1,8 +1,6 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Better tokenizing for coverage.py."""
from __future__ import annotations
import ast
@@ -12,11 +10,11 @@ import re
import sys
import token
import tokenize
from typing import Iterable
from coverage import env
from coverage.types import TLineNo, TSourceTokenLines
from coverage.types import TLineNo
from coverage.types import TSourceTokenLines
TokenInfos = Iterable[tokenize.TokenInfo]
@@ -34,10 +32,10 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
"""
last_line: str | None = None
last_lineno = -1
last_ttext: str = ""
last_ttext: str = ''
for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
if last_lineno != elineno:
if last_line and last_line.endswith("\\\n"):
if last_line and last_line.endswith('\\\n'):
# We are at the beginning of a new line, and the last line
# ended with a backslash. We probably have to inject a
# backslash token into the stream. Unfortunately, there's more
@@ -54,20 +52,20 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
# so we need to figure out if the backslash is already in the
# string token or not.
inject_backslash = True
if last_ttext.endswith("\\"):
if last_ttext.endswith('\\'):
inject_backslash = False
elif ttype == token.STRING:
if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\":
if '\n' in ttext and ttext.split('\n', 1)[0][-1] == '\\':
# It's a multi-line string and the first line ends with
# a backslash, so we don't need to inject another.
inject_backslash = False
if inject_backslash:
# Figure out what column the backslash is in.
ccol = len(last_line.split("\n")[-2]) - 1
ccol = len(last_line.split('\n')[-2]) - 1
# Yield the token, with a fake token type.
yield tokenize.TokenInfo(
99999, "\\\n",
(slineno, ccol), (slineno, ccol+2),
99999, '\\\n',
(slineno, ccol), (slineno, ccol + 2),
last_line,
)
last_line = ltext
@@ -79,6 +77,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
class SoftKeywordFinder(ast.NodeVisitor):
"""Helper for finding lines with soft keywords, like match/case lines."""
def __init__(self, source: str) -> None:
# This will be the set of line numbers that start with a soft keyword.
self.soft_key_lines: set[TLineNo] = set()
@@ -119,7 +118,7 @@ def source_token_lines(source: str) -> TSourceTokenLines:
line: list[tuple[str, str]] = []
col = 0
source = source.expandtabs(8).replace("\r\n", "\n")
source = source.expandtabs(8).replace('\r\n', '\n')
tokgen = generate_tokens(source)
if env.PYBEHAVIOR.soft_keywords:
@@ -127,25 +126,25 @@ def source_token_lines(source: str) -> TSourceTokenLines:
for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
mark_start = True
for part in re.split("(\n)", ttext):
if part == "\n":
for part in re.split('(\n)', ttext):
if part == '\n':
yield line
line = []
col = 0
mark_end = False
elif part == "":
elif part == '':
mark_end = False
elif ttype in ws_tokens:
mark_end = False
else:
if mark_start and scol > col:
line.append(("ws", " " * (scol - col)))
line.append(('ws', ' ' * (scol - col)))
mark_start = False
tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
if ttype == token.NAME:
if keyword.iskeyword(ttext):
# Hard keywords are always keywords.
tok_class = "key"
tok_class = 'key'
elif sys.version_info >= (3, 10): # PYVERSIONS
# Need the version_info check to keep mypy from borking
# on issoftkeyword here.
@@ -154,12 +153,12 @@ def source_token_lines(source: str) -> TSourceTokenLines:
# on lines that start match or case statements.
if len(line) == 0:
is_start_of_line = True
elif (len(line) == 1) and line[0][0] == "ws":
elif (len(line) == 1) and line[0][0] == 'ws':
is_start_of_line = True
else:
is_start_of_line = False
if is_start_of_line and sline in soft_key_lines:
tok_class = "key"
tok_class = 'key'
line.append((tok_class, part))
mark_end = True
scol = 0
@@ -181,6 +180,7 @@ class CachedTokenizer:
actually tokenize twice.
"""
def __init__(self) -> None:
self.last_text: str | None = None
self.last_tokens: list[tokenize.TokenInfo] = []
@@ -197,6 +197,7 @@ class CachedTokenizer:
raise
return self.last_tokens
# Create our generate_tokens cache as a callable replacement function.
generate_tokens = CachedTokenizer().generate_tokens