remove log_token and EXTRA_VERBOSE

- flake8 spent 5% of execution in `log_token`
- `_EXTRA_VERBOSE` was only used by `log_token`
- `python -m tokenize` provides better debug token output
This commit is contained in:
Anthony Sottile 2022-01-23 18:02:39 -05:00
parent 5ecea41b6d
commit 929cf5dfd3
6 changed files with 2 additions and 73 deletions

View file

@ -57,8 +57,6 @@ Utility Functions
.. autofunction:: flake8.processor.is_multiline_string
.. autofunction:: flake8.processor.log_token
.. autofunction:: flake8.processor.mutate_string
.. autofunction:: flake8.processor.token_is_newline

View file

@ -20,19 +20,11 @@ LOG.addHandler(logging.NullHandler())
__version__ = "4.0.1"
__version_info__ = tuple(int(i) for i in __version__.split(".") if i.isdigit())
# There is nothing lower than logging.DEBUG (10) in the logging library,
# but we want an extra level to avoid being too verbose when using -vv.
_EXTRA_VERBOSE = 5
logging.addLevelName(_EXTRA_VERBOSE, "VERBOSE")
_VERBOSITY_TO_LOG_LEVEL = {
# output more than warnings but not debugging info
1: logging.INFO, # INFO is a numerical level of 20
# output debugging information
2: logging.DEBUG, # DEBUG is a numerical level of 10
# output extra verbose debugging information
3: _EXTRA_VERBOSE,
}
LOG_FORMAT = (
@ -58,9 +50,8 @@ def configure_logging(
"""
if verbosity <= 0:
return
if verbosity > 3:
verbosity = 3
verbosity = min(verbosity, max(_VERBOSITY_TO_LOG_LEVEL))
log_level = _VERBOSITY_TO_LOG_LEVEL[verbosity]
if not filename or filename in ("stderr", "stdout"):

View file

@ -510,7 +510,6 @@ class FileChecker:
statistics["tokens"] += 1
self.check_physical_eol(token, prev_physical)
token_type, text = token[0:2]
processor.log_token(LOG, token)
if token_type == tokenize.OP:
parens = processor.count_parentheses(parens, text)
elif parens == 0:

View file

@ -11,7 +11,6 @@ from typing import List
from typing import Optional
from typing import Tuple
import flake8
from flake8 import defaults
from flake8 import utils
from flake8.plugins.finder import LoadedPlugin
@ -412,19 +411,6 @@ def count_parentheses(current_parentheses_count: int, token_text: str) -> int:
return current_parentheses_count
def log_token(log: logging.Logger, token: tokenize.TokenInfo) -> None:
    """Log a token to a provided logging object.

    Single-line tokens are shown with a ``[start_col:end_col]`` span;
    tokens spanning multiple lines show the ending line instead.
    """
    start_row, start_col = token[2]
    end_row, end_col = token[3]
    if start_row == end_row:
        # A start column of 0 is rendered as the empty string, e.g. "[:19]".
        pos = f"[{start_col or ''}:{end_col}]"
    else:
        pos = f"l.{end_row}"
    message = "l.%s\t%s\t%s\t%r" % (
        start_row,
        pos,
        tokenize.tok_name[token[0]],
        token[1],
    )
    log.log(flake8._EXTRA_VERBOSE, message)
def expand_indent(line: str) -> int:
r"""Return the amount of indentation.

View file

@ -10,7 +10,7 @@ def options_from(**kwargs):
kwargs.setdefault("max_line_length", 79)
kwargs.setdefault("max_doc_length", None)
kwargs.setdefault("indent_size", 4)
kwargs.setdefault("verbose", False)
kwargs.setdefault("verbose", 0)
kwargs.setdefault("stdin_display_name", "stdin")
kwargs.setdefault("disable_noqa", False)
return argparse.Namespace(**kwargs)

View file

@ -380,51 +380,6 @@ def test_expand_indent(string, expected):
assert expected == actual
def _comment_token(start, end):
    """Build a COMMENT token tuple spanning (start_row, start_col)..(end_row, end_col)."""
    text = "# this is a comment"
    return (tokenize.COMMENT, text, start, end, text)


@pytest.mark.parametrize(
    "token, log_string",
    [
        (
            _comment_token((1, 0), (1, 19)),
            "l.1\t[:19]\tCOMMENT\t'# this is a comment'",
        ),
        (
            _comment_token((1, 5), (1, 19)),
            "l.1\t[5:19]\tCOMMENT\t'# this is a comment'",
        ),
        (
            # Spans two lines, so the position is rendered as "l.<end_row>".
            _comment_token((1, 0), (2, 19)),
            "l.1\tl.2\tCOMMENT\t'# this is a comment'",
        ),
    ],
)
def test_log_token(token, log_string):
    """Verify we use the log object passed in."""
    log = mock.Mock()
    processor.log_token(log, token)
    # 5 is flake8._EXTRA_VERBOSE
    log.log.assert_called_once_with(5, log_string)
@pytest.mark.parametrize(
"current_count, token_text, expected",
[