remove --statistics and --benchmarks

This commit is contained in:
Anthony Sottile 2022-09-29 15:04:51 -04:00
parent 2c1bfa1f3d
commit ce274fb742
12 changed files with 8 additions and 443 deletions

View file

@ -91,22 +91,6 @@ if True:
assert cli.main(["t.py"]) == 0
def test_statistics_option(tmpdir, capsys):
"""Ensure that `flake8 --statistics` works."""
with tmpdir.as_cwd():
tmpdir.join("t.py").write("import os\nimport sys\n")
assert cli.main(["--statistics", "t.py"]) == 1
expected = """\
t.py:1:1: F401 'os' imported but unused
t.py:2:1: F401 'sys' imported but unused
2 F401 'os' imported but unused
"""
out, err = capsys.readouterr()
assert out == expected
assert err == ""
def test_show_source_option(tmpdir, capsys):
"""Ensure that --show-source and --no-show-source work."""
with tmpdir.as_cwd():
@ -226,29 +210,6 @@ def test_bug_report_successful(capsys):
assert err == ""
def test_benchmark_successful(tmp_path, capsys):
"""Test that --benchmark does not crash."""
fname = tmp_path.joinpath("t.py")
fname.write_text("print('hello world')\n")
assert cli.main(["--benchmark", str(fname)]) == 0
out, err = capsys.readouterr()
parts = [line.split(maxsplit=1) for line in out.splitlines()]
assert parts == [
[mock.ANY, "seconds elapsed"],
["1", "total logical lines processed"],
[mock.ANY, "logical lines processed per second"],
["1", "total physical lines processed"],
[mock.ANY, "physical lines processed per second"],
["5", "total tokens processed"],
[mock.ANY, "tokens processed per second"],
["1", "total files processed"],
[mock.ANY, "files processed per second"],
]
assert err == ""
def test_specific_noqa_does_not_clobber_pycodestyle_noqa(tmpdir, capsys):
"""See https://github.com/pycqa/flake8/issues/1104."""
with tmpdir.as_cwd():

View file

@ -143,15 +143,3 @@ def test_report_total_errors():
app = mock.Mock(result_count="Fake count")
report = api.Report(app)
assert report.total_errors == "Fake count"
def test_report_get_statistics():
"""Verify that we use the statistics object."""
stats = mock.Mock()
stats.statistics_for.return_value = []
style_guide = mock.Mock(stats=stats)
app = mock.Mock(guide=style_guide)
report = api.Report(app)
assert report.get_statistics("E") == []
stats.statistics_for.assert_called_once_with("E")

View file

@ -1,124 +0,0 @@
"""Tests for the statistics module in Flake8."""
from __future__ import annotations
import pytest
from flake8 import statistics as stats
from flake8.violation import Violation
DEFAULT_ERROR_CODE = "E100"
DEFAULT_FILENAME = "file.py"
DEFAULT_TEXT = "Default text"
def make_error(**kwargs):
"""Create errors with a bunch of default values."""
kwargs.setdefault("code", DEFAULT_ERROR_CODE)
kwargs.setdefault("filename", DEFAULT_FILENAME)
kwargs.setdefault("line_number", 1)
kwargs.setdefault("column_number", 1)
kwargs.setdefault("text", DEFAULT_TEXT)
return Violation(**kwargs, physical_line=None)
def test_key_creation():
"""Verify how we create Keys from Errors."""
key = stats.Key.create_from(make_error())
assert key == (DEFAULT_FILENAME, DEFAULT_ERROR_CODE)
assert key.filename == DEFAULT_FILENAME
assert key.code == DEFAULT_ERROR_CODE
@pytest.mark.parametrize(
"code, filename, args, expected_result",
[
# Error prefix matches
("E123", "file000.py", ("E", None), True),
("E123", "file000.py", ("E1", None), True),
("E123", "file000.py", ("E12", None), True),
("E123", "file000.py", ("E123", None), True),
# Error prefix and filename match
("E123", "file000.py", ("E", "file000.py"), True),
("E123", "file000.py", ("E1", "file000.py"), True),
("E123", "file000.py", ("E12", "file000.py"), True),
("E123", "file000.py", ("E123", "file000.py"), True),
# Error prefix does not match
("E123", "file000.py", ("W", None), False),
# Error prefix matches but filename does not
("E123", "file000.py", ("E", "file001.py"), False),
# Error prefix does not match but filename does
("E123", "file000.py", ("W", "file000.py"), False),
# Neither error prefix match nor filename
("E123", "file000.py", ("W", "file001.py"), False),
],
)
def test_key_matching(code, filename, args, expected_result):
"""Verify Key#matches behaves as we expect with the above input."""
key = stats.Key.create_from(make_error(code=code, filename=filename))
assert key.matches(*args) is expected_result
def test_statistic_creation():
"""Verify how we create Statistic objects from Errors."""
stat = stats.Statistic.create_from(make_error())
assert stat.error_code == DEFAULT_ERROR_CODE
assert stat.message == DEFAULT_TEXT
assert stat.filename == DEFAULT_FILENAME
assert stat.count == 0
def test_statistic_increment():
"""Verify we update the count."""
stat = stats.Statistic.create_from(make_error())
assert stat.count == 0
stat.increment()
assert stat.count == 1
def test_recording_statistics():
"""Verify that we appropriately create a new Statistic and store it."""
aggregator = stats.Statistics()
assert list(aggregator.statistics_for("E")) == []
aggregator.record(make_error())
storage = aggregator._store
for key, value in storage.items():
assert isinstance(key, stats.Key)
assert isinstance(value, stats.Statistic)
assert storage[stats.Key(DEFAULT_FILENAME, DEFAULT_ERROR_CODE)].count == 1
def test_statistics_for_single_record():
"""Show we can retrieve the only statistic recorded."""
aggregator = stats.Statistics()
assert list(aggregator.statistics_for("E")) == []
aggregator.record(make_error())
statistics = list(aggregator.statistics_for("E"))
assert len(statistics) == 1
assert isinstance(statistics[0], stats.Statistic)
def test_statistics_for_filters_by_filename():
"""Show we can retrieve the only statistic recorded."""
aggregator = stats.Statistics()
assert list(aggregator.statistics_for("E")) == []
aggregator.record(make_error())
aggregator.record(make_error(filename="example.py"))
statistics = list(aggregator.statistics_for("E", DEFAULT_FILENAME))
assert len(statistics) == 1
assert isinstance(statistics[0], stats.Statistic)
def test_statistic_for_retrieves_more_than_one_value():
"""Show this works for more than a couple statistic values."""
aggregator = stats.Statistics()
for i in range(50):
aggregator.record(make_error(code=f"E1{i:02d}"))
aggregator.record(make_error(code=f"W2{i:02d}"))
statistics = list(aggregator.statistics_for("E"))
assert len(statistics) == 50
statistics = list(aggregator.statistics_for("W22"))
assert len(statistics) == 10

View file

@ -6,7 +6,6 @@ from unittest import mock
import pytest
from flake8 import statistics
from flake8 import style_guide
from flake8 import utils
from flake8.formatting import base
@ -32,7 +31,6 @@ def test_handle_error_does_not_raise_type_errors():
guide = style_guide.StyleGuide(
create_options(select=["T111"], ignore=[]),
formatter=formatter,
stats=statistics.Statistics(),
)
assert 1 == guide.handle_error(
@ -74,7 +72,6 @@ def test_style_guide_applies_to(style_guide_file, filename, expected):
guide = style_guide.StyleGuide(
options,
formatter=formatter,
stats=statistics.Statistics(),
filename=style_guide_file,
)
assert guide.applies_to(filename) is expected