[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
This commit is contained in:
pre-commit-ci[bot] 2024-04-13 00:00:18 +00:00
parent 72ad6dc953
commit f4cd1ba0d6
813 changed files with 66015 additions and 58839 deletions

View file

@@ -1,11 +1,13 @@
# mypy: allow-untyped-defs
"""Support for skip/xfail functions and markers."""
from collections.abc import Mapping
from __future__ import annotations
import dataclasses
import os
import platform
import sys
import traceback
from collections.abc import Mapping
from typing import Generator
from typing import Optional
from typing import Tuple
@@ -26,21 +28,21 @@ from _pytest.stash import StashKey
def pytest_addoption(parser: Parser) -> None:
    """Register this plugin's command-line option and ini value.

    Adds ``--runxfail`` to the "general" option group (report xfail tests
    as if unmarked) and the ``xfail_strict`` bool ini setting (default for
    the ``strict`` parameter of ``xfail`` markers when not given explicitly).

    NOTE(review): reconstructed from a garbled diff rendering that showed
    both the old double-quoted and new single-quoted lines; this is the
    post-commit (single-quote) state.
    """
    group = parser.getgroup('general')
    group.addoption(
        '--runxfail',
        action='store_true',
        dest='runxfail',
        default=False,
        help='Report the results of xfail tests as if they were not marked',
    )

    parser.addini(
        'xfail_strict',
        'Default for the strict parameter of xfail '
        'markers when not given explicitly (default: False)',
        default=False,
        type='bool',
    )
@@ -50,40 +52,40 @@ def pytest_configure(config: Config) -> None:
import pytest
old = pytest.xfail
config.add_cleanup(lambda: setattr(pytest, "xfail", old))
config.add_cleanup(lambda: setattr(pytest, 'xfail', old))
def nop(*args, **kwargs):
pass
nop.Exception = xfail.Exception # type: ignore[attr-defined]
setattr(pytest, "xfail", nop)
setattr(pytest, 'xfail', nop)
config.addinivalue_line(
"markers",
"skip(reason=None): skip the given test function with an optional reason. "
'markers',
'skip(reason=None): skip the given test function with an optional reason. '
'Example: skip(reason="no way of currently testing this") skips the '
"test.",
'test.',
)
config.addinivalue_line(
"markers",
"skipif(condition, ..., *, reason=...): "
"skip the given test function if any of the conditions evaluate to True. "
'markers',
'skipif(condition, ..., *, reason=...): '
'skip the given test function if any of the conditions evaluate to True. '
"Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
"See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
'See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif',
)
config.addinivalue_line(
"markers",
"xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
"mark the test function as an expected failure if any of the conditions "
"evaluate to True. Optionally specify a reason for better reporting "
'markers',
'xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): '
'mark the test function as an expected failure if any of the conditions '
'evaluate to True. Optionally specify a reason for better reporting '
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
'If only specific exception(s) are expected, you can list them in '
'raises, and if the test fails in other ways, it will be reported as '
'a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail',
)
def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]:
"""Evaluate a single skipif/xfail condition.
If an old-style string condition is given, it is eval()'d, otherwise the
@@ -95,40 +97,40 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
# String condition.
if isinstance(condition, str):
globals_ = {
"os": os,
"sys": sys,
"platform": platform,
"config": item.config,
'os': os,
'sys': sys,
'platform': platform,
'config': item.config,
}
for dictionary in reversed(
item.ihook.pytest_markeval_namespace(config=item.config)
item.ihook.pytest_markeval_namespace(config=item.config),
):
if not isinstance(dictionary, Mapping):
raise ValueError(
f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}"
f'pytest_markeval_namespace() needs to return a dict, got {dictionary!r}',
)
globals_.update(dictionary)
if hasattr(item, "obj"):
if hasattr(item, 'obj'):
globals_.update(item.obj.__globals__) # type: ignore[attr-defined]
try:
filename = f"<{mark.name} condition>"
condition_code = compile(condition, filename, "eval")
filename = f'<{mark.name} condition>'
condition_code = compile(condition, filename, 'eval')
result = eval(condition_code, globals_)
except SyntaxError as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
" " + " " * (exc.offset or 0) + "^",
"SyntaxError: invalid syntax",
'Error evaluating %r condition' % mark.name,
' ' + condition,
' ' + ' ' * (exc.offset or 0) + '^',
'SyntaxError: invalid syntax',
]
fail("\n".join(msglines), pytrace=False)
fail('\n'.join(msglines), pytrace=False)
except Exception as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
'Error evaluating %r condition' % mark.name,
' ' + condition,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
fail('\n'.join(msglines), pytrace=False)
# Boolean condition.
else:
@@ -136,20 +138,20 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
result = bool(condition)
except Exception as exc:
msglines = [
"Error evaluating %r condition as a boolean" % mark.name,
'Error evaluating %r condition as a boolean' % mark.name,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
fail('\n'.join(msglines), pytrace=False)
reason = mark.kwargs.get("reason", None)
reason = mark.kwargs.get('reason', None)
if reason is None:
if isinstance(condition, str):
reason = "condition: " + condition
reason = 'condition: ' + condition
else:
# XXX better be checked at collection time
msg = (
"Error evaluating %r: " % mark.name
+ "you need to specify reason=STRING when using booleans as conditions."
'Error evaluating %r: ' % mark.name +
'you need to specify reason=STRING when using booleans as conditions.'
)
fail(msg, pytrace=False)
@@ -160,20 +162,20 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
class Skip:
"""The result of evaluate_skip_marks()."""
reason: str = "unconditional skip"
reason: str = 'unconditional skip'
def evaluate_skip_marks(item: Item) -> Optional[Skip]:
def evaluate_skip_marks(item: Item) -> Skip | None:
"""Evaluate skip and skipif marks on item, returning Skip if triggered."""
for mark in item.iter_markers(name="skipif"):
if "condition" not in mark.kwargs:
for mark in item.iter_markers(name='skipif'):
if 'condition' not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
conditions = (mark.kwargs['condition'],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
reason = mark.kwargs.get('reason', '')
return Skip(reason)
# If any of the conditions are true.
@@ -182,11 +184,11 @@ def evaluate_skip_marks(item: Item) -> Optional[Skip]:
if result:
return Skip(reason)
for mark in item.iter_markers(name="skip"):
for mark in item.iter_markers(name='skip'):
try:
return Skip(*mark.args, **mark.kwargs)
except TypeError as e:
raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None
raise TypeError(str(e) + ' - maybe you meant pytest.mark.skipif?') from None
return None
@@ -195,28 +197,28 @@ def evaluate_skip_marks(item: Item) -> Optional[Skip]:
class Xfail:
"""The result of evaluate_xfail_marks()."""
__slots__ = ("reason", "run", "strict", "raises")
__slots__ = ('reason', 'run', 'strict', 'raises')
reason: str
run: bool
strict: bool
raises: Optional[Tuple[Type[BaseException], ...]]
raises: tuple[type[BaseException], ...] | None
def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
def evaluate_xfail_marks(item: Item) -> Xfail | None:
"""Evaluate xfail marks on item, returning Xfail if triggered."""
for mark in item.iter_markers(name="xfail"):
run = mark.kwargs.get("run", True)
strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
raises = mark.kwargs.get("raises", None)
if "condition" not in mark.kwargs:
for mark in item.iter_markers(name='xfail'):
run = mark.kwargs.get('run', True)
strict = mark.kwargs.get('strict', item.config.getini('xfail_strict'))
raises = mark.kwargs.get('raises', None)
if 'condition' not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
conditions = (mark.kwargs['condition'],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
reason = mark.kwargs.get('reason', '')
return Xfail(reason, run, strict, raises)
# If any of the conditions are true.
@@ -240,7 +242,7 @@ def pytest_runtest_setup(item: Item) -> None:
item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
if xfailed and not item.config.option.runxfail and not xfailed.run:
xfail("[NOTRUN] " + xfailed.reason)
xfail('[NOTRUN] ' + xfailed.reason)
@hookimpl(wrapper=True)
@@ -250,7 +252,7 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
if xfailed and not item.config.option.runxfail and not xfailed.run:
xfail("[NOTRUN] " + xfailed.reason)
xfail('[NOTRUN] ' + xfailed.reason)
try:
return (yield)
@@ -263,7 +265,7 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
@hookimpl(wrapper=True)
def pytest_runtest_makereport(
item: Item, call: CallInfo[None]
item: Item, call: CallInfo[None],
) -> Generator[None, TestReport, TestReport]:
rep = yield
xfailed = item.stash.get(xfailed_key, None)
@@ -271,30 +273,30 @@ def pytest_runtest_makereport(
pass # don't interfere
elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
assert call.excinfo.value.msg is not None
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
rep.wasxfail = 'reason: ' + call.excinfo.value.msg
rep.outcome = 'skipped'
elif not rep.skipped and xfailed:
if call.excinfo:
raises = xfailed.raises
if raises is not None and not isinstance(call.excinfo.value, raises):
rep.outcome = "failed"
rep.outcome = 'failed'
else:
rep.outcome = "skipped"
rep.outcome = 'skipped'
rep.wasxfail = xfailed.reason
elif call.when == "call":
elif call.when == 'call':
if xfailed.strict:
rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] " + xfailed.reason
rep.outcome = 'failed'
rep.longrepr = '[XPASS(strict)] ' + xfailed.reason
else:
rep.outcome = "passed"
rep.outcome = 'passed'
rep.wasxfail = xfailed.reason
return rep
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
if hasattr(report, "wasxfail"):
def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None:
if hasattr(report, 'wasxfail'):
if report.skipped:
return "xfailed", "x", "XFAIL"
return 'xfailed', 'x', 'XFAIL'
elif report.passed:
return "xpassed", "X", "XPASS"
return 'xpassed', 'X', 'XPASS'
return None