mirror of
https://github.com/pre-commit/pre-commit-hooks.git
synced 2026-04-08 20:44:18 +00:00
detect_gcp_credentials hook
This commit is contained in:
parent
b73acb198e
commit
e0c61d89d0
929 changed files with 311695 additions and 0 deletions
41
.venv/lib/python3.10/site-packages/coverage/__init__.py
Normal file
41
.venv/lib/python3.10/site-packages/coverage/__init__.py
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""
|
||||
Code coverage measurement for Python.
|
||||
|
||||
Ned Batchelder
|
||||
https://coverage.readthedocs.io
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# mypy's convention is that "import as" names are public from the module.
|
||||
# We import names as themselves to indicate that. Pylint sees it as pointless,
|
||||
# so disable its warning.
|
||||
# pylint: disable=useless-import-alias
|
||||
|
||||
from coverage.version import (
|
||||
__version__ as __version__,
|
||||
version_info as version_info,
|
||||
)
|
||||
|
||||
from coverage.control import (
|
||||
Coverage as Coverage,
|
||||
process_startup as process_startup,
|
||||
)
|
||||
from coverage.data import CoverageData as CoverageData
|
||||
from coverage.exceptions import CoverageException as CoverageException
|
||||
from coverage.plugin import (
|
||||
CoveragePlugin as CoveragePlugin,
|
||||
FileReporter as FileReporter,
|
||||
FileTracer as FileTracer,
|
||||
)
|
||||
|
||||
# Backward compatibility.
|
||||
coverage = Coverage
|
||||
|
||||
# On Windows, we encode and decode deep enough that something goes wrong and
|
||||
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
|
||||
# Adding a reference here prevents it from being unloaded. Yuk.
|
||||
10
.venv/lib/python3.10/site-packages/coverage/__main__.py
Normal file
10
.venv/lib/python3.10/site-packages/coverage/__main__.py
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Coverage.py's main entry point."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from coverage.cmdline import main
|
||||
sys.exit(main())
|
||||
114
.venv/lib/python3.10/site-packages/coverage/annotate.py
Normal file
114
.venv/lib/python3.10/site-packages/coverage/annotate.py
Normal file
|
|
@ -0,0 +1,114 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Source file annotation for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from typing import Iterable, TYPE_CHECKING
|
||||
|
||||
from coverage.files import flat_rootname
|
||||
from coverage.misc import ensure_dir, isolate_module
|
||||
from coverage.plugin import FileReporter
|
||||
from coverage.report_core import get_analysis_to_report
|
||||
from coverage.results import Analysis
|
||||
from coverage.types import TMorf
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
class AnnotateReporter:
|
||||
"""Generate annotated source files showing line coverage.
|
||||
|
||||
This reporter creates annotated copies of the measured source files. Each
|
||||
.py file is copied as a .py,cover file, with a left-hand margin annotating
|
||||
each line::
|
||||
|
||||
> def h(x):
|
||||
- if 0: #pragma: no cover
|
||||
- pass
|
||||
> if x == 1:
|
||||
! a = 1
|
||||
> else:
|
||||
> a = 2
|
||||
|
||||
> h(2)
|
||||
|
||||
Executed lines use ">", lines not executed use "!", lines excluded from
|
||||
consideration use "-".
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, coverage: Coverage) -> None:
|
||||
self.coverage = coverage
|
||||
self.config = self.coverage.config
|
||||
self.directory: str | None = None
|
||||
|
||||
blank_re = re.compile(r"\s*(#|$)")
|
||||
else_re = re.compile(r"\s*else\s*:\s*(#|$)")
|
||||
|
||||
def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None:
|
||||
"""Run the report.
|
||||
|
||||
See `coverage.report()` for arguments.
|
||||
|
||||
"""
|
||||
self.directory = directory
|
||||
self.coverage.get_data()
|
||||
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
|
||||
self.annotate_file(fr, analysis)
|
||||
|
||||
def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None:
|
||||
"""Annotate a single file.
|
||||
|
||||
`fr` is the FileReporter for the file to annotate.
|
||||
|
||||
"""
|
||||
statements = sorted(analysis.statements)
|
||||
missing = sorted(analysis.missing)
|
||||
excluded = sorted(analysis.excluded)
|
||||
|
||||
if self.directory:
|
||||
ensure_dir(self.directory)
|
||||
dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
|
||||
if dest_file.endswith("_py"):
|
||||
dest_file = dest_file[:-3] + ".py"
|
||||
dest_file += ",cover"
|
||||
else:
|
||||
dest_file = fr.filename + ",cover"
|
||||
|
||||
with open(dest_file, "w", encoding="utf-8") as dest:
|
||||
i = j = 0
|
||||
covered = True
|
||||
source = fr.source()
|
||||
for lineno, line in enumerate(source.splitlines(True), start=1):
|
||||
while i < len(statements) and statements[i] < lineno:
|
||||
i += 1
|
||||
while j < len(missing) and missing[j] < lineno:
|
||||
j += 1
|
||||
if i < len(statements) and statements[i] == lineno:
|
||||
covered = j >= len(missing) or missing[j] > lineno
|
||||
if self.blank_re.match(line):
|
||||
dest.write(" ")
|
||||
elif self.else_re.match(line):
|
||||
# Special logic for lines containing only "else:".
|
||||
if j >= len(missing):
|
||||
dest.write("> ")
|
||||
elif statements[i] == missing[j]:
|
||||
dest.write("! ")
|
||||
else:
|
||||
dest.write("> ")
|
||||
elif lineno in excluded:
|
||||
dest.write("- ")
|
||||
elif covered:
|
||||
dest.write("> ")
|
||||
else:
|
||||
dest.write("! ")
|
||||
|
||||
dest.write(line)
|
||||
22
.venv/lib/python3.10/site-packages/coverage/bytecode.py
Normal file
22
.venv/lib/python3.10/site-packages/coverage/bytecode.py
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Bytecode manipulation for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from types import CodeType
|
||||
from typing import Iterator
|
||||
|
||||
|
||||
def code_objects(code: CodeType) -> Iterator[CodeType]:
|
||||
"""Iterate over all the code objects in `code`."""
|
||||
stack = [code]
|
||||
while stack:
|
||||
# We're going to return the code object on the stack, but first
|
||||
# push its children for later returning.
|
||||
code = stack.pop()
|
||||
for c in code.co_consts:
|
||||
if isinstance(c, CodeType):
|
||||
stack.append(c)
|
||||
yield code
|
||||
1009
.venv/lib/python3.10/site-packages/coverage/cmdline.py
Normal file
1009
.venv/lib/python3.10/site-packages/coverage/cmdline.py
Normal file
File diff suppressed because it is too large
Load diff
544
.venv/lib/python3.10/site-packages/coverage/collector.py
Normal file
544
.venv/lib/python3.10/site-packages/coverage/collector.py
Normal file
|
|
@ -0,0 +1,544 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Raw data collector for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import os
|
||||
import sys
|
||||
|
||||
from types import FrameType
|
||||
from typing import (
|
||||
cast, Any, Callable, Dict, List, Mapping, Set, TypeVar,
|
||||
)
|
||||
|
||||
from coverage import env
|
||||
from coverage.config import CoverageConfig
|
||||
from coverage.data import CoverageData
|
||||
from coverage.debug import short_stack
|
||||
from coverage.disposition import FileDisposition
|
||||
from coverage.exceptions import ConfigError
|
||||
from coverage.misc import human_sorted_items, isolate_module
|
||||
from coverage.plugin import CoveragePlugin
|
||||
from coverage.pytracer import PyTracer
|
||||
from coverage.sysmon import SysMonitor
|
||||
from coverage.types import (
|
||||
TArc, TFileDisposition, TTraceData, TTraceFn, TracerCore, TWarnFn,
|
||||
)
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
try:
|
||||
# Use the C extension code when we can, for speed.
|
||||
from coverage.tracer import CTracer, CFileDisposition
|
||||
HAS_CTRACER = True
|
||||
except ImportError:
|
||||
# Couldn't import the C extension, maybe it isn't built.
|
||||
if os.getenv("COVERAGE_CORE") == "ctrace": # pragma: part covered
|
||||
# During testing, we use the COVERAGE_CORE environment variable
|
||||
# to indicate that we've fiddled with the environment to test this
|
||||
# fallback code. If we thought we had a C tracer, but couldn't import
|
||||
# it, then exit quickly and clearly instead of dribbling confusing
|
||||
# errors. I'm using sys.exit here instead of an exception because an
|
||||
# exception here causes all sorts of other noise in unittest.
|
||||
sys.stderr.write("*** COVERAGE_CORE is 'ctrace' but can't import CTracer!\n")
|
||||
sys.exit(1)
|
||||
HAS_CTRACER = False
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
class Collector:
|
||||
"""Collects trace data.
|
||||
|
||||
Creates a Tracer object for each thread, since they track stack
|
||||
information. Each Tracer points to the same shared data, contributing
|
||||
traced data points.
|
||||
|
||||
When the Collector is started, it creates a Tracer for the current thread,
|
||||
and installs a function to create Tracers for each new thread started.
|
||||
When the Collector is stopped, all active Tracers are stopped.
|
||||
|
||||
Threads started while the Collector is stopped will never have Tracers
|
||||
associated with them.
|
||||
|
||||
"""
|
||||
|
||||
# The stack of active Collectors. Collectors are added here when started,
|
||||
# and popped when stopped. Collectors on the stack are paused when not
|
||||
# the top, and resumed when they become the top again.
|
||||
_collectors: list[Collector] = []
|
||||
|
||||
# The concurrency settings we support here.
|
||||
LIGHT_THREADS = {"greenlet", "eventlet", "gevent"}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
should_trace: Callable[[str, FrameType], TFileDisposition],
|
||||
check_include: Callable[[str, FrameType], bool],
|
||||
should_start_context: Callable[[FrameType], str | None] | None,
|
||||
file_mapper: Callable[[str], str],
|
||||
timid: bool,
|
||||
branch: bool,
|
||||
warn: TWarnFn,
|
||||
concurrency: list[str],
|
||||
metacov: bool,
|
||||
) -> None:
|
||||
"""Create a collector.
|
||||
|
||||
`should_trace` is a function, taking a file name and a frame, and
|
||||
returning a `coverage.FileDisposition object`.
|
||||
|
||||
`check_include` is a function taking a file name and a frame. It returns
|
||||
a boolean: True if the file should be traced, False if not.
|
||||
|
||||
`should_start_context` is a function taking a frame, and returning a
|
||||
string. If the frame should be the start of a new context, the string
|
||||
is the new context. If the frame should not be the start of a new
|
||||
context, return None.
|
||||
|
||||
`file_mapper` is a function taking a filename, and returning a Unicode
|
||||
filename. The result is the name that will be recorded in the data
|
||||
file.
|
||||
|
||||
If `timid` is true, then a slower simpler trace function will be
|
||||
used. This is important for some environments where manipulation of
|
||||
tracing functions make the faster more sophisticated trace function not
|
||||
operate properly.
|
||||
|
||||
If `branch` is true, then branches will be measured. This involves
|
||||
collecting data on which statements followed each other (arcs). Use
|
||||
`get_arc_data` to get the arc data.
|
||||
|
||||
`warn` is a warning function, taking a single string message argument
|
||||
and an optional slug argument which will be a string or None, to be
|
||||
used if a warning needs to be issued.
|
||||
|
||||
`concurrency` is a list of strings indicating the concurrency libraries
|
||||
in use. Valid values are "greenlet", "eventlet", "gevent", or "thread"
|
||||
(the default). "thread" can be combined with one of the other three.
|
||||
Other values are ignored.
|
||||
|
||||
"""
|
||||
self.should_trace = should_trace
|
||||
self.check_include = check_include
|
||||
self.should_start_context = should_start_context
|
||||
self.file_mapper = file_mapper
|
||||
self.branch = branch
|
||||
self.warn = warn
|
||||
self.concurrency = concurrency
|
||||
assert isinstance(self.concurrency, list), f"Expected a list: {self.concurrency!r}"
|
||||
|
||||
self.pid = os.getpid()
|
||||
|
||||
self.covdata: CoverageData
|
||||
self.threading = None
|
||||
self.static_context: str | None = None
|
||||
|
||||
self.origin = short_stack()
|
||||
|
||||
self.concur_id_func = None
|
||||
|
||||
self._trace_class: type[TracerCore]
|
||||
self.file_disposition_class: type[TFileDisposition]
|
||||
|
||||
core: str | None
|
||||
if timid:
|
||||
core = "pytrace"
|
||||
else:
|
||||
core = os.getenv("COVERAGE_CORE")
|
||||
|
||||
if core == "sysmon" and not env.PYBEHAVIOR.pep669:
|
||||
self.warn("sys.monitoring isn't available, using default core", slug="no-sysmon")
|
||||
core = None
|
||||
|
||||
if not core:
|
||||
# Once we're comfortable with sysmon as a default:
|
||||
# if env.PYBEHAVIOR.pep669 and self.should_start_context is None:
|
||||
# core = "sysmon"
|
||||
if HAS_CTRACER:
|
||||
core = "ctrace"
|
||||
else:
|
||||
core = "pytrace"
|
||||
|
||||
if core == "sysmon":
|
||||
self._trace_class = SysMonitor
|
||||
self._core_kwargs = {"tool_id": 3 if metacov else 1}
|
||||
self.file_disposition_class = FileDisposition
|
||||
self.supports_plugins = False
|
||||
self.packed_arcs = False
|
||||
self.systrace = False
|
||||
elif core == "ctrace":
|
||||
self._trace_class = CTracer
|
||||
self._core_kwargs = {}
|
||||
self.file_disposition_class = CFileDisposition
|
||||
self.supports_plugins = True
|
||||
self.packed_arcs = True
|
||||
self.systrace = True
|
||||
elif core == "pytrace":
|
||||
self._trace_class = PyTracer
|
||||
self._core_kwargs = {}
|
||||
self.file_disposition_class = FileDisposition
|
||||
self.supports_plugins = False
|
||||
self.packed_arcs = False
|
||||
self.systrace = True
|
||||
else:
|
||||
raise ConfigError(f"Unknown core value: {core!r}")
|
||||
|
||||
# We can handle a few concurrency options here, but only one at a time.
|
||||
concurrencies = set(self.concurrency)
|
||||
unknown = concurrencies - CoverageConfig.CONCURRENCY_CHOICES
|
||||
if unknown:
|
||||
show = ", ".join(sorted(unknown))
|
||||
raise ConfigError(f"Unknown concurrency choices: {show}")
|
||||
light_threads = concurrencies & self.LIGHT_THREADS
|
||||
if len(light_threads) > 1:
|
||||
show = ", ".join(sorted(light_threads))
|
||||
raise ConfigError(f"Conflicting concurrency settings: {show}")
|
||||
do_threading = False
|
||||
|
||||
tried = "nothing" # to satisfy pylint
|
||||
try:
|
||||
if "greenlet" in concurrencies:
|
||||
tried = "greenlet"
|
||||
import greenlet
|
||||
self.concur_id_func = greenlet.getcurrent
|
||||
elif "eventlet" in concurrencies:
|
||||
tried = "eventlet"
|
||||
import eventlet.greenthread # pylint: disable=import-error,useless-suppression
|
||||
self.concur_id_func = eventlet.greenthread.getcurrent
|
||||
elif "gevent" in concurrencies:
|
||||
tried = "gevent"
|
||||
import gevent # pylint: disable=import-error,useless-suppression
|
||||
self.concur_id_func = gevent.getcurrent
|
||||
|
||||
if "thread" in concurrencies:
|
||||
do_threading = True
|
||||
except ImportError as ex:
|
||||
msg = f"Couldn't trace with concurrency={tried}, the module isn't installed."
|
||||
raise ConfigError(msg) from ex
|
||||
|
||||
if self.concur_id_func and not hasattr(self._trace_class, "concur_id_func"):
|
||||
raise ConfigError(
|
||||
"Can't support concurrency={} with {}, only threads are supported.".format(
|
||||
tried, self.tracer_name(),
|
||||
),
|
||||
)
|
||||
|
||||
if do_threading or not concurrencies:
|
||||
# It's important to import threading only if we need it. If
|
||||
# it's imported early, and the program being measured uses
|
||||
# gevent, then gevent's monkey-patching won't work properly.
|
||||
import threading
|
||||
self.threading = threading
|
||||
|
||||
self.reset()
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<Collector at {id(self):#x}: {self.tracer_name()}>"
|
||||
|
||||
def use_data(self, covdata: CoverageData, context: str | None) -> None:
|
||||
"""Use `covdata` for recording data."""
|
||||
self.covdata = covdata
|
||||
self.static_context = context
|
||||
self.covdata.set_context(self.static_context)
|
||||
|
||||
def tracer_name(self) -> str:
|
||||
"""Return the class name of the tracer we're using."""
|
||||
return self._trace_class.__name__
|
||||
|
||||
def _clear_data(self) -> None:
|
||||
"""Clear out existing data, but stay ready for more collection."""
|
||||
# We used to use self.data.clear(), but that would remove filename
|
||||
# keys and data values that were still in use higher up the stack
|
||||
# when we are called as part of switch_context.
|
||||
for d in self.data.values():
|
||||
d.clear()
|
||||
|
||||
for tracer in self.tracers:
|
||||
tracer.reset_activity()
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Clear collected data, and prepare to collect more."""
|
||||
# The trace data we are collecting.
|
||||
self.data: TTraceData = {}
|
||||
|
||||
# A dictionary mapping file names to file tracer plugin names that will
|
||||
# handle them.
|
||||
self.file_tracers: dict[str, str] = {}
|
||||
|
||||
self.disabled_plugins: set[str] = set()
|
||||
|
||||
# The .should_trace_cache attribute is a cache from file names to
|
||||
# coverage.FileDisposition objects, or None. When a file is first
|
||||
# considered for tracing, a FileDisposition is obtained from
|
||||
# Coverage.should_trace. Its .trace attribute indicates whether the
|
||||
# file should be traced or not. If it should be, a plugin with dynamic
|
||||
# file names can decide not to trace it based on the dynamic file name
|
||||
# being excluded by the inclusion rules, in which case the
|
||||
# FileDisposition will be replaced by None in the cache.
|
||||
if env.PYPY:
|
||||
import __pypy__ # pylint: disable=import-error
|
||||
# Alex Gaynor said:
|
||||
# should_trace_cache is a strictly growing key: once a key is in
|
||||
# it, it never changes. Further, the keys used to access it are
|
||||
# generally constant, given sufficient context. That is to say, at
|
||||
# any given point _trace() is called, pypy is able to know the key.
|
||||
# This is because the key is determined by the physical source code
|
||||
# line, and that's invariant with the call site.
|
||||
#
|
||||
# This property of a dict with immutable keys, combined with
|
||||
# call-site-constant keys is a match for PyPy's module dict,
|
||||
# which is optimized for such workloads.
|
||||
#
|
||||
# This gives a 20% benefit on the workload described at
|
||||
# https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
|
||||
self.should_trace_cache = __pypy__.newdict("module")
|
||||
else:
|
||||
self.should_trace_cache = {}
|
||||
|
||||
# Our active Tracers.
|
||||
self.tracers: list[TracerCore] = []
|
||||
|
||||
self._clear_data()
|
||||
|
||||
def _start_tracer(self) -> TTraceFn | None:
|
||||
"""Start a new Tracer object, and store it in self.tracers."""
|
||||
tracer = self._trace_class(**self._core_kwargs)
|
||||
tracer.data = self.data
|
||||
tracer.trace_arcs = self.branch
|
||||
tracer.should_trace = self.should_trace
|
||||
tracer.should_trace_cache = self.should_trace_cache
|
||||
tracer.warn = self.warn
|
||||
|
||||
if hasattr(tracer, 'concur_id_func'):
|
||||
tracer.concur_id_func = self.concur_id_func
|
||||
if hasattr(tracer, 'file_tracers'):
|
||||
tracer.file_tracers = self.file_tracers
|
||||
if hasattr(tracer, 'threading'):
|
||||
tracer.threading = self.threading
|
||||
if hasattr(tracer, 'check_include'):
|
||||
tracer.check_include = self.check_include
|
||||
if hasattr(tracer, 'should_start_context'):
|
||||
tracer.should_start_context = self.should_start_context
|
||||
if hasattr(tracer, 'switch_context'):
|
||||
tracer.switch_context = self.switch_context
|
||||
if hasattr(tracer, 'disable_plugin'):
|
||||
tracer.disable_plugin = self.disable_plugin
|
||||
|
||||
fn = tracer.start()
|
||||
self.tracers.append(tracer)
|
||||
|
||||
return fn
|
||||
|
||||
# The trace function has to be set individually on each thread before
|
||||
# execution begins. Ironically, the only support the threading module has
|
||||
# for running code before the thread main is the tracing function. So we
|
||||
# install this as a trace function, and the first time it's called, it does
|
||||
# the real trace installation.
|
||||
#
|
||||
# New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681
|
||||
|
||||
def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn | None:
|
||||
"""Called on new threads, installs the real tracer."""
|
||||
# Remove ourselves as the trace function.
|
||||
sys.settrace(None)
|
||||
# Install the real tracer.
|
||||
fn: TTraceFn | None = self._start_tracer()
|
||||
# Invoke the real trace function with the current event, to be sure
|
||||
# not to lose an event.
|
||||
if fn:
|
||||
fn = fn(frame, event, arg)
|
||||
# Return the new trace function to continue tracing in this scope.
|
||||
return fn
|
||||
|
||||
def start(self) -> None:
|
||||
"""Start collecting trace information."""
|
||||
# We may be a new collector in a forked process. The old process'
|
||||
# collectors will be in self._collectors, but they won't be usable.
|
||||
# Find them and discard them.
|
||||
keep_collectors = []
|
||||
for c in self._collectors:
|
||||
if c.pid == self.pid:
|
||||
keep_collectors.append(c)
|
||||
else:
|
||||
c.post_fork()
|
||||
self._collectors[:] = keep_collectors
|
||||
|
||||
if self._collectors:
|
||||
self._collectors[-1].pause()
|
||||
|
||||
self.tracers = []
|
||||
|
||||
try:
|
||||
# Install the tracer on this thread.
|
||||
self._start_tracer()
|
||||
except:
|
||||
if self._collectors:
|
||||
self._collectors[-1].resume()
|
||||
raise
|
||||
|
||||
# If _start_tracer succeeded, then we add ourselves to the global
|
||||
# stack of collectors.
|
||||
self._collectors.append(self)
|
||||
|
||||
# Install our installation tracer in threading, to jump-start other
|
||||
# threads.
|
||||
if self.systrace and self.threading:
|
||||
self.threading.settrace(self._installation_trace)
|
||||
|
||||
def stop(self) -> None:
|
||||
"""Stop collecting trace information."""
|
||||
assert self._collectors
|
||||
if self._collectors[-1] is not self:
|
||||
print("self._collectors:")
|
||||
for c in self._collectors:
|
||||
print(f" {c!r}\n{c.origin}")
|
||||
assert self._collectors[-1] is self, (
|
||||
f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}"
|
||||
)
|
||||
|
||||
self.pause()
|
||||
|
||||
# Remove this Collector from the stack, and resume the one underneath (if any).
|
||||
self._collectors.pop()
|
||||
if self._collectors:
|
||||
self._collectors[-1].resume()
|
||||
|
||||
def pause(self) -> None:
|
||||
"""Pause tracing, but be prepared to `resume`."""
|
||||
for tracer in self.tracers:
|
||||
tracer.stop()
|
||||
stats = tracer.get_stats()
|
||||
if stats:
|
||||
print("\nCoverage.py tracer stats:")
|
||||
for k, v in human_sorted_items(stats.items()):
|
||||
print(f"{k:>20}: {v}")
|
||||
if self.threading:
|
||||
self.threading.settrace(None)
|
||||
|
||||
def resume(self) -> None:
|
||||
"""Resume tracing after a `pause`."""
|
||||
for tracer in self.tracers:
|
||||
tracer.start()
|
||||
if self.systrace:
|
||||
if self.threading:
|
||||
self.threading.settrace(self._installation_trace)
|
||||
else:
|
||||
self._start_tracer()
|
||||
|
||||
def post_fork(self) -> None:
|
||||
"""After a fork, tracers might need to adjust."""
|
||||
for tracer in self.tracers:
|
||||
if hasattr(tracer, "post_fork"):
|
||||
tracer.post_fork()
|
||||
|
||||
def _activity(self) -> bool:
|
||||
"""Has any activity been traced?
|
||||
|
||||
Returns a boolean, True if any trace function was invoked.
|
||||
|
||||
"""
|
||||
return any(tracer.activity() for tracer in self.tracers)
|
||||
|
||||
def switch_context(self, new_context: str | None) -> None:
|
||||
"""Switch to a new dynamic context."""
|
||||
context: str | None
|
||||
self.flush_data()
|
||||
if self.static_context:
|
||||
context = self.static_context
|
||||
if new_context:
|
||||
context += "|" + new_context
|
||||
else:
|
||||
context = new_context
|
||||
self.covdata.set_context(context)
|
||||
|
||||
def disable_plugin(self, disposition: TFileDisposition) -> None:
|
||||
"""Disable the plugin mentioned in `disposition`."""
|
||||
file_tracer = disposition.file_tracer
|
||||
assert file_tracer is not None
|
||||
plugin = file_tracer._coverage_plugin
|
||||
plugin_name = plugin._coverage_plugin_name
|
||||
self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception")
|
||||
plugin._coverage_enabled = False
|
||||
disposition.trace = False
|
||||
|
||||
@functools.lru_cache(maxsize=None) # pylint: disable=method-cache-max-size-none
|
||||
def cached_mapped_file(self, filename: str) -> str:
|
||||
"""A locally cached version of file names mapped through file_mapper."""
|
||||
return self.file_mapper(filename)
|
||||
|
||||
def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]:
|
||||
"""Return a dict like d, but with keys modified by file_mapper."""
|
||||
# The call to list(items()) ensures that the GIL protects the dictionary
|
||||
# iterator against concurrent modifications by tracers running
|
||||
# in other threads. We try three times in case of concurrent
|
||||
# access, hoping to get a clean copy.
|
||||
runtime_err = None
|
||||
for _ in range(3): # pragma: part covered
|
||||
try:
|
||||
items = list(d.items())
|
||||
except RuntimeError as ex: # pragma: cant happen
|
||||
runtime_err = ex
|
||||
else:
|
||||
break
|
||||
else: # pragma: cant happen
|
||||
assert isinstance(runtime_err, Exception)
|
||||
raise runtime_err
|
||||
|
||||
return {self.cached_mapped_file(k): v for k, v in items if v}
|
||||
|
||||
def plugin_was_disabled(self, plugin: CoveragePlugin) -> None:
|
||||
"""Record that `plugin` was disabled during the run."""
|
||||
self.disabled_plugins.add(plugin._coverage_plugin_name)
|
||||
|
||||
def flush_data(self) -> bool:
|
||||
"""Save the collected data to our associated `CoverageData`.
|
||||
|
||||
Data may have also been saved along the way. This forces the
|
||||
last of the data to be saved.
|
||||
|
||||
Returns True if there was data to save, False if not.
|
||||
"""
|
||||
if not self._activity():
|
||||
return False
|
||||
|
||||
if self.branch:
|
||||
if self.packed_arcs:
|
||||
# Unpack the line number pairs packed into integers. See
|
||||
# tracer.c:CTracer_record_pair for the C code that creates
|
||||
# these packed ints.
|
||||
arc_data: dict[str, list[TArc]] = {}
|
||||
packed_data = cast(Dict[str, Set[int]], self.data)
|
||||
|
||||
# The list() here and in the inner loop are to get a clean copy
|
||||
# even as tracers are continuing to add data.
|
||||
for fname, packeds in list(packed_data.items()):
|
||||
tuples = []
|
||||
for packed in list(packeds):
|
||||
l1 = packed & 0xFFFFF
|
||||
l2 = (packed & (0xFFFFF << 20)) >> 20
|
||||
if packed & (1 << 40):
|
||||
l1 *= -1
|
||||
if packed & (1 << 41):
|
||||
l2 *= -1
|
||||
tuples.append((l1, l2))
|
||||
arc_data[fname] = tuples
|
||||
else:
|
||||
arc_data = cast(Dict[str, List[TArc]], self.data)
|
||||
self.covdata.add_arcs(self.mapped_file_dict(arc_data))
|
||||
else:
|
||||
line_data = cast(Dict[str, Set[int]], self.data)
|
||||
self.covdata.add_lines(self.mapped_file_dict(line_data))
|
||||
|
||||
file_tracers = {
|
||||
k: v for k, v in self.file_tracers.items()
|
||||
if v not in self.disabled_plugins
|
||||
}
|
||||
self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers))
|
||||
|
||||
self._clear_data()
|
||||
return True
|
||||
619
.venv/lib/python3.10/site-packages/coverage/config.py
Normal file
619
.venv/lib/python3.10/site-packages/coverage/config.py
Normal file
|
|
@ -0,0 +1,619 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Config file for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
import configparser
|
||||
import copy
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
|
||||
from typing import (
|
||||
Any, Callable, Iterable, Union,
|
||||
)
|
||||
|
||||
from coverage.exceptions import ConfigError
|
||||
from coverage.misc import isolate_module, human_sorted_items, substitute_variables
|
||||
from coverage.tomlconfig import TomlConfigParser, TomlDecodeError
|
||||
from coverage.types import (
|
||||
TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigSectionOut,
|
||||
TConfigValueOut, TPluginConfig,
|
||||
)
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
class HandyConfigParser(configparser.ConfigParser):
    """Our specialization of ConfigParser.

    Adds section-name prefixing (so ``[coverage:run]`` in a shared file like
    tox.ini is treated like ``[run]``), environment-variable substitution in
    values, and list/regex-list value readers.
    """

    def __init__(self, our_file: bool) -> None:
        """Create the HandyConfigParser.

        `our_file` is True if this config file is specifically for coverage,
        False if we are examining another config file (tox.ini, setup.cfg)
        for possible settings.
        """
        # interpolation=None: we do our own $WORD substitution in get().
        super().__init__(interpolation=None)
        self.section_prefixes = ["coverage:"]
        if our_file:
            # In coverage's own file, sections may also appear unprefixed.
            self.section_prefixes.append("")

    def read(  # type: ignore[override]
        self,
        filenames: Iterable[str],
        encoding_unused: str | None = None,
    ) -> list[str]:
        """Read a file name as UTF-8 configuration data."""
        return super().read(filenames, encoding="utf-8")

    def real_section(self, section: str) -> str | None:
        """Get the actual name of a section."""
        # Try each prefix in turn; the first prefixed name present wins.
        for section_prefix in self.section_prefixes:
            real_section = section_prefix + section
            has = super().has_section(real_section)
            if has:
                return real_section
        return None

    def has_option(self, section: str, option: str) -> bool:
        """Does a (possibly prefixed) `section` have `option`?"""
        real_section = self.real_section(section)
        if real_section is not None:
            return super().has_option(real_section, option)
        return False

    def has_section(self, section: str) -> bool:
        """Does the file have a (possibly prefixed) `section`?"""
        return bool(self.real_section(section))

    def options(self, section: str) -> list[str]:
        """List the option names in the (possibly prefixed) `section`."""
        real_section = self.real_section(section)
        if real_section is not None:
            return super().options(real_section)
        raise ConfigError(f"No section: {section!r}")

    def get_section(self, section: str) -> TConfigSectionOut:
        """Get the contents of a section, as a dictionary."""
        d: dict[str, TConfigValueOut] = {}
        for opt in self.options(section):
            d[opt] = self.get(section, opt)
        return d

    def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str:  # type: ignore
        """Get a value, replacing environment variables also.

        The arguments are the same as `ConfigParser.get`, but in the found
        value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
        environment variable ``WORD``.

        Returns the finished value.

        """
        # Find which prefixed section actually holds the option; the
        # for/else raises if none of the candidates has it.
        for section_prefix in self.section_prefixes:
            real_section = section_prefix + section
            if super().has_option(real_section, option):
                break
        else:
            raise ConfigError(f"No option {option!r} in section: {section!r}")

        v: str = super().get(real_section, option, *args, **kwargs)
        # Expand $WORD / ${WORD} from the process environment.
        v = substitute_variables(v, os.environ)
        return v

    def getlist(self, section: str, option: str) -> list[str]:
        """Read a list of strings.

        The value of `section` and `option` is treated as a comma- and newline-
        separated list of strings. Each value is stripped of white space.

        Returns the list of strings.

        """
        value_list = self.get(section, option)
        values = []
        for value_line in value_list.split("\n"):
            for value in value_line.split(","):
                value = value.strip()
                if value:
                    values.append(value)
        return values

    def getregexlist(self, section: str, option: str) -> list[str]:
        """Read a list of full-line regexes.

        The value of `section` and `option` is treated as a newline-separated
        list of regexes. Each value is stripped of white space.

        Returns the list of strings.

        Raises ConfigError if any line is not a valid regex.

        """
        line_list = self.get(section, option)
        value_list = []
        for value in line_list.splitlines():
            value = value.strip()
            try:
                # Validate each regex now, so the error can mention the
                # config section/option it came from.
                re.compile(value)
            except re.error as e:
                raise ConfigError(
                    f"Invalid [{section}].{option} value {value!r}: {e}",
                ) from e
            if value:
                value_list.append(value)
        return value_list
|
||||
|
||||
|
||||
# Either flavor of parser presents the same reading interface.
TConfigParser = Union[HandyConfigParser, TomlConfigParser]


# The default line exclusion regexes.
DEFAULT_EXCLUDE = [
    r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)",
]

# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
    r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)",
]

# The default partial branch regexes, based on Python semantics.
# These are any Python branching constructs that can't actually execute all
# their branches.
DEFAULT_PARTIAL_ALWAYS = [
    "while (True|1|False|0):",
    "if (True|1|False|0):",
]
|
||||
|
||||
|
||||
class CoverageConfig(TConfigurable, TPluginConfig):
    """Coverage.py configuration.

    The attributes of this class are the various settings that control the
    operation of coverage.py.

    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self) -> None:
        """Initialize the configuration attributes to their defaults."""
        # Metadata about the config.
        # We tried to read these config files.
        self.attempted_config_files: list[str] = []
        # We did read these config files, but maybe didn't find any content for us.
        self.config_files_read: list[str] = []
        # The file that gave us our configuration.
        self.config_file: str | None = None
        self._config_contents: bytes | None = None

        # Defaults for [run] and [report]
        self._include = None
        self._omit = None

        # Defaults for [run]
        self.branch = False
        self.command_line: str | None = None
        self.concurrency: list[str] = []
        self.context: str | None = None
        self.cover_pylib = False
        self.data_file = ".coverage"
        self.debug: list[str] = []
        self.debug_file: str | None = None
        self.disable_warnings: list[str] = []
        self.dynamic_context: str | None = None
        self.parallel = False
        self.plugins: list[str] = []
        self.relative_files = False
        self.run_include: list[str] = []
        self.run_omit: list[str] = []
        self.sigterm = False
        self.source: list[str] | None = None
        self.source_pkgs: list[str] = []
        self.timid = False
        self._crash: str | None = None

        # Defaults for [report]
        self.exclude_list = DEFAULT_EXCLUDE[:]
        self.exclude_also: list[str] = []
        self.fail_under = 0.0
        self.format: str | None = None
        self.ignore_errors = False
        self.include_namespace_packages = False
        self.report_include: list[str] | None = None
        self.report_omit: list[str] | None = None
        self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
        self.partial_list = DEFAULT_PARTIAL[:]
        self.precision = 0
        self.report_contexts: list[str] | None = None
        self.show_missing = False
        self.skip_covered = False
        self.skip_empty = False
        self.sort: str | None = None

        # Defaults for [html]
        self.extra_css: str | None = None
        self.html_dir = "htmlcov"
        self.html_skip_covered: bool | None = None
        self.html_skip_empty: bool | None = None
        self.html_title = "Coverage report"
        self.show_contexts = False

        # Defaults for [xml]
        self.xml_output = "coverage.xml"
        self.xml_package_depth = 99

        # Defaults for [json]
        self.json_output = "coverage.json"
        self.json_pretty_print = False
        self.json_show_contexts = False

        # Defaults for [lcov]
        self.lcov_output = "coverage.lcov"

        # Defaults for [paths]
        self.paths: dict[str, list[str]] = {}

        # Options for plugins
        self.plugin_options: dict[str, TConfigSectionOut] = {}

    # Options that may be given as a single string but must be stored as lists.
    MUST_BE_LIST = {
        "debug", "concurrency", "plugins",
        "report_omit", "report_include",
        "run_omit", "run_include",
    }

    def from_args(self, **kwargs: TConfigValueIn) -> None:
        """Read config values from `kwargs`."""
        for k, v in kwargs.items():
            if v is not None:
                if k in self.MUST_BE_LIST and isinstance(v, str):
                    v = [v]
                setattr(self, k, v)

    def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) -> bool:
        """Read configuration from a .rc file.

        `filename` is a file name to read.

        `our_file` is True if this config file is specifically for coverage,
        False if we are examining another config file (tox.ini, setup.cfg)
        for possible settings.

        Returns True or False, whether the file could be read, and it had some
        coverage.py settings in it.

        """
        _, ext = os.path.splitext(filename)
        cp: TConfigParser
        if ext == ".toml":
            cp = TomlConfigParser(our_file)
        else:
            cp = HandyConfigParser(our_file)

        self.attempted_config_files.append(filename)

        try:
            files_read = cp.read(filename)
        except (configparser.Error, TomlDecodeError) as err:
            # BUG FIX: the message previously read "config file (unknown)",
            # dropping the file name; include it so users know which file
            # failed to parse.
            raise ConfigError(f"Couldn't read config file {filename}: {err}") from err
        if not files_read:
            return False

        self.config_files_read.extend(map(os.path.abspath, files_read))

        any_set = False
        try:
            for option_spec in self.CONFIG_FILE_OPTIONS:
                was_set = self._set_attr_from_config_option(cp, *option_spec)
                if was_set:
                    any_set = True
        except ValueError as err:
            # BUG FIX: same as above — name the offending file in the error.
            raise ConfigError(f"Couldn't read config file {filename}: {err}") from err

        # Check that there are no unrecognized options.
        all_options = collections.defaultdict(set)
        for option_spec in self.CONFIG_FILE_OPTIONS:
            section, option = option_spec[1].split(":")
            all_options[section].add(option)

        for section, options in all_options.items():
            real_section = cp.real_section(section)
            if real_section:
                for unknown in set(cp.options(section)) - options:
                    warn(
                        "Unrecognized option '[{}] {}=' in config file {}".format(
                            real_section, unknown, filename,
                        ),
                    )

        # [paths] is special
        if cp.has_section("paths"):
            for option in cp.options("paths"):
                self.paths[option] = cp.getlist("paths", option)
                any_set = True

        # plugins can have options
        for plugin in self.plugins:
            if cp.has_section(plugin):
                self.plugin_options[plugin] = cp.get_section(plugin)
                any_set = True

        # Was this file used as a config file? If it's specifically our file,
        # then it was used.  If we're piggybacking on someone else's file,
        # then it was only used if we found some settings in it.
        if our_file:
            used = True
        else:
            used = any_set

        if used:
            self.config_file = os.path.abspath(filename)
            with open(filename, "rb") as f:
                self._config_contents = f.read()

        return used

    def copy(self) -> CoverageConfig:
        """Return a copy of the configuration."""
        return copy.deepcopy(self)

    CONCURRENCY_CHOICES = {"thread", "gevent", "greenlet", "eventlet", "multiprocessing"}

    CONFIG_FILE_OPTIONS = [
        # These are *args for _set_attr_from_config_option:
        #   (attr, where, type_="")
        #
        #   attr is the attribute to set on the CoverageConfig object.
        #   where is the section:name to read from the configuration file.
        #   type_ is the optional type to apply, by using .getTYPE to read the
        #       configuration value from the file.

        # [run]
        ("branch", "run:branch", "boolean"),
        ("command_line", "run:command_line"),
        ("concurrency", "run:concurrency", "list"),
        ("context", "run:context"),
        ("cover_pylib", "run:cover_pylib", "boolean"),
        ("data_file", "run:data_file"),
        ("debug", "run:debug", "list"),
        ("debug_file", "run:debug_file"),
        ("disable_warnings", "run:disable_warnings", "list"),
        ("dynamic_context", "run:dynamic_context"),
        ("parallel", "run:parallel", "boolean"),
        ("plugins", "run:plugins", "list"),
        ("relative_files", "run:relative_files", "boolean"),
        ("run_include", "run:include", "list"),
        ("run_omit", "run:omit", "list"),
        ("sigterm", "run:sigterm", "boolean"),
        ("source", "run:source", "list"),
        ("source_pkgs", "run:source_pkgs", "list"),
        ("timid", "run:timid", "boolean"),
        ("_crash", "run:_crash"),

        # [report]
        ("exclude_list", "report:exclude_lines", "regexlist"),
        ("exclude_also", "report:exclude_also", "regexlist"),
        ("fail_under", "report:fail_under", "float"),
        ("format", "report:format"),
        ("ignore_errors", "report:ignore_errors", "boolean"),
        ("include_namespace_packages", "report:include_namespace_packages", "boolean"),
        ("partial_always_list", "report:partial_branches_always", "regexlist"),
        ("partial_list", "report:partial_branches", "regexlist"),
        ("precision", "report:precision", "int"),
        ("report_contexts", "report:contexts", "list"),
        ("report_include", "report:include", "list"),
        ("report_omit", "report:omit", "list"),
        ("show_missing", "report:show_missing", "boolean"),
        ("skip_covered", "report:skip_covered", "boolean"),
        ("skip_empty", "report:skip_empty", "boolean"),
        ("sort", "report:sort"),

        # [html]
        ("extra_css", "html:extra_css"),
        ("html_dir", "html:directory"),
        ("html_skip_covered", "html:skip_covered", "boolean"),
        ("html_skip_empty", "html:skip_empty", "boolean"),
        ("html_title", "html:title"),
        ("show_contexts", "html:show_contexts", "boolean"),

        # [xml]
        ("xml_output", "xml:output"),
        ("xml_package_depth", "xml:package_depth", "int"),

        # [json]
        ("json_output", "json:output"),
        ("json_pretty_print", "json:pretty_print", "boolean"),
        ("json_show_contexts", "json:show_contexts", "boolean"),

        # [lcov]
        ("lcov_output", "lcov:output"),
    ]

    def _set_attr_from_config_option(
        self,
        cp: TConfigParser,
        attr: str,
        where: str,
        type_: str = "",
    ) -> bool:
        """Set an attribute on self if it exists in the ConfigParser.

        Returns True if the attribute was set.

        """
        section, option = where.split(":")
        if cp.has_option(section, option):
            # Dispatch to cp.get / cp.getboolean / cp.getlist / etc.
            method = getattr(cp, "get" + type_)
            setattr(self, attr, method(section, option))
            return True
        return False

    def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
        """Get a dictionary of options for the plugin named `plugin`."""
        return self.plugin_options.get(plugin, {})

    def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
        """Set an option in the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        `value` is the new value for the option.

        """
        # Special-cased options.
        if option_name == "paths":
            self.paths = value  # type: ignore[assignment]
            return

        # Check all the hard-coded options.
        for option_spec in self.CONFIG_FILE_OPTIONS:
            attr, where = option_spec[:2]
            if where == option_name:
                setattr(self, attr, value)
                return

        # See if it's a plugin option.
        plugin_name, _, key = option_name.partition(":")
        if key and plugin_name in self.plugins:
            self.plugin_options.setdefault(plugin_name, {})[key] = value  # type: ignore[index]
            return

        # If we get here, we didn't find the option.
        raise ConfigError(f"No such option: {option_name!r}")

    def get_option(self, option_name: str) -> TConfigValueOut | None:
        """Get an option from the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        Returns the value of the option.

        """
        # Special-cased options.
        if option_name == "paths":
            return self.paths  # type: ignore[return-value]

        # Check all the hard-coded options.
        for option_spec in self.CONFIG_FILE_OPTIONS:
            attr, where = option_spec[:2]
            if where == option_name:
                return getattr(self, attr)  # type: ignore[no-any-return]

        # See if it's a plugin option.
        plugin_name, _, key = option_name.partition(":")
        if key and plugin_name in self.plugins:
            return self.plugin_options.get(plugin_name, {}).get(key)

        # If we get here, we didn't find the option.
        raise ConfigError(f"No such option: {option_name!r}")

    def post_process_file(self, path: str) -> str:
        """Make final adjustments to a file path to make it usable."""
        return os.path.expanduser(path)

    def post_process(self) -> None:
        """Make final adjustments to settings to make them usable."""
        self.data_file = self.post_process_file(self.data_file)
        self.html_dir = self.post_process_file(self.html_dir)
        self.xml_output = self.post_process_file(self.xml_output)
        self.paths = {
            k: [self.post_process_file(f) for f in v]
            for k, v in self.paths.items()
        }
        # exclude_also extends (rather than replaces) the exclusion regexes.
        self.exclude_list += self.exclude_also

    def debug_info(self) -> list[tuple[str, Any]]:
        """Make a list of (name, value) pairs for writing debug info."""
        return human_sorted_items(
            (k, v) for k, v in self.__dict__.items() if not k.startswith("_")
        )
|
||||
|
||||
|
||||
def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]]:
    """What config files should we try to read?

    Returns a list of tuples:
        (filename, is_our_file, was_file_specified)
    """
    # Some API users were specifying ".coveragerc" to mean the same as
    # True, so make it so.
    if config_file == ".coveragerc":
        config_file = True

    specified = config_file is not True
    if not specified:
        # No file was specified.  Check COVERAGE_RCFILE.
        env_rcfile = os.getenv("COVERAGE_RCFILE")
        if env_rcfile:
            config_file = env_rcfile
            specified = True
    if not specified:
        # Still no file specified.  Default to .coveragerc
        config_file = ".coveragerc"
    assert isinstance(config_file, str)

    # Our candidate file first, then the files we piggyback on, in a
    # fixed priority order.
    return [
        (config_file, True, specified),
        ("setup.cfg", False, False),
        ("tox.ini", False, False),
        ("pyproject.toml", False, False),
    ]
|
||||
|
||||
|
||||
def read_coverage_config(
    config_file: bool | str,
    warn: Callable[[str], None],
    **kwargs: TConfigValueIn,
) -> CoverageConfig:
    """Read the coverage.py configuration.

    Arguments:
        config_file: a boolean or string, see the `Coverage` class for the
            tricky details.
        warn: a function to issue warnings.
        all others: keyword arguments from the `Coverage` class, used for
            setting values in the configuration.

    Returns:
        config:
            config is a CoverageConfig object read from the appropriate
            configuration file.

    """
    # Build the configuration from a number of sources:
    # 1) defaults:
    config = CoverageConfig()

    # 2) from a file:
    if config_file:
        for fname, our_file, specified_file in config_files_to_try(config_file):
            if config.from_file(fname, warn, our_file=our_file):
                break
            if specified_file:
                # The user named this file explicitly, so failing to read
                # it is an error, not a reason to fall back.
                raise ConfigError(f"Couldn't read {fname!r} as a config file")

    # 3) from environment variables:
    env_data_file = os.getenv("COVERAGE_FILE")
    if env_data_file:
        config.data_file = env_data_file
    # $set_env.py: COVERAGE_DEBUG - Debug options: https://coverage.rtfd.io/cmd.html#debug
    debugs = os.getenv("COVERAGE_DEBUG")
    if debugs:
        config.debug.extend(flag.strip() for flag in debugs.split(","))

    # 4) from constructor arguments:
    config.from_args(**kwargs)

    # Once all the config has been collected, there's a little post-processing
    # to do.
    config.post_process()

    return config
|
||||
72
.venv/lib/python3.10/site-packages/coverage/context.py
Normal file
72
.venv/lib/python3.10/site-packages/coverage/context.py
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Determine contexts for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from types import FrameType
|
||||
from typing import cast, Callable, Sequence
|
||||
|
||||
|
||||
def combine_context_switchers(
    context_switchers: Sequence[Callable[[FrameType], str | None]],
) -> Callable[[FrameType], str | None] | None:
    """Create a single context switcher from multiple switchers.

    `context_switchers` is a list of functions that take a frame as an
    argument and return a string to use as the new context label.

    Returns a function that composites `context_switchers` functions, or None
    if `context_switchers` is an empty list.

    When invoked, the combined switcher calls `context_switchers` one-by-one
    until a string is returned.  The combined switcher returns None if all
    `context_switchers` return None.
    """
    if not context_switchers:
        return None

    if len(context_switchers) == 1:
        # A single switcher needs no wrapper at all.
        return context_switchers[0]

    def should_start_context(frame: FrameType) -> str | None:
        """The combiner for multiple context switchers."""
        # First switcher to produce a label wins.
        labels = (switcher(frame) for switcher in context_switchers)
        return next((label for label in labels if label is not None), None)

    return should_start_context
|
||||
|
||||
|
||||
def should_start_context_test_function(frame: FrameType) -> str | None:
    """Is this frame calling a test_* function?"""
    name = frame.f_code.co_name
    if not (name.startswith("test") or name == "runTest"):
        return None
    return qualname_from_frame(frame)
|
||||
|
||||
|
||||
def qualname_from_frame(frame: FrameType) -> str | None:
    """Get a qualified name for the code running in `frame`."""
    co = frame.f_code
    fname = co.co_name
    method = None
    if co.co_argcount and co.co_varnames[0] == "self":
        # Looks like a method call: find the attribute of the same name on
        # the bound instance, if there is one.
        self = frame.f_locals.get("self", None)
        method = getattr(self, fname, None)

    if method is None:
        # Not a method (or no "self" with that attribute): try a
        # module-level function of the same name.
        func = frame.f_globals.get(fname)
        if func is None:
            return None
        return cast(str, func.__module__ + "." + fname)

    func = getattr(method, "__func__", None)
    if func is None:
        # The attribute is callable but has no __func__ (not a plain bound
        # method): fall back to naming it via the instance's class.
        cls = self.__class__
        return cast(str, cls.__module__ + "." + cls.__name__ + "." + fname)

    return cast(str, func.__module__ + "." + func.__qualname__)
|
||||
1401
.venv/lib/python3.10/site-packages/coverage/control.py
Normal file
1401
.venv/lib/python3.10/site-packages/coverage/control.py
Normal file
File diff suppressed because it is too large
Load diff
221
.venv/lib/python3.10/site-packages/coverage/data.py
Normal file
221
.venv/lib/python3.10/site-packages/coverage/data.py
Normal file
|
|
@ -0,0 +1,221 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Coverage data for coverage.py.
|
||||
|
||||
This file had the 4.x JSON data support, which is now gone. This file still
|
||||
has storage-agnostic helpers, and is kept to avoid changing too many imports.
|
||||
CoverageData is now defined in sqldata.py, and imported here to keep the
|
||||
imports working.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import glob
|
||||
import hashlib
|
||||
import os.path
|
||||
|
||||
from typing import Callable, Iterable
|
||||
|
||||
from coverage.exceptions import CoverageException, NoDataError
|
||||
from coverage.files import PathAliases
|
||||
from coverage.misc import Hasher, file_be_gone, human_sorted, plural
|
||||
from coverage.sqldata import CoverageData
|
||||
|
||||
|
||||
def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]:
    """Return a dict summarizing the line coverage data.

    Keys are based on the file names, and values are the number of executed
    lines.  If `fullpath` is true, then the keys are the full pathnames of
    the files, otherwise they are the basenames of the files.

    Returns a dict mapping file names to counts of lines.

    """
    summ: dict[str, int] = {}
    for filename in data.measured_files():
        lines = data.lines(filename)
        assert lines is not None
        key = filename if fullpath else os.path.basename(filename)
        summ[key] = len(lines)
    return summ
|
||||
|
||||
|
||||
def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None:
    """Contribute `filename`'s data to the `hasher`.

    `hasher` is a `coverage.misc.Hasher` instance to be updated with
    the file's data.  It should only get the results data, not the run
    data.

    """
    if data.has_arcs():
        arcs = data.arcs(filename) or []
        hasher.update(sorted(arcs))
    else:
        hasher.update(sorted_lines(data, filename))
    # The plugin responsible for the file is part of the results identity.
    hasher.update(data.file_tracer(filename))
|
||||
|
||||
|
||||
def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]:
    """Make a list of data files to be combined.

    `data_file` is a path to a data file.  `data_paths` is a list of files or
    directories of files.

    Returns a list of absolute file paths.
    """
    data_dir, local = os.path.split(os.path.abspath(data_file))

    candidates = []
    for p in (data_paths or [data_dir]):
        if os.path.isfile(p):
            candidates.append(os.path.abspath(p))
        elif os.path.isdir(p):
            # Look for files named like the data file plus a dotted suffix.
            pattern = glob.escape(os.path.join(os.path.abspath(p), local)) + ".*"
            candidates.extend(glob.glob(pattern))
        else:
            raise NoDataError(f"Couldn't combine from non-existent path '{p}'")

    # SQLite might have made journal files alongside our database files.
    # We never want to combine those.
    candidates = [fnm for fnm in candidates if not fnm.endswith("-journal")]

    # Sorting isn't usually needed, since it shouldn't matter what order files
    # are combined, but sorting makes tests more predictable, and makes
    # debugging more understandable when things go wrong.
    return sorted(candidates)
|
||||
|
||||
|
||||
def combine_parallel_data(
    data: CoverageData,
    aliases: PathAliases | None = None,
    data_paths: Iterable[str] | None = None,
    strict: bool = False,
    keep: bool = False,
    message: Callable[[str], None] | None = None,
) -> None:
    """Combine a number of data files together.

    `data` is a CoverageData.

    Treat `data.filename` as a file prefix, and combine the data from all
    of the data files starting with that prefix plus a dot.

    If `aliases` is provided, it's a `PathAliases` object that is used to
    re-map paths to match the local machine's.

    If `data_paths` is provided, it is a list of directories or files to
    combine.  Directories are searched for files that start with
    `data.filename` plus dot as a prefix, and those files are combined.

    If `data_paths` is not provided, then the directory portion of
    `data.filename` is used as the directory to search for data files.

    Unless `keep` is True every data file found and combined is then deleted
    from disk.  If a file cannot be read, a warning will be issued, and the
    file will not be deleted.

    If `strict` is true, and no files are found to combine, an error is
    raised.

    `message` is a function to use for printing messages to the user.

    """
    files_to_combine = combinable_files(data.base_filename(), data_paths)

    if strict and not files_to_combine:
        raise NoDataError("No data to combine")

    # De-duplicate input files by content hash, so byte-identical data files
    # are only combined once.
    file_hashes = set()
    combined_any = False

    for f in files_to_combine:
        if f == data.data_filename():
            # Sometimes we are combining into a file which is one of the
            # parallel files.  Skip that file.
            if data._debug.should("dataio"):
                data._debug.write(f"Skipping combining ourself: {f!r}")
            continue

        try:
            rel_file_name = os.path.relpath(f)
        except ValueError:
            # ValueError can be raised under Windows when os.getcwd() returns a
            # folder from a different drive than the drive of f, in which case
            # we print the original value of f instead of its relative path
            rel_file_name = f

        with open(f, "rb") as fobj:
            hasher = hashlib.new("sha3_256")
            hasher.update(fobj.read())
            sha = hasher.digest()
            combine_this_one = sha not in file_hashes

        delete_this_one = not keep
        if combine_this_one:
            if data._debug.should("dataio"):
                data._debug.write(f"Combining data file {f!r}")
            file_hashes.add(sha)
            try:
                new_data = CoverageData(f, debug=data._debug)
                new_data.read()
            except CoverageException as exc:
                if data._warn:
                    # The CoverageException has the file name in it, so just
                    # use the message as the warning.
                    data._warn(str(exc))
                if message:
                    message(f"Couldn't combine data file {rel_file_name}: {exc}")
                # Keep unreadable files on disk so the user can inspect them.
                delete_this_one = False
            else:
                data.update(new_data, aliases=aliases)
                combined_any = True
                if message:
                    message(f"Combined data file {rel_file_name}")
        else:
            if message:
                message(f"Skipping duplicate data {rel_file_name}")

        if delete_this_one:
            if data._debug.should("dataio"):
                data._debug.write(f"Deleting data file {f!r}")
            file_be_gone(f)

    if strict and not combined_any:
        raise NoDataError("No usable data files")
|
||||
|
||||
|
||||
def debug_data_file(filename: str) -> None:
    """Implementation of 'coverage debug data'.

    Prints a summary of the data file at `filename`: its resolved path,
    whether it holds arc data, and the per-file line counts.
    """
    data = CoverageData(filename)
    filename = data.data_filename()
    # BUG FIX: this previously printed the literal text "(unknown)" instead
    # of the resolved data file path.
    print(f"path: {filename}")
    if not os.path.exists(filename):
        print("No data collected: file doesn't exist")
        return
    data.read()
    print(f"has_arcs: {data.has_arcs()!r}")
    summary = line_counts(data, fullpath=True)
    filenames = human_sorted(summary.keys())
    nfiles = len(filenames)
    print(f"{nfiles} file{plural(nfiles)}:")
    for f in filenames:
        line = f"{f}: {summary[f]} line{plural(summary[f])}"
        plugin = data.file_tracer(f)
        if plugin:
            line += f" [{plugin}]"
        print(line)
|
||||
|
||||
|
||||
def sorted_lines(data: CoverageData, filename: str) -> list[int]:
    """Get the sorted lines for a file, for tests."""
    # A file with no recorded lines yields an empty list, not None.
    return sorted(data.lines(filename) or [])
|
||||
613
.venv/lib/python3.10/site-packages/coverage/debug.py
Normal file
613
.venv/lib/python3.10/site-packages/coverage/debug.py
Normal file
|
|
@ -0,0 +1,613 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Control of and utilities for debugging."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import atexit
|
||||
import contextlib
|
||||
import functools
|
||||
import inspect
|
||||
import itertools
|
||||
import os
|
||||
import pprint
|
||||
import re
|
||||
import reprlib
|
||||
import sys
|
||||
import traceback
|
||||
import types
|
||||
import _thread
|
||||
|
||||
from typing import (
|
||||
overload,
|
||||
Any, Callable, IO, Iterable, Iterator, Mapping,
|
||||
)
|
||||
|
||||
from coverage.misc import human_sorted_items, isolate_module
|
||||
from coverage.types import AnyCallable, TWritable
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
# When debugging, it can be helpful to force some options, especially when
# debugging the configuration mechanisms you usually use to control debugging!
# This is a list of forced debugging options.
FORCED_DEBUG: list[str] = []
# Output file forced for debug writes; None means use the configured output.
# NOTE(review): type is not annotated in source — presumably a file path or
# open file object; confirm before annotating.
FORCED_DEBUG_FILE = None
|
||||
|
||||
|
||||
class DebugControl:
    """Control and output for debugging."""

    # auto_repr() skips attributes whose value has show_repr_attr = False,
    # so DebugControl instances don't clutter automatic reprs.
    show_repr_attr = False          # For auto_repr

    def __init__(
        self,
        options: Iterable[str],
        output: IO[str] | None,
        file_name: str | None = None,
    ) -> None:
        """Configure the options and output file for debugging.

        `options` are the debug option names to enable (plus any in
        FORCED_DEBUG).  `output` is an open stream to write to; if None,
        the process-wide singleton chosen by DebugOutputFile.get_one is
        used (based on `file_name`, COVERAGE_DEBUG_FILE, or stderr).
        """
        self.options = list(options) + FORCED_DEBUG
        self.suppress_callers = False

        # Build the chain of text filters implied by the chosen options;
        # each filter annotates lines (cwd, process, pytest test, pid/tid).
        filters = []
        if self.should("process"):
            filters.append(CwdTracker().filter)
            filters.append(ProcessTracker().filter)
        if self.should("pytest"):
            filters.append(PytestTracker().filter)
        if self.should("pid"):
            filters.append(add_pid_and_tid)

        self.output = DebugOutputFile.get_one(
            output,
            file_name=file_name,
            filters=filters,
        )
        self.raw_output = self.output.outfile

    def __repr__(self) -> str:
        return f"<DebugControl options={self.options!r} raw_output={self.raw_output!r}>"

    def should(self, option: str) -> bool:
        """Decide whether to output debug information in category `option`."""
        # "callers" can be temporarily disabled via without_callers().
        if option == "callers" and self.suppress_callers:
            return False
        return (option in self.options)

    @contextlib.contextmanager
    def without_callers(self) -> Iterator[None]:
        """A context manager to prevent call stacks from being logged."""
        old = self.suppress_callers
        self.suppress_callers = True
        try:
            yield
        finally:
            # Restore (rather than reset) so nesting works correctly.
            self.suppress_callers = old

    def write(self, msg: str, *, exc: BaseException | None = None) -> None:
        """Write a line of debug output.

        `msg` is the line to write. A newline will be appended.

        If `exc` is provided, a stack trace of the exception will be written
        after the message.

        """
        self.output.write(msg + "\n")
        if exc is not None:
            self.output.write("".join(traceback.format_exception(None, exc, exc.__traceback__)))
        if self.should("self"):
            # Show our caller's `self`, if it has one in its local scope.
            caller_self = inspect.stack()[1][0].f_locals.get("self")
            if caller_self is not None:
                self.output.write(f"self: {caller_self!r}\n")
        if self.should("callers"):
            dump_stack_frames(out=self.output, skip=1)
        self.output.flush()
|
||||
|
||||
|
||||
class NoDebugging(DebugControl):
    """A replacement for DebugControl that will never try to do anything."""

    def __init__(self) -> None:
        # Deliberately skip DebugControl.__init__: no options are read and
        # no output file is opened.
        # pylint: disable=super-init-not-called
        ...

    def should(self, option: str) -> bool:
        """Should we write debug messages?  Never."""
        return False

    def write(self, msg: str, *, exc: BaseException | None = None) -> None:
        """This will never be called."""
        # Callers are expected to check should() first; reaching here is a bug.
        raise AssertionError("NoDebugging.write should never be called.")
|
||||
|
||||
|
||||
def info_header(label: str) -> str:
    """Format `label` as a section header: dashes padded out to 62 columns."""
    return "--" + f" {label} ".ljust(60, "-")
|
||||
|
||||
|
||||
def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]:
    """Produce a sequence of formatted lines from info.

    `info` is a sequence of pairs (label, data).  The produced lines are
    nicely formatted, ready to print.

    """
    pairs = list(info)
    if not pairs:
        return
    width = 30
    assert all(len(label) < width for label, _ in pairs)
    for label, data in pairs:
        if data == []:
            data = "-none-"
        if isinstance(data, tuple) and len(repr(tuple(data))) < 30:
            # Convert to tuple to scrub namedtuples.
            yield "%*s: %r" % (width, label, tuple(data))
        elif isinstance(data, (list, set, tuple)):
            # One element per line, with the label only on the first line.
            prefix = "%*s:" % (width, label)
            for element in data:
                yield "%*s %s" % (width + 1, prefix, element)
                prefix = ""
        else:
            yield "%*s: %s" % (width, label, data)
|
||||
|
||||
|
||||
def write_formatted_info(
    write: Callable[[str], None],
    header: str,
    info: Iterable[tuple[str, Any]],
) -> None:
    """Write a sequence of (label, data) pairs nicely.

    `write` is a function write(str) that accepts each line of output.
    `header` is a string to start the section.  `info` is a sequence of
    (label, data) pairs, where label is a str, and data can be a single
    value, or a list/set/tuple.

    """
    write(info_header(header))
    for formatted in info_formatter(info):
        write(f" {formatted}")
|
||||
|
||||
|
||||
def exc_one_line(exc: Exception) -> str:
    """Get a one-line summary of an exception, including class name and message."""
    parts = traceback.format_exception_only(type(exc), exc)
    return "|".join(part.rstrip() for part in parts)
|
||||
|
||||
|
||||
# Regex replacements applied before the directory substitutions below.
_FILENAME_REGEXES: list[tuple[str, str]] = [
    (r".*[/\\]pytest-of-.*[/\\]pytest-\d+([/\\]popen-gw\d+)?", "tmp:"),
]
# (directory, prefix) substitutions; filled in lazily on first use.
_FILENAME_SUBS: list[tuple[str, str]] = []

@overload
def short_filename(filename: str) -> str:
    pass

@overload
def short_filename(filename: None) -> None:
    pass

def short_filename(filename: str | None) -> str | None:
    """Shorten a file name. Directories are replaced by prefixes like 'syspath:'"""
    if not _FILENAME_SUBS:
        # First call: build the substitution table from sys.path and the
        # installed coverage package directory.
        for pathdir in sys.path:
            _FILENAME_SUBS.append((pathdir, "syspath:"))
        import coverage
        _FILENAME_SUBS.append((os.path.dirname(coverage.__file__), "cov:"))
        # Longest directories first, so the most specific prefix wins.
        _FILENAME_SUBS.sort(key=(lambda pair: len(pair[0])), reverse=True)
    if filename is not None:
        for pat, sub in _FILENAME_REGEXES:
            filename = re.sub(pat, sub, filename)
        for before, after in _FILENAME_SUBS:
            filename = filename.replace(before, after)
    return filename
|
||||
|
||||
|
||||
def short_stack(
    skip: int = 0,
    full: bool = False,
    frame_ids: bool = False,
    short_filenames: bool = False,
) -> str:
    """Return a string summarizing the call stack.

    The string is multi-line, with one line per stack frame.  Each line shows
    the function name, the file name, and the line number:

        ...
        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py:95
        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py:81
        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py:159
        ...

    `skip` is the number of closest immediate frames to skip, so that debugging
    functions can call this and not be included in the result.

    If `full` is true, then include all frames.  Otherwise, initial "boring"
    frames (ones in site-packages and earlier) are omitted.

    If `frame_ids` is true, each line also shows the id of the frame object.

    `short_filenames` will shorten filenames using `short_filename`, to reduce
    the amount of repetitive noise in stack traces.

    """
    # Regexes in initial frames that we don't care about.
    BORING_PRELUDE = [
        "<string>",             # pytest-xdist has string execution.
        r"\bigor.py$",          # Our test runner.
        r"\bsite-packages\b",   # pytest etc getting to our tests.
    ]

    # [:skip:-1] drops this frame plus `skip` more, and reverses so the
    # outermost frame comes first.
    stack: Iterable[inspect.FrameInfo] = inspect.stack()[:skip:-1]
    if not full:
        for pat in BORING_PRELUDE:
            stack = itertools.dropwhile(
                # Bind pat as a default to avoid the late-binding closure trap.
                (lambda fi, pat=pat: re.search(pat, fi.filename)),  # type: ignore[misc]
                stack,
            )
    lines = []
    for frame_info in stack:
        line = f"{frame_info.function:>30s} : "
        if frame_ids:
            line += f"{id(frame_info.frame):#x} "
        filename = frame_info.filename
        if short_filenames:
            filename = short_filename(filename)
        # BUG FIX: the frame's (possibly shortened) filename must be shown;
        # previously a literal "(unknown)" was emitted and `filename` was
        # computed but never used.
        line += f"{filename}:{frame_info.lineno}"
        lines.append(line)
    return "\n".join(lines)
|
||||
|
||||
|
||||
def dump_stack_frames(out: TWritable, skip: int = 0) -> None:
    """Print a summary of the stack to `out`, followed by a newline.

    `skip` frames nearest the caller are omitted; the +1 also hides this
    function's own frame.
    """
    out.write(short_stack(skip=skip+1) + "\n")
|
||||
|
||||
|
||||
def clipped_repr(text: str, numchars: int = 50) -> str:
    """`repr(text)`, but limited to `numchars` characters of string content."""
    limiter = reprlib.Repr()
    limiter.maxstring = numchars
    return limiter.repr(text)
|
||||
|
||||
|
||||
def short_id(id64: int) -> int:
    """Fold a 64-bit id into a 16-bit one by XOR-ing its four 16-bit chunks."""
    folded = 0
    for shift in (0, 16, 32, 48):
        folded ^= id64 >> shift
    return folded & 0xFFFF
|
||||
|
||||
|
||||
def add_pid_and_tid(text: str) -> str:
    """A filter that prefixes debug messages with the pid and a short tid."""
    # Thread ids are useful, but too long; fold to 16 bits.
    tid = f"{short_id(_thread.get_ident()):04x}"
    return f"{os.getpid():5d}.{tid}: {text}"
|
||||
|
||||
|
||||
# Attribute names that auto_repr should never display.
AUTO_REPR_IGNORE = {"$coverage.object_id"}

def auto_repr(self: Any) -> str:
    """A function implementing an automatic __repr__ for debugging."""
    shown = [
        (name, value)
        for name, value in self.__dict__.items()
        # Attributes can opt out by carrying show_repr_attr = False.
        if getattr(value, "show_repr_attr", True)
        and not inspect.ismethod(value)
        and name not in AUTO_REPR_IGNORE
    ]
    attrs = "".join(f" {name}={value!r}" for name, value in shown)
    return f"<{self.__class__.__name__} @{id(self):#x}{attrs}>"
|
||||
|
||||
|
||||
def simplify(v: Any) -> Any:    # pragma: debugging
    """Turn things which are nearly dict/list/etc into dict/list/etc."""
    if isinstance(v, dict):
        return {key: simplify(val) for key, val in v.items()}
    if isinstance(v, (list, tuple)):
        # Preserve the concrete sequence type (list stays list, tuple tuple).
        return type(v)(simplify(item) for item in v)
    if hasattr(v, "__dict__"):
        # Namespace-like object: expose attributes as ".name" keys.
        return simplify({"." + key: val for key, val in v.__dict__.items()})
    return v
|
||||
|
||||
|
||||
def pp(v: Any) -> None:     # pragma: debugging
    """Debug helper to pretty-print data, including SimpleNamespace objects."""
    # simplify() turns namespace-like objects into dicts so pprint can
    # format their contents instead of a bare repr.
    # Might not be needed in 3.9+
    pprint.pprint(simplify(v))
|
||||
|
||||
|
||||
def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str:
    """Run `text` through a series of filters.

    `filters` is a list of functions.  Each takes a string and returns a
    string.  Each is run in turn.  After each filter, the text is split into
    lines, and each line is passed through the next filter.

    """
    # Preserve the original trailing whitespace/newlines and re-attach at end.
    stripped = text.rstrip()
    trailing = text[len(stripped):]
    result = stripped
    for filter_fn in filters:
        filtered: list[str] = []
        for line in result.splitlines():
            # A filter may emit multiple lines; flatten them.
            filtered.extend(filter_fn(line).splitlines())
        result = "\n".join(filtered)
    return result + trailing
|
||||
|
||||
|
||||
class CwdTracker:
    """A debug-message filter that notes every change of working directory."""

    def __init__(self) -> None:
        # The last cwd we reported; None forces a report on first use.
        self.cwd: str | None = None

    def filter(self, text: str) -> str:
        """Add a cwd message for each new cwd."""
        current = os.getcwd()
        if current == self.cwd:
            return text
        self.cwd = current
        return f"cwd is now {current!r}\n" + text
|
||||
|
||||
|
||||
class ProcessTracker:
    """Track process creation for debug logging."""

    def __init__(self) -> None:
        self.pid: int = os.getpid()
        # Has this process announced itself yet?
        self.did_welcome = False

    def filter(self, text: str) -> str:
        """Add a message about how new processes came to be."""
        pid = os.getpid()
        if self.pid != pid:
            # The pid changed: we are in a fork of the original process.
            welcome = f"New process: forked {self.pid} -> {pid}\n"
            self.pid = pid
        elif not self.did_welcome:
            # First message from this process: announce how it was started.
            argv = getattr(sys, "argv", None)
            welcome = (
                f"New process: {pid=}, executable: {sys.executable!r}\n"
                + f"New process: cmd: {argv!r}\n"
            )
            if hasattr(os, "getppid"):
                welcome += f"New process parent pid: {os.getppid()!r}\n"
        else:
            welcome = ""

        if not welcome:
            return text
        self.did_welcome = True
        return welcome + text
|
||||
|
||||
|
||||
class PytestTracker:
    """Track the current pytest test name to add to debug messages."""

    def __init__(self) -> None:
        # Last PYTEST_CURRENT_TEST value we reported.
        self.test_name: str | None = None

    def filter(self, text: str) -> str:
        """Add a message when the pytest test changes."""
        current = os.getenv("PYTEST_CURRENT_TEST")
        if current == self.test_name:
            return text
        self.test_name = current
        return f"Pytest context: {current}\n" + text
|
||||
|
||||
|
||||
class DebugOutputFile:
    """A file-like object that includes pid and cwd information."""

    def __init__(
        self,
        outfile: IO[str] | None,
        filters: Iterable[Callable[[str], str]],
    ):
        # `outfile` may be None until a real stream is chosen by get_one().
        self.outfile = outfile
        self.filters = list(filters)
        self.pid = os.getpid()

    @classmethod
    def get_one(
        cls,
        fileobj: IO[str] | None = None,
        file_name: str | None = None,
        filters: Iterable[Callable[[str], str]] = (),
        interim: bool = False,
    ) -> DebugOutputFile:
        """Get a DebugOutputFile.

        If `fileobj` is provided, then a new DebugOutputFile is made with it.

        If `fileobj` isn't provided, then a file is chosen (`file_name` if
        provided, or COVERAGE_DEBUG_FILE, or stderr), and a process-wide
        singleton DebugOutputFile is made.

        `filters` are the text filters to apply to the stream to annotate with
        pids, etc.

        If `interim` is true, then a future `get_one` can replace this one.

        """
        if fileobj is not None:
            # Make DebugOutputFile around the fileobj passed.
            return cls(fileobj, filters)

        # No stream given: use (or create) the process-wide singleton.
        the_one, is_interim = cls._get_singleton_data()
        if the_one is None or is_interim:
            if file_name is not None:
                fileobj = open(file_name, "a", encoding="utf-8")
            else:
                # $set_env.py: COVERAGE_DEBUG_FILE - Where to write debug output
                file_name = os.getenv("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
                if file_name in ("stdout", "stderr"):
                    # The literal names "stdout"/"stderr" select those streams.
                    fileobj = getattr(sys, file_name)
                elif file_name:
                    fileobj = open(file_name, "a", encoding="utf-8")
                    # This file lives for the whole process; close it at exit.
                    atexit.register(fileobj.close)
                else:
                    fileobj = sys.stderr
            the_one = cls(fileobj, filters)
            cls._set_singleton_data(the_one, interim)

        # An earlier interim singleton may have no filters; adopt ours.
        if not(the_one.filters):
            the_one.filters = list(filters)
        return the_one

    # Because of the way igor.py deletes and re-imports modules,
    # this class can be defined more than once. But we really want
    # a process-wide singleton. So stash it in sys.modules instead of
    # on a class attribute. Yes, this is aggressively gross.

    SYS_MOD_NAME = "$coverage.debug.DebugOutputFile.the_one"
    SINGLETON_ATTR = "the_one_and_is_interim"

    @classmethod
    def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None:
        """Set the one DebugOutputFile to rule them all."""
        # A synthetic module object is the hiding place; its one attribute
        # holds the (instance, is_interim) pair.
        singleton_module = types.ModuleType(cls.SYS_MOD_NAME)
        setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim))
        sys.modules[cls.SYS_MOD_NAME] = singleton_module

    @classmethod
    def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]:
        """Get the one DebugOutputFile."""
        singleton_module = sys.modules.get(cls.SYS_MOD_NAME)
        # (None, True) means "no singleton yet, and anything may replace it".
        return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True))

    @classmethod
    def _del_singleton_data(cls) -> None:
        """Delete the one DebugOutputFile, just for tests to use."""
        if cls.SYS_MOD_NAME in sys.modules:
            del sys.modules[cls.SYS_MOD_NAME]

    def write(self, text: str) -> None:
        """Just like file.write, but filter through all our filters."""
        assert self.outfile is not None
        self.outfile.write(filter_text(text, self.filters))
        self.outfile.flush()

    def flush(self) -> None:
        """Flush our file."""
        assert self.outfile is not None
        self.outfile.flush()
|
||||
|
||||
|
||||
def log(msg: str, stack: bool = False) -> None:  # pragma: debugging
    """Write a log message as forcefully as possible.

    Uses the interim singleton output, so this works even before debugging
    has been fully configured.  If `stack` is true, a stack summary follows
    the message.
    """
    out = DebugOutputFile.get_one(interim=True)
    out.write(msg+"\n")
    if stack:
        dump_stack_frames(out=out, skip=1)
|
||||
|
||||
|
||||
def decorate_methods(
    decorator: Callable[..., Any],
    butnot: Iterable[str] = (),
    private: bool = False,
) -> Callable[..., Any]:  # pragma: debugging
    """A class decorator to apply a decorator to methods.

    Wraps every routine defined directly on the class with `decorator`,
    except `__init__`, names listed in `butnot`, and (unless `private` is
    true) names starting with an underscore.
    """
    def _decorator(cls):  # type: ignore[no-untyped-def]
        for name, method in inspect.getmembers(cls, inspect.isroutine):
            if name not in cls.__dict__:
                # Inherited, not defined on this class: leave it alone.
                continue
            if name == "__init__":
                continue
            if name.startswith("_") and not private:
                continue
            if name in butnot:
                continue
            setattr(cls, name, decorator(method))
        return cls
    return _decorator
|
||||
|
||||
|
||||
def break_in_pudb(func: AnyCallable) -> AnyCallable:    # pragma: debugging
    """A function decorator to stop in the debugger for each call."""
    @functools.wraps(func)
    def _wrapper(*args: Any, **kwargs: Any) -> Any:
        import pudb  # third-party debugger; imported lazily, only when hit.
        # Restore the real stdout in case pytest (or similar) captured it.
        sys.stdout = sys.__stdout__
        pudb.set_trace()
        return func(*args, **kwargs)
    return _wrapper
|
||||
|
||||
|
||||
# Monotonic counters used to label objects and calls in the debug log.
OBJ_IDS = itertools.count()
CALLS = itertools.count()
# Attribute name for stashing a per-object id; the odd spelling avoids
# colliding with real attributes.
OBJ_ID_ATTR = "$coverage.object_id"

def show_calls(
    show_args: bool = True,
    show_stack: bool = False,
    show_return: bool = False,
) -> Callable[..., Any]:     # pragma: debugging
    """A method decorator to debug-log each call to the function.

    Each log line starts with "<pid> <object-serial> <call-serial>" so
    calls can be correlated per object across the log.
    """
    def _decorator(func: AnyCallable) -> AnyCallable:
        @functools.wraps(func)
        def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
            oid = getattr(self, OBJ_ID_ATTR, None)
            if oid is None:
                # First time we've seen this object: assign it an id.
                oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}"
                setattr(self, OBJ_ID_ATTR, oid)
            extra = ""
            if show_args:
                eargs = ", ".join(map(repr, args))
                ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items())
                extra += "("
                extra += eargs
                if eargs and ekwargs:
                    extra += ", "
                extra += ekwargs
                extra += ")"
            if show_stack:
                extra += " @ "
                extra += "; ".join(short_stack(short_filenames=True).splitlines())
            # callid ties the "return" line (if any) back to the call line.
            callid = next(CALLS)
            msg = f"{oid} {callid:04d} {func.__name__}{extra}\n"
            DebugOutputFile.get_one(interim=True).write(msg)
            ret = func(self, *args, **kwargs)
            if show_return:
                msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n"
                DebugOutputFile.get_one(interim=True).write(msg)
            return ret
        return _wrapper
    return _decorator
|
||||
|
||||
|
||||
def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]:
    """Filter environment variables for a debug display.

    Select variables to display (with COV or PY in the name, or HOME, TEMP, or
    TMP), and also cloak sensitive values with asterisks.

    Arguments:
        env: a dict of environment variable names and values.

    Returns:
        A list of pairs (name, value) to show.

    """
    slugs = {"COV", "PY"}
    include = {"HOME", "TEMP", "TMP"}
    cloak = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"}

    to_show = []
    for name, val in env.items():
        if name in include or any(slug in name for slug in slugs):
            if any(slug in name for slug in cloak):
                # Replace every word character so only punctuation shows.
                val = re.sub(r"\w", "*", val)
            to_show.append((name, val))
    return human_sorted_items(to_show)
|
||||
58
.venv/lib/python3.10/site-packages/coverage/disposition.py
Normal file
58
.venv/lib/python3.10/site-packages/coverage/disposition.py
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Simple value objects for tracking what to do with files."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from coverage.types import TFileDisposition
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage.plugin import FileTracer
|
||||
|
||||
|
||||
class FileDisposition:
    """A simple value type for recording what to do with a file."""

    # The file name as originally encountered.
    original_filename: str
    # The canonicalized form of original_filename.
    canonical_filename: str
    # The file name to read source from, if different; None until decided.
    source_filename: str | None
    # Should this file be traced at all?
    trace: bool
    # If trace is False, a human-readable explanation of why not.
    reason: str
    # The plugin FileTracer handling this file, if any.
    file_tracer: FileTracer | None
    # Does the file tracer compute filenames dynamically per-frame?
    has_dynamic_filename: bool

    def __repr__(self) -> str:
        return f"<FileDisposition {self.canonical_filename!r}: trace={self.trace}>"
|
||||
|
||||
|
||||
# FileDisposition "methods": FileDisposition is a pure value object, so it can
|
||||
# be implemented in either C or Python. Acting on them is done with these
|
||||
# functions.
|
||||
|
||||
def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition:
    """Construct and initialize a new FileDisposition object.

    Starts as "not traced", with the canonical name equal to the original.
    """
    disp = cls()
    disp.original_filename = original_filename
    disp.canonical_filename = original_filename
    disp.source_filename = None
    disp.reason = ""
    disp.trace = False
    disp.has_dynamic_filename = False
    disp.file_tracer = None
    return disp
|
||||
|
||||
|
||||
def disposition_debug_msg(disp: TFileDisposition) -> str:
    """Make a nice debug message of what the FileDisposition is doing."""
    if not disp.trace:
        return f"Not tracing {disp.original_filename!r}: {disp.reason}"
    msg = f"Tracing {disp.original_filename!r}"
    if disp.original_filename != disp.source_filename:
        # Source is read from a different file than the one encountered.
        msg += f" as {disp.source_filename!r}"
    if disp.file_tracer:
        msg += f": will be traced by {disp.file_tracer!r}"
    return msg
|
||||
147
.venv/lib/python3.10/site-packages/coverage/env.py
Normal file
147
.venv/lib/python3.10/site-packages/coverage/env.py
Normal file
|
|
@ -0,0 +1,147 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Determine facts about the environment."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
|
||||
from typing import Any, Iterable
|
||||
|
||||
# debug_info() at the bottom wants to show all the globals, but not imports.
|
||||
# Grab the global names here to know which names to not show. Nothing defined
|
||||
# above this line will be in the output.
|
||||
_UNINTERESTING_GLOBALS = list(globals())
|
||||
# These names also shouldn't be shown.
|
||||
_UNINTERESTING_GLOBALS += ["PYBEHAVIOR", "debug_info"]
|
||||
|
||||
# Operating systems.
|
||||
WINDOWS = sys.platform == "win32"
|
||||
LINUX = sys.platform.startswith("linux")
|
||||
OSX = sys.platform == "darwin"
|
||||
|
||||
# Python implementations.
|
||||
CPYTHON = (platform.python_implementation() == "CPython")
|
||||
PYPY = (platform.python_implementation() == "PyPy")
|
||||
|
||||
# Python versions. We amend version_info with one more value, a zero if an
|
||||
# official version, or 1 if built from source beyond an official version.
|
||||
# Only use sys.version_info directly where tools like mypy need it to understand
|
||||
# version-specfic code, otherwise use PYVERSION.
|
||||
PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)
|
||||
|
||||
if PYPY:
|
||||
PYPYVERSION = sys.pypy_version_info # type: ignore[attr-defined]
|
||||
|
||||
# Python behavior.
|
||||
class PYBEHAVIOR:
    """Flags indicating this Python's behavior.

    All flags are computed once, at import time, from the interpreter
    version information above (PYVERSION, PYPY, PYPYVERSION, CPYTHON).
    """

    # Does Python conform to PEP626, Precise line numbers for debugging and other tools.
    # https://www.python.org/dev/peps/pep-0626
    pep626 = (PYVERSION > (3, 10, 0, "alpha", 4))

    # Is "if __debug__" optimized away?
    optimize_if_debug = not pep626

    # Is "if not __debug__" optimized away? The exact details have changed
    # across versions.
    if pep626:
        optimize_if_not_debug = 1
    elif PYPY:
        if PYVERSION >= (3, 9):
            optimize_if_not_debug = 2
        else:
            optimize_if_not_debug = 3
    else:
        optimize_if_not_debug = 2

    # 3.7 changed how functions with only docstrings are numbered.
    docstring_only_function = (not PYPY) and (PYVERSION <= (3, 10))

    # When a break/continue/return statement in a try block jumps to a finally
    # block, does the finally jump back to the break/continue/return (pre-3.10)
    # to do the work?
    finally_jumps_back = (PYVERSION < (3, 10))

    # CPython 3.11 now jumps to the decorator line again while executing
    # the decorator.
    trace_decorator_line_again = (CPYTHON and PYVERSION > (3, 11, 0, "alpha", 3, 0))

    # CPython 3.9a1 made sys.argv[0] and other reported files absolute paths.
    report_absolute_files = (
        (CPYTHON or (PYPY and PYPYVERSION >= (7, 3, 10)))
        and PYVERSION >= (3, 9)
    )

    # Lines after break/continue/return/raise are no longer compiled into the
    # bytecode.  They used to be marked as missing, now they aren't executable.
    omit_after_jump = (
        pep626
        or (PYPY and PYVERSION >= (3, 9) and PYPYVERSION >= (7, 3, 12))
    )

    # PyPy has always omitted statements after return.
    omit_after_return = omit_after_jump or PYPY

    # Optimize away unreachable try-else clauses.
    optimize_unreachable_try_else = pep626

    # Modules used to have firstlineno equal to the line number of the first
    # real line of code.  Now they always start at 1.
    module_firstline_1 = pep626

    # Are "if 0:" lines (and similar) kept in the compiled code?
    keep_constant_test = pep626

    # When leaving a with-block, do we visit the with-line again for the exit?
    exit_through_with = (PYVERSION >= (3, 10, 0, "beta"))

    # Match-case construct.
    match_case = (PYVERSION >= (3, 10))

    # Some words are keywords in some places, identifiers in other places.
    soft_keywords = (PYVERSION >= (3, 10))

    # Modules start with a line numbered zero. This means empty modules have
    # only a 0-number line, which is ignored, giving a truly empty module.
    empty_is_empty = (PYVERSION >= (3, 11, 0, "beta", 4))

    # Are comprehensions inlined (new) or compiled as called functions (old)?
    # Changed in https://github.com/python/cpython/pull/101441
    comprehensions_are_functions = (PYVERSION <= (3, 12, 0, "alpha", 7, 0))

    # PEP669 Low Impact Monitoring: https://peps.python.org/pep-0669/
    pep669 = bool(getattr(sys, "monitoring", None))

    # Where does frame.f_lasti point when yielding from a generator?
    # It used to point at the YIELD, now it points at the RESUME.
    # https://github.com/python/cpython/issues/113728
    lasti_is_yield = (PYVERSION < (3, 13))
|
||||
|
||||
|
||||
# Coverage.py specifics, about testing scenarios. See tests/testenv.py also.
|
||||
|
||||
# Are we coverage-measuring ourselves?
|
||||
METACOV = os.getenv("COVERAGE_COVERAGE") is not None
|
||||
|
||||
# Are we running our test suite?
|
||||
# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
|
||||
# test-specific behavior like AST checking.
|
||||
TESTING = os.getenv("COVERAGE_TESTING") == "True"
|
||||
|
||||
|
||||
def debug_info() -> Iterable[tuple[str, Any]]:
    """Return a list of (name, value) pairs for printing debug information."""
    # Public module globals, minus those recorded as uninteresting above.
    info = [
        (name, value)
        for name, value in globals().items()
        if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS
    ]
    # Plus every public flag on PYBEHAVIOR.
    info.extend(
        (name, value)
        for name, value in PYBEHAVIOR.__dict__.items()
        if not name.startswith("_")
    )
    return sorted(info)
|
||||
63
.venv/lib/python3.10/site-packages/coverage/exceptions.py
Normal file
63
.venv/lib/python3.10/site-packages/coverage/exceptions.py
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Exceptions coverage.py can raise."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
class _BaseCoverageException(Exception):
|
||||
"""The base-base of all Coverage exceptions."""
|
||||
pass
|
||||
|
||||
|
||||
class CoverageException(_BaseCoverageException):
    """The base class of all exceptions raised by Coverage.py."""
|
||||
|
||||
|
||||
class ConfigError(_BaseCoverageException):
    """A problem with a config file, or a value in one."""
|
||||
|
||||
|
||||
class DataError(CoverageException):
    """An error in using a data file."""
|
||||
|
||||
class NoDataError(CoverageException):
    """We didn't have data to work with."""
|
||||
|
||||
|
||||
class NoSource(CoverageException):
    """We couldn't find the source for a module."""
|
||||
|
||||
|
||||
class NoCode(NoSource):
    """We couldn't find any code at all."""
|
||||
|
||||
|
||||
class NotPython(CoverageException):
    """A source file turned out not to be parsable Python."""
|
||||
|
||||
|
||||
class PluginError(CoverageException):
    """A plugin misbehaved."""
|
||||
|
||||
|
||||
class _ExceptionDuringRun(CoverageException):
    """An exception happened while running customer code.

    Construct it with three arguments, the values from `sys.exc_info`.

    """
|
||||
|
||||
|
||||
class CoverageWarning(Warning):
    """A warning from Coverage.py."""
|
||||
327
.venv/lib/python3.10/site-packages/coverage/execfile.py
Normal file
327
.venv/lib/python3.10/site-packages/coverage/execfile.py
Normal file
|
|
@ -0,0 +1,327 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Execute files of Python code."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.machinery
|
||||
import importlib.util
|
||||
import inspect
|
||||
import marshal
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
|
||||
from importlib.machinery import ModuleSpec
|
||||
from types import CodeType, ModuleType
|
||||
from typing import Any
|
||||
|
||||
from coverage import env
|
||||
from coverage.exceptions import CoverageException, _ExceptionDuringRun, NoCode, NoSource
|
||||
from coverage.files import canonical_filename, python_reported_file
|
||||
from coverage.misc import isolate_module
|
||||
from coverage.python import get_python_source
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
|
||||
|
||||
class DummyLoader:
    """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.

    Only the .fullname attribute is implemented; extra positional
    arguments are accepted and ignored.
    """

    def __init__(self, fullname: str, *_args: Any) -> None:
        self.fullname = fullname
|
||||
|
||||
|
||||
def find_module(
    modulename: str,
) -> tuple[str | None, str, ModuleSpec]:
    """Find the module named `modulename`.

    Returns the file path of the module, the name of the enclosing
    package, and the spec.
    """
    try:
        spec = importlib.util.find_spec(modulename)
    except ImportError as err:
        raise NoSource(str(err)) from err
    if not spec:
        raise NoSource(f"No module named {modulename!r}")

    if spec.submodule_search_locations:
        # It's a package: the runnable thing is its __main__ submodule.
        mod_main = modulename + ".__main__"
        spec = importlib.util.find_spec(mod_main)
        if not spec:
            raise NoSource(
                f"No module named {mod_main}; " +
                f"{modulename!r} is a package and cannot be directly executed",
            )

    # The enclosing package is everything before the last dot.
    enclosing_package = spec.name.rpartition(".")[0]
    return spec.origin, enclosing_package, spec
|
||||
|
||||
|
||||
class PyRunner:
    """Multi-stage execution of Python code.

    This is meant to emulate real Python execution as closely as possible.

    """
    def __init__(self, args: list[str], as_module: bool = False) -> None:
        # `args` becomes sys.argv; args[0] names the file or module to run.
        self.args = args
        # True when running with `-m module` semantics instead of a file path.
        self.as_module = as_module

        self.arg0 = args[0]
        # Filled in by _prepare2(); None until then.
        self.package: str | None = None
        self.modulename: str | None = None
        self.pathname: str | None = None
        self.loader: DummyLoader | None = None
        self.spec: ModuleSpec | None = None

    def prepare(self) -> None:
        """Set sys.path properly.

        This needs to happen before any importing, and without importing anything.
        """
        # The entry we'd like sys.path[0] to be; None means "leave it alone".
        path0: str | None
        if self.as_module:
            path0 = os.getcwd()
        elif os.path.isdir(self.arg0):
            # Running a directory means running the __main__.py file in that
            # directory.
            path0 = self.arg0
        else:
            path0 = os.path.abspath(os.path.dirname(self.arg0))

        if os.path.isdir(sys.path[0]):
            # sys.path fakery. If we are being run as a command, then sys.path[0]
            # is the directory of the "coverage" script. If this is so, replace
            # sys.path[0] with the directory of the file we're running, or the
            # current directory when running modules. If it isn't so, then we
            # don't know what's going on, and just leave it alone.
            top_file = inspect.stack()[-1][0].f_code.co_filename
            sys_path_0_abs = os.path.abspath(sys.path[0])
            top_file_dir_abs = os.path.abspath(os.path.dirname(top_file))
            sys_path_0_abs = canonical_filename(sys_path_0_abs)
            top_file_dir_abs = canonical_filename(top_file_dir_abs)
            if sys_path_0_abs != top_file_dir_abs:
                path0 = None

        else:
            # sys.path[0] is a file. Is the next entry the directory containing
            # that file?
            if sys.path[1] == os.path.dirname(sys.path[0]):
                # Can it be right to always remove that?
                del sys.path[1]

        if path0 is not None:
            sys.path[0] = python_reported_file(path0)

    def _prepare2(self) -> None:
        """Do more preparation to run Python code.

        Includes finding the module to run and adjusting sys.argv[0].
        This method is allowed to import code.
        """
        if self.as_module:
            self.modulename = self.arg0
            pathname, self.package, self.spec = find_module(self.modulename)
            if self.spec is not None:
                self.modulename = self.spec.name
            self.loader = DummyLoader(self.modulename)
            assert pathname is not None
            self.pathname = os.path.abspath(pathname)
            # Replace the module name with the file path, as real Python does.
            self.args[0] = self.arg0 = self.pathname
        elif os.path.isdir(self.arg0):
            # Running a directory means running the __main__.py file in that
            # directory.
            for ext in [".py", ".pyc", ".pyo"]:
                try_filename = os.path.join(self.arg0, "__main__" + ext)
                # 3.8.10 changed how files are reported when running a
                # directory. But I'm not sure how far this change is going to
                # spread, so I'll just hard-code it here for now.
                if env.PYVERSION >= (3, 8, 10):
                    try_filename = os.path.abspath(try_filename)
                if os.path.exists(try_filename):
                    self.arg0 = try_filename
                    break
            else:
                raise NoSource(f"Can't find '__main__' module in '{self.arg0}'")

            # Make a spec. I don't know if this is the right way to do it.
            try_filename = python_reported_file(try_filename)
            self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename)
            self.spec.has_location = True
            self.package = ""
            self.loader = DummyLoader("__main__")
        else:
            self.loader = DummyLoader("__main__")

        self.arg0 = python_reported_file(self.arg0)

    def run(self) -> None:
        """Run the Python code!

        Builds a fresh __main__ module, compiles the target (source or .pyc),
        and exec's it, reproducing CPython's error reporting as closely as
        possible.
        """

        self._prepare2()

        # Create a module to serve as __main__
        main_mod = ModuleType("__main__")

        from_pyc = self.arg0.endswith((".pyc", ".pyo"))
        main_mod.__file__ = self.arg0
        if from_pyc:
            # Report the .py file, not the compiled artifact.
            main_mod.__file__ = main_mod.__file__[:-1]
        if self.package is not None:
            main_mod.__package__ = self.package
        main_mod.__loader__ = self.loader  # type: ignore[assignment]
        if self.spec is not None:
            main_mod.__spec__ = self.spec

        main_mod.__builtins__ = sys.modules["builtins"]  # type: ignore[attr-defined]

        sys.modules["__main__"] = main_mod

        # Set sys.argv properly.
        sys.argv = self.args

        try:
            # Make a code object somehow.
            if from_pyc:
                code = make_code_from_pyc(self.arg0)
            else:
                code = make_code_from_py(self.arg0)
        except CoverageException:
            raise
        except Exception as exc:
            msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}"
            raise CoverageException(msg) from exc

        # Execute the code object.
        # Return to the original directory in case the test code exits in
        # a non-existent directory.
        cwd = os.getcwd()
        try:
            exec(code, main_mod.__dict__)
        except SystemExit:  # pylint: disable=try-except-raise
            # The user called sys.exit(). Just pass it along to the upper
            # layers, where it will be handled.
            raise
        except Exception:
            # Something went wrong while executing the user code.
            # Get the exc_info, and pack them into an exception that we can
            # throw up to the outer loop. We peel one layer off the traceback
            # so that the coverage.py code doesn't appear in the final printed
            # traceback.
            typ, err, tb = sys.exc_info()
            assert typ is not None
            assert err is not None
            assert tb is not None

            # PyPy3 weirdness. If I don't access __context__, then somehow it
            # is non-None when the exception is reported at the upper layer,
            # and a nested exception is shown to the user. This getattr fixes
            # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
            getattr(err, "__context__", None)

            # Call the excepthook.
            try:
                assert err.__traceback__ is not None
                err.__traceback__ = err.__traceback__.tb_next
                sys.excepthook(typ, err, tb.tb_next)
            except SystemExit:  # pylint: disable=try-except-raise
                raise
            except Exception as exc:
                # Getting the output right in the case of excepthook
                # shenanigans is kind of involved.
                sys.stderr.write("Error in sys.excepthook:\n")
                typ2, err2, tb2 = sys.exc_info()
                assert typ2 is not None
                assert err2 is not None
                assert tb2 is not None
                err2.__suppress_context__ = True
                assert err2.__traceback__ is not None
                err2.__traceback__ = err2.__traceback__.tb_next
                sys.__excepthook__(typ2, err2, tb2.tb_next)
                sys.stderr.write("\nOriginal exception was:\n")
                raise _ExceptionDuringRun(typ, err, tb.tb_next) from exc
            else:
                sys.exit(1)
        finally:
            os.chdir(cwd)
|
||||
|
||||
|
||||
def run_python_module(args: list[str]) -> None:
    """Run a Python module, as though with ``python -m name args...``.

    `args` is the argument array to present as sys.argv, including the first
    element naming the module being executed.

    This is a helper for tests, to encapsulate how to use PyRunner.
    """
    module_runner = PyRunner(args, as_module=True)
    module_runner.prepare()
    module_runner.run()
|
||||
|
||||
|
||||
def run_python_file(args: list[str]) -> None:
    """Run a Python file as if it were the main program on the command line.

    `args` is the argument array to present as sys.argv, including the first
    element naming the file being executed.

    This is a helper for tests, to encapsulate how to use PyRunner.
    """
    file_runner = PyRunner(args, as_module=False)
    file_runner.prepare()
    file_runner.run()
|
||||
|
||||
|
||||
def make_code_from_py(filename: str) -> CodeType:
    """Get source from `filename` and make a code object of it.

    Raises `NoSource` if the file can't be found or read.
    """
    # Open the source file.
    try:
        source = get_python_source(filename)
    except (OSError, NoSource) as exc:
        # Bug fix: the message was a placeholder-less f-string that had lost
        # the filename; include the actual file we couldn't run.
        raise NoSource(f"No file to run: '{filename}'") from exc

    return compile(source, filename, "exec", dont_inherit=True)
|
||||
|
||||
|
||||
def make_code_from_pyc(filename: str) -> CodeType:
    """Get a code object from a .pyc file.

    Raises `NoCode` if the file can't be opened or has the wrong magic number.
    """
    try:
        fpyc = open(filename, "rb")
    except OSError as exc:
        # Bug fix: the message was a placeholder-less f-string that had lost
        # the filename; include the actual file we couldn't run.
        raise NoCode(f"No file to run: '{filename}'") from exc

    with fpyc:
        # First four bytes are a version-specific magic number. It has to
        # match or we won't run the file.
        magic = fpyc.read(4)
        if magic != PYC_MAGIC_NUMBER:
            raise NoCode(f"Bad magic number in .pyc file: {magic!r} != {PYC_MAGIC_NUMBER!r}")

        # PEP 552: a flags word follows the magic; bit 0 selects hash-based
        # invalidation, which changes the header layout.
        flags = struct.unpack("<L", fpyc.read(4))[0]
        hash_based = flags & 0x01
        if hash_based:
            fpyc.read(8)  # Skip the hash.
        else:
            # Skip the junk in the header that we don't need.
            fpyc.read(4)  # Skip the moddate.
            fpyc.read(4)  # Skip the size.

        # The rest of the file is the code object we want.
        code = marshal.load(fpyc)
        assert isinstance(code, CodeType)

    return code
|
||||
547
.venv/lib/python3.10/site-packages/coverage/files.py
Normal file
547
.venv/lib/python3.10/site-packages/coverage/files.py
Normal file
|
|
@ -0,0 +1,547 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""File wrangling."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import ntpath
|
||||
import os
|
||||
import os.path
|
||||
import posixpath
|
||||
import re
|
||||
import sys
|
||||
|
||||
from typing import Callable, Iterable
|
||||
|
||||
from coverage import env
|
||||
from coverage.exceptions import ConfigError
|
||||
from coverage.misc import human_sorted, isolate_module, join_regex
|
||||
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
RELATIVE_DIR: str = ""
|
||||
CANONICAL_FILENAME_CACHE: dict[str, str] = {}
|
||||
|
||||
def set_relative_directory() -> None:
    """Set the directory that `relative_filename` will be relative to."""
    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE

    # The current directory, guaranteed to end with a separator (the system
    # root already has one, so don't double it).
    curdir_abs = abs_file(os.curdir)
    if not curdir_abs.endswith(os.sep):
        curdir_abs += os.sep

    # The absolute, case-normalized path to our current directory.
    RELATIVE_DIR = os.path.normcase(curdir_abs)

    # Start a fresh cache of canonical_filename() results, since they depend
    # on the directory we just captured.
    CANONICAL_FILENAME_CACHE = {}
|
||||
|
||||
|
||||
def relative_directory() -> str:
    """Return the directory that `relative_filename` is relative to."""
    # This is the module global captured by set_relative_directory().
    return RELATIVE_DIR
|
||||
|
||||
|
||||
def relative_filename(filename: str) -> str:
    """Return the relative form of `filename`.

    The file name will be relative to the current directory when the
    `set_relative_directory` was called.
    """
    normalized = os.path.normcase(filename)
    if normalized.startswith(RELATIVE_DIR):
        # Strip the directory prefix, keeping the original (un-normalized)
        # spelling of the remainder.
        return filename[len(RELATIVE_DIR):]
    return filename
|
||||
|
||||
|
||||
def canonical_filename(filename: str) -> str:
    """Return a canonical file name for `filename`.

    An absolute path with no redundant components and normalized case.
    """
    if filename not in CANONICAL_FILENAME_CACHE:
        candidate = filename
        if not os.path.isabs(filename):
            # A relative name: look for it along the same path Python would.
            for searchdir in [os.curdir] + sys.path:
                if searchdir is None:
                    continue  # type: ignore[unreachable]
                joined = os.path.join(searchdir, filename)
                try:
                    found = os.path.exists(joined)
                except UnicodeError:
                    found = False
                if found:
                    candidate = joined
                    break
        CANONICAL_FILENAME_CACHE[filename] = abs_file(candidate)
    return CANONICAL_FILENAME_CACHE[filename]
|
||||
|
||||
|
||||
MAX_FLAT = 100

def flat_rootname(filename: str) -> str:
    """A base for a flat file name to correspond to this file.

    Useful for writing files about the code where you want all the files in
    the same directory, but need to differentiate same-named files from
    different directories.

    For example, the file a/b/c.py will return 'd_86bbcbe134d28fd2_c_py'

    """
    dirname, basename = ntpath.split(filename)
    prefix = ""
    if dirname:
        # A short fingerprint of the directory keeps same-named files apart.
        digest = hashlib.new("sha3_256", dirname.encode("UTF-8")).hexdigest()
        prefix = f"d_{digest[:16]}_"
    return prefix + basename.replace(".", "_")
|
||||
|
||||
|
||||
if env.WINDOWS:

    # Caches: path -> corrected-case path, and directory -> its listing.
    _ACTUAL_PATH_CACHE: dict[str, str] = {}
    _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {}

    def actual_path(path: str) -> str:
        """Get the actual path of `path`, including the correct case."""
        if path in _ACTUAL_PATH_CACHE:
            return _ACTUAL_PATH_CACHE[path]

        head, tail = os.path.split(path)
        if not tail:
            # This means head is the drive spec: normalize it.
            actpath = head.upper()
        elif not head:
            actpath = tail
        else:
            # Recursively fix the case of the directory, then look for the
            # real spelling of `tail` in its listing.
            head = actual_path(head)
            if head in _ACTUAL_PATH_LIST_CACHE:
                files = _ACTUAL_PATH_LIST_CACHE[head]
            else:
                try:
                    files = os.listdir(head)
                except Exception:
                    # This will raise OSError, or this bizarre TypeError:
                    # https://bugs.python.org/issue1776160
                    files = []
                _ACTUAL_PATH_LIST_CACHE[head] = files
            normtail = os.path.normcase(tail)
            for f in files:
                if os.path.normcase(f) == normtail:
                    tail = f
                    break
            actpath = os.path.join(head, tail)
        _ACTUAL_PATH_CACHE[path] = actpath
        return actpath

else:
    def actual_path(path: str) -> str:
        """The actual path for non-Windows platforms."""
        # Non-Windows file systems are case-sensitive, so the path is already
        # "actual".
        return path
|
||||
|
||||
|
||||
def abs_file(path: str) -> str:
    """Return the absolute normalized form of `path`."""
    # Resolve symlinks first, then absolutize and fix the case.
    resolved = os.path.realpath(path)
    return actual_path(os.path.abspath(resolved))
|
||||
|
||||
|
||||
def zip_location(filename: str) -> tuple[str, str] | None:
    """Split a filename into a zipfile / inner name pair.

    Only return a pair if the zipfile exists. No check is made if the inner
    name is in the zipfile.
    """
    for ext in (".zip", ".whl", ".egg", ".pex"):
        front, matched, inner = filename.partition(ext + sep(filename))
        if not matched:
            continue
        zipfile = front + ext
        if os.path.exists(zipfile):
            return zipfile, inner
    return None
|
||||
|
||||
|
||||
def source_exists(path: str) -> bool:
    """Determine if a source file path exists."""
    # Either a real file on disk, or a path into an existing zipfile (if
    # zip_location returns anything, the zipfile exists, which is good
    # enough for us).
    return bool(os.path.exists(path) or zip_location(path))
|
||||
|
||||
|
||||
def python_reported_file(filename: str) -> str:
    """Return the string as Python would describe this file name."""
    # Some Python versions report absolute paths for executed files.
    if env.PYBEHAVIOR.report_absolute_files:
        return os.path.abspath(filename)
    return filename
|
||||
|
||||
|
||||
def isabs_anywhere(filename: str) -> bool:
    """Is `filename` an absolute path on any OS?"""
    # Check both the POSIX and Windows notions of absoluteness.
    return posixpath.isabs(filename) or ntpath.isabs(filename)
|
||||
|
||||
|
||||
def prep_patterns(patterns: Iterable[str]) -> list[str]:
    """Prepare the file patterns for use in a `GlobMatcher`.

    If a pattern starts with a wildcard, it is used as a pattern
    as-is. If it does not start with a wildcard, then it is made
    absolute with the current directory.

    If `patterns` is None, an empty list is returned.
    """
    prepped = []
    for pattern in patterns or []:
        prepped.append(pattern)
        if not pattern.startswith(("*", "?")):
            # Also match against the absolute form of the pattern.
            prepped.append(abs_file(pattern))
    return prepped
|
||||
|
||||
|
||||
class TreeMatcher:
    """A matcher for files in a tree.

    Construct with a list of paths, either files or directories. Paths match
    with the `match` method if they are one of the files, or if they are
    somewhere in a subtree rooted at one of the directories.
    """

    def __init__(self, paths: Iterable[str], name: str = "unknown") -> None:
        self.original_paths: list[str] = human_sorted(paths)
        # Case-normalized paths are what match() compares against.
        self.paths = [os.path.normcase(p) for p in paths]
        self.name = name

    def __repr__(self) -> str:
        return f"<TreeMatcher {self.name} {self.original_paths!r}>"

    def info(self) -> list[str]:
        """A list of strings for displaying when dumping state."""
        return self.original_paths

    def match(self, fpath: str) -> bool:
        """Does `fpath` indicate a file in one of our trees?"""
        fpath = os.path.normcase(fpath)
        for root in self.paths:
            if not fpath.startswith(root):
                continue
            # Either the exact same file, or a file under the directory.
            if fpath == root or fpath[len(root)] == os.sep:
                return True
        return False
|
||||
|
||||
|
||||
class ModuleMatcher:
    """A matcher for modules in a tree."""

    def __init__(self, module_names: Iterable[str], name: str = "unknown") -> None:
        self.modules = list(module_names)
        self.name = name

    def __repr__(self) -> str:
        return f"<ModuleMatcher {self.name} {self.modules!r}>"

    def info(self) -> list[str]:
        """A list of strings for displaying when dumping state."""
        return self.modules

    def match(self, module_name: str) -> bool:
        """Does `module_name` indicate a module in one of our packages?"""
        if not module_name:
            return False

        for mod in self.modules:
            if not module_name.startswith(mod):
                continue
            # Either the package itself, or a dotted submodule of it.
            if module_name == mod or module_name[len(mod)] == ".":
                return True

        return False
|
||||
|
||||
|
||||
class GlobMatcher:
    """A matcher for files by file name pattern."""

    def __init__(self, pats: Iterable[str], name: str = "unknown") -> None:
        self.pats = list(pats)
        # Windows file systems ignore case, so match case-insensitively there.
        self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS)
        self.name = name

    def __repr__(self) -> str:
        return f"<GlobMatcher {self.name} {self.pats!r}>"

    def info(self) -> list[str]:
        """A list of strings for displaying when dumping state."""
        return self.pats

    def match(self, fpath: str) -> bool:
        """Does `fpath` match one of our file name patterns?"""
        return bool(self.re.match(fpath))
|
||||
|
||||
|
||||
def sep(s: str) -> str:
    """Find the path separator used in this string, or os.sep if none."""
    # The first slash-like character wins; fall back to the platform default.
    found = re.search(r"[\\/]", s)
    return found[0] if found else os.sep
|
||||
|
||||
|
||||
# Tokenizer for _glob_to_regex.
# Each entry is (token regex, regex replacement).
# None as a sub means disallowed.
G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [
    (r"\*\*\*+", None),             # Can't have ***
    (r"[^/]+\*\*+", None),          # Can't have x**
    (r"\*\*+[^/]+", None),          # Can't have **x
    (r"\*\*/\*\*", None),           # Can't have **/**
    (r"^\*+/", r"(.*[/\\\\])?"),    # ^*/ matches any prefix-slash, or nothing.
    (r"/\*+$", r"[/\\\\].*"),       # /*$ matches any slash-suffix.
    (r"\*\*/", r"(.*[/\\\\])?"),    # **/ matches any subdirs, including none
    (r"/", r"[/\\\\]"),             # / matches either slash or backslash
    (r"\*", r"[^/\\\\]*"),          # * matches any number of non slash-likes
    (r"\?", r"[^/\\\\]"),           # ? matches one non slash-like
    (r"\[.*?\]", r"\g<0>"),         # [a-f] matches [a-f]
    (r"[a-zA-Z0-9_-]+", r"\g<0>"),  # word chars match themselves
    (r"[\[\]]", None),              # Can't have single square brackets
    (r".", r"\\\g<0>"),             # Anything else is escaped to be safe
]]
|
||||
|
||||
def _glob_to_regex(pattern: str) -> str:
    """Convert a file-path glob pattern into a regex."""
    # Turn all backslashes into slashes to simplify the tokenizer.
    pattern = pattern.replace("\\", "/")
    if "/" not in pattern:
        # A bare name should match anywhere in the tree.
        pattern = "**/" + pattern

    pieces = []
    pos = 0
    while pos < len(pattern):
        for rx, sub in G2RX_TOKENS:  # pragma: always breaks
            m = rx.match(pattern, pos=pos)
            if m:
                if sub is None:
                    raise ConfigError(f"File pattern can't include {m[0]!r}")
                pieces.append(m.expand(sub))
                pos = m.end()
                break
    return "".join(pieces)
|
||||
|
||||
|
||||
def globs_to_regex(
    patterns: Iterable[str],
    case_insensitive: bool = False,
    partial: bool = False,
) -> re.Pattern[str]:
    """Convert glob patterns to a compiled regex that matches any of them.

    Slashes are always converted to match either slash or backslash, for
    Windows support, even when running elsewhere.

    If the pattern has no slash or backslash, then it is interpreted as
    matching a file name anywhere it appears in the tree. Otherwise, the glob
    pattern must match the whole file path.

    If `partial` is true, then the pattern will match if the target string
    starts with the pattern. Otherwise, it must match the entire string.

    Returns: a compiled regex object. Use the .match method to compare target
    strings.
    """
    rx = join_regex(map(_glob_to_regex, patterns))
    if not partial:
        # Anchor the match to the end of the string.
        rx = fr"(?:{rx})\Z"
    flags = re.IGNORECASE if case_insensitive else 0
    return re.compile(rx, flags=flags)
|
||||
|
||||
|
||||
class PathAliases:
    """A collection of aliases for paths.

    When combining data files from remote machines, often the paths to source
    code are different, for example, due to OS differences, or because of
    serialized checkouts on continuous integration machines.

    A `PathAliases` object tracks a list of pattern/result pairs, and can
    map a path through those aliases to produce a unified path.

    """
    def __init__(
        self,
        debugfn: Callable[[str], None] | None = None,
        relative: bool = False,
    ) -> None:
        # A list of (original_pattern, regex, result)
        self.aliases: list[tuple[str, re.Pattern[str], str]] = []
        # Debug logger; defaults to a no-op so it can always be called.
        self.debugfn = debugfn or (lambda msg: 0)
        # If true, results are kept relative instead of canonicalized.
        self.relative = relative
        # Whether the rules have been dumped to the debug log yet.
        self.pprinted = False

    def pprint(self) -> None:
        """Dump the important parts of the PathAliases, for debugging."""
        self.debugfn(f"Aliases (relative={self.relative}):")
        for original_pattern, regex, result in self.aliases:
            self.debugfn(f" Rule: {original_pattern!r} -> {result!r} using regex {regex.pattern!r}")

    def add(self, pattern: str, result: str) -> None:
        """Add the `pattern`/`result` pair to the list of aliases.

        `pattern` is an `glob`-style pattern. `result` is a simple
        string. When mapping paths, if a path starts with a match against
        `pattern`, then that match is replaced with `result`. This models
        isomorphic source trees being rooted at different places on two
        different machines.

        `pattern` can't end with a wildcard component, since that would
        match an entire tree, and not just its root.

        """
        original_pattern = pattern
        pattern_sep = sep(pattern)

        if len(pattern) > 1:
            pattern = pattern.rstrip(r"\/")

        # The pattern can't end with a wildcard component.
        if pattern.endswith("*"):
            raise ConfigError("Pattern must not end with wildcards.")

        # The pattern is meant to match a file path. Let's make it absolute
        # unless it already is, or is meant to match any prefix.
        if not self.relative:
            if not pattern.startswith("*") and not isabs_anywhere(pattern + pattern_sep):
                pattern = abs_file(pattern)
        if not pattern.endswith(pattern_sep):
            pattern += pattern_sep

        # Make a regex from the pattern.
        regex = globs_to_regex([pattern], case_insensitive=True, partial=True)

        # Normalize the result: it must end with a path separator.
        result_sep = sep(result)
        result = result.rstrip(r"\/") + result_sep
        self.aliases.append((original_pattern, regex, result))

    def map(self, path: str, exists: Callable[[str], bool] = source_exists) -> str:
        """Map `path` through the aliases.

        `path` is checked against all of the patterns. The first pattern to
        match is used to replace the root of the path with the result root.
        Only one pattern is ever used. If no patterns match, `path` is
        returned unchanged.

        The separator style in the result is made to match that of the result
        in the alias.

        `exists` is a function to determine if the resulting path actually
        exists.

        Returns the mapped path. If a mapping has happened, this is a
        canonical path. If no mapping has happened, it is the original value
        of `path` unchanged.

        """
        # Log the rule set once, the first time any mapping is attempted.
        if not self.pprinted:
            self.pprint()
            self.pprinted = True

        for original_pattern, regex, result in self.aliases:
            if m := regex.match(path):
                new = path.replace(m[0], result)
                new = new.replace(sep(path), sep(result))
                if not self.relative:
                    new = canonical_filename(new)
                # Keep a leading "./" only if the result itself used one.
                dot_start = result.startswith(("./", ".\\")) and len(result) > 2
                if new.startswith(("./", ".\\")) and not dot_start:
                    new = new[2:]
                if not exists(new):
                    self.debugfn(
                        f"Rule {original_pattern!r} changed {path!r} to {new!r} " +
                        "which doesn't exist, continuing",
                    )
                    continue
                self.debugfn(
                    f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " +
                    f"producing {new!r}",
                )
                return new

        # If we get here, no pattern matched.

        if self.relative:
            path = relative_filename(path)

        if self.relative and not isabs_anywhere(path):
            # Auto-generate a pattern to implicitly match relative files
            parts = re.split(r"[/\\]", path)
            if len(parts) > 1:
                dir1 = parts[0]
                pattern = f"*/{dir1}"
                regex_pat = fr"^(.*[\\/])?{re.escape(dir1)}[\\/]"
                result = f"{dir1}{os.sep}"
                # Only add a new pattern if we don't already have this pattern.
                if not any(p == pattern for p, _, _ in self.aliases):
                    self.debugfn(
                        f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}",
                    )
                    self.aliases.append((pattern, re.compile(regex_pat), result))
                    # Retry the mapping with the newly generated rule in place.
                    return self.map(path, exists=exists)

        self.debugfn(f"No rules match, path {path!r} is unchanged")
        return path
|
||||
|
||||
|
||||
def find_python_files(dirname: str, include_namespace_packages: bool) -> Iterable[str]:
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a __init__.py,
    except for `dirname` itself, which isn't required to have one. The
    assumption is that `dirname` was specified directly, so the user knows
    best, but sub-directories are checked for a __init__.py to be sure we only
    find the importable files.

    If `include_namespace_packages` is True, then the check for __init__.py
    files is skipped.

    Files with strange characters are skipped, since they couldn't have been
    imported, and are probably editor side-files.
    """
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if not include_namespace_packages and i > 0 and "__init__.py" not in filenames:
            # Not a package, so neither it nor its subtree is importable:
            # prune the walk here.
            del dirnames[:]
            continue
        for filename in filenames:
            # Only files that look like reasonable Python files: must end
            # with .py or .pyw, and must not have editor-junk characters.
            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
                yield os.path.join(dirpath, filename)
|
||||
|
||||
|
||||
# Globally set the relative directory.
|
||||
set_relative_directory()
|
||||
656
.venv/lib/python3.10/site-packages/coverage/html.py
Normal file
656
.venv/lib/python3.10/site-packages/coverage/html.py
Normal file
|
|
@ -0,0 +1,656 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""HTML reporting for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
import datetime
|
||||
import functools
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import string
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable, TYPE_CHECKING, cast
|
||||
|
||||
import coverage
|
||||
from coverage.data import CoverageData, add_data_to_hash
|
||||
from coverage.exceptions import NoDataError
|
||||
from coverage.files import flat_rootname
|
||||
from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime
|
||||
from coverage.misc import human_sorted, plural, stdout_link
|
||||
from coverage.report_core import get_analysis_to_report
|
||||
from coverage.results import Analysis, Numbers
|
||||
from coverage.templite import Templite
|
||||
from coverage.types import TLineNo, TMorf
|
||||
from coverage.version import __url__
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
# To avoid circular imports:
|
||||
from coverage import Coverage
|
||||
from coverage.plugins import FileReporter
|
||||
|
||||
# To be able to use 3.8 typing features, and still run on 3.7:
|
||||
from typing import TypedDict
|
||||
|
||||
class IndexInfoDict(TypedDict):
    """Information for each file, to render the index file."""
    # Coverage summary numbers for the file.
    nums: Numbers
    # Name of the generated per-file HTML page.
    html_filename: str
    # The file's path as displayed in the report.
    relative_filename: str
|
||||
|
||||
class FileInfoDict(TypedDict):
    """Summary of the information from last rendering, to avoid duplicate work."""
    # Hash of the file's source text combined with its coverage data,
    # used to decide whether the HTML page needs regenerating.
    hash: str
    # The index information rendered for this file last time.
    index: IndexInfoDict
|
||||
|
||||
|
||||
# Replace the os module with an isolating wrapper so tests can patch it safely.
os = isolate_module(os)
|
||||
|
||||
|
||||
def data_filename(fname: str) -> str:
    """Return the path to an "htmlfiles" data file of ours."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "htmlfiles", fname)
|
||||
|
||||
|
||||
def read_data(fname: str) -> str:
    """Return the contents of a data file of ours."""
    path = data_filename(fname)
    with open(path) as data_file:
        return data_file.read()
|
||||
|
||||
|
||||
def write_html(fname: str, html: str) -> None:
    """Write `html` to `fname`, properly encoded."""
    # Strip leading whitespace at the top of the document and trailing
    # whitespace on every line, then guarantee a final newline.
    trimmed = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
    with open(fname, "wb") as fout:
        # Non-ASCII characters become HTML character references.
        fout.write(trimmed.encode("ascii", "xmlcharrefreplace"))
|
||||
|
||||
|
||||
@dataclass
class LineData:
    """The data for each source line of HTML output."""
    # Syntax-colored (token_type, token_text) pairs making up the line.
    tokens: list[tuple[str, str]]
    # 1-based line number in the source file.
    number: TLineNo
    # Coverage category: "exc", "mis", "par", "run", or "" for other lines.
    category: str
    # True if this line is an executable statement.
    statement: bool
    # Dynamic contexts recorded for this line.
    contexts: list[str]
    # Short label summarizing the contexts (e.g. "(empty)" or "3 ctx").
    contexts_label: str
    # Full list of context names to display.
    context_list: list[str]
    # Brief labels for missing branch destinations ("exit" or a line number).
    short_annotations: list[str]
    # Full descriptions of each missing branch.
    long_annotations: list[str]
    # The remaining fields are filled in later, while rendering the HTML page:
    html: str = ""
    context_str: str | None = None
    annotate: str | None = None
    annotate_long: str | None = None
    css_class: str = ""
|
||||
|
||||
|
||||
@dataclass
class FileData:
    """The data for each source file of HTML output."""
    # The file's path as displayed in the report.
    relative_filename: str
    # Coverage summary numbers for the file.
    nums: Numbers
    # Per-line rendering data, in source order.
    lines: list[LineData]
|
||||
|
||||
|
||||
class HtmlDataGeneration:
    """Generate structured data to be turned into HTML reports."""

    # Label used for the unnamed ("") dynamic context.
    EMPTY = "(empty)"

    def __init__(self, cov: Coverage) -> None:
        self.coverage = cov
        self.config = self.coverage.config
        data = self.coverage.get_data()
        self.has_arcs = data.has_arcs()
        if self.config.show_contexts:
            if data.measured_contexts() == {""}:
                self.coverage._warn("No contexts were measured")
        # Restrict context queries to the configured report contexts.
        data.set_query_contexts(self.config.report_contexts)

    def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData:
        """Produce the data needed for one file's report.

        Builds a LineData for every tokenized source line, classifying each
        line into a coverage category and attaching branch annotations and
        dynamic-context information when configured.
        """
        if self.has_arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()
            arcs_executed = analysis.arcs_executed()

        if self.config.show_contexts:
            contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename)

        lines = []

        for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
            # Figure out how to mark this line.
            category = ""
            short_annotations = []
            long_annotations = []

            if lineno in analysis.excluded:
                category = "exc"
            elif lineno in analysis.missing:
                category = "mis"
            elif self.has_arcs and lineno in missing_branch_arcs:
                # Partial branch: executed, but some branch destinations missed.
                category = "par"
                for b in missing_branch_arcs[lineno]:
                    if b < 0:
                        # Negative destinations mean the branch exits the function.
                        short_annotations.append("exit")
                    else:
                        short_annotations.append(str(b))
                    long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
            elif lineno in analysis.statements:
                category = "run"

            contexts = []
            contexts_label = ""
            context_list = []
            if category and self.config.show_contexts:
                contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ()))
                if contexts == [self.EMPTY]:
                    contexts_label = self.EMPTY
                else:
                    contexts_label = f"{len(contexts)} ctx"
                context_list = contexts

            lines.append(LineData(
                tokens=tokens,
                number=lineno,
                category=category,
                statement=(lineno in analysis.statements),
                contexts=contexts,
                contexts_label=contexts_label,
                context_list=context_list,
                short_annotations=short_annotations,
                long_annotations=long_annotations,
            ))

        file_data = FileData(
            relative_filename=fr.relative_filename(),
            nums=analysis.numbers,
            lines=lines,
        )

        return file_data
|
||||
|
||||
|
||||
class FileToReport:
    """A file we're considering reporting."""

    def __init__(self, fr: FileReporter, analysis: Analysis) -> None:
        self.fr = fr
        self.analysis = analysis
        # Flatten the relative path into a safe base name for output files.
        root = flat_rootname(fr.relative_filename())
        self.rootname = root
        self.html_filename = root + ".html"
|
||||
|
||||
|
||||
# Alphabet of characters that are safe to embed directly in HTML attributes.
HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~"

@functools.lru_cache(maxsize=None)
def encode_int(n: int) -> str:
    """Create a short HTML-safe string from an integer, using HTML_SAFE."""
    if n == 0:
        return HTML_SAFE[0]

    # Repeated divmod produces base-len(HTML_SAFE) digits,
    # least-significant first.
    base = len(HTML_SAFE)
    digits = []
    while n:
        n, rem = divmod(n, base)
        digits.append(HTML_SAFE[rem])
    return "".join(digits)
|
||||
|
||||
|
||||
class HtmlReporter:
    """HTML reporting."""

    # These files will be copied from the htmlfiles directory to the output
    # directory.
    STATIC_FILES = [
        "style.css",
        "coverage_html.js",
        "keybd_closed.png",
        "keybd_open.png",
        "favicon_32.png",
    ]

    def __init__(self, cov: Coverage) -> None:
        self.coverage = cov
        self.config = self.coverage.config
        self.directory = self.config.html_dir

        # HTML-specific skip settings fall back to the general report settings.
        self.skip_covered = self.config.html_skip_covered
        if self.skip_covered is None:
            self.skip_covered = self.config.skip_covered
        self.skip_empty = self.config.html_skip_empty
        if self.skip_empty is None:
            self.skip_empty = self.config.skip_empty
        self.skipped_covered_count = 0
        self.skipped_empty_count = 0

        title = self.config.html_title

        self.extra_css: str | None
        if self.config.extra_css:
            self.extra_css = os.path.basename(self.config.extra_css)
        else:
            self.extra_css = None

        self.data = self.coverage.get_data()
        self.has_arcs = self.data.has_arcs()

        self.file_summaries: list[IndexInfoDict] = []
        self.all_files_nums: list[Numbers] = []
        self.incr = IncrementalChecker(self.directory)
        self.datagen = HtmlDataGeneration(self.coverage)
        self.totals = Numbers(precision=self.config.precision)
        # .gitignore is only written when we created/filled an empty directory.
        self.directory_was_empty = False
        self.first_fr = None
        self.final_fr = None

        self.template_globals = {
            # Functions available in the templates.
            "escape": escape,
            "pair": pair,
            "len": len,

            # Constants for this report.
            "__url__": __url__,
            "__version__": coverage.__version__,
            "title": title,
            "time_stamp": format_local_datetime(datetime.datetime.now()),
            "extra_css": self.extra_css,
            "has_arcs": self.has_arcs,
            "show_contexts": self.config.show_contexts,

            # Constants for all reports.
            # These css classes determine which lines are highlighted by default.
            "category": {
                "exc": "exc show_exc",
                "mis": "mis show_mis",
                "par": "par run show_par",
                "run": "run",
            },
        }
        self.pyfile_html_source = read_data("pyfile.html")
        self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)

    def report(self, morfs: Iterable[TMorf] | None) -> float:
        """Generate an HTML report for `morfs`.

        `morfs` is a list of modules or file names.

        Returns the total percentage covered, or 0 if there were no
        statements at all.
        """
        # Read the status data and check that this run used the same
        # global data as the last run.
        self.incr.read()
        self.incr.check_global_data(self.config, self.pyfile_html_source)

        # Process all the files. For each page we need to supply a link
        # to the next and previous page.
        files_to_report = []

        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            ftr = FileToReport(fr, analysis)
            should = self.should_report_file(ftr)
            if should:
                files_to_report.append(ftr)
            else:
                # A skipped file may have a stale page from an earlier run.
                file_be_gone(os.path.join(self.directory, ftr.html_filename))

        # Each page links to its neighbors; the first and last link back
        # to the index.
        for i, ftr in enumerate(files_to_report):
            if i == 0:
                prev_html = "index.html"
            else:
                prev_html = files_to_report[i - 1].html_filename
            if i == len(files_to_report) - 1:
                next_html = "index.html"
            else:
                next_html = files_to_report[i + 1].html_filename
            self.write_html_file(ftr, prev_html, next_html)

        if not self.all_files_nums:
            raise NoDataError("No data to report.")

        self.totals = cast(Numbers, sum(self.all_files_nums))

        # Write the index file.
        if files_to_report:
            first_html = files_to_report[0].html_filename
            final_html = files_to_report[-1].html_filename
        else:
            first_html = final_html = "index.html"
        self.index_file(first_html, final_html)

        self.make_local_static_report_files()
        return self.totals.n_statements and self.totals.pc_covered

    def make_directory(self) -> None:
        """Make sure our htmlcov directory exists."""
        ensure_dir(self.directory)
        if not os.listdir(self.directory):
            self.directory_was_empty = True

    def make_local_static_report_files(self) -> None:
        """Make local instances of static files for HTML report."""
        # The files we provide must always be copied.
        for static in self.STATIC_FILES:
            shutil.copyfile(data_filename(static), os.path.join(self.directory, static))

        # Only write the .gitignore file if the directory was originally empty.
        # .gitignore can't be copied from the source tree because it would
        # prevent the static files from being checked in.
        if self.directory_was_empty:
            with open(os.path.join(self.directory, ".gitignore"), "w") as fgi:
                fgi.write("# Created by coverage.py\n*\n")

        # The user may have extra CSS they want copied.
        if self.extra_css:
            assert self.config.extra_css is not None
            shutil.copyfile(self.config.extra_css, os.path.join(self.directory, self.extra_css))

    def should_report_file(self, ftr: FileToReport) -> bool:
        """Determine if we'll report this file.

        Side effect: the file's numbers are always accumulated into
        `self.all_files_nums`, even when the file is skipped.
        """
        # Get the numbers for this file.
        nums = ftr.analysis.numbers
        self.all_files_nums.append(nums)

        if self.skip_covered:
            # Don't report on 100% files.
            no_missing_lines = (nums.n_missing == 0)
            no_missing_branches = (nums.n_partial_branches == 0)
            if no_missing_lines and no_missing_branches:
                # If there's an existing file, remove it.
                self.skipped_covered_count += 1
                return False

        if self.skip_empty:
            # Don't report on empty files.
            if nums.n_statements == 0:
                self.skipped_empty_count += 1
                return False

        return True

    def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) -> None:
        """Generate an HTML file for one source file."""
        self.make_directory()

        # Find out if the file on disk is already correct.
        if self.incr.can_skip_file(self.data, ftr.fr, ftr.rootname):
            self.file_summaries.append(self.incr.index_info(ftr.rootname))
            return

        # Write the HTML page for this file.
        file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis)

        # Give each context a short code, most frequent first, so the page's
        # embedded context data stays compact.
        contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts)
        context_codes = {y: i for (i, y) in enumerate(x[0] for x in contexts.most_common())}
        if context_codes:
            contexts_json = json.dumps(
                {encode_int(v): k for (k, v) in context_codes.items()},
                indent=2,
            )
        else:
            contexts_json = None

        for ldata in file_data.lines:
            # Build the HTML for the line.
            html_parts = []
            for tok_type, tok_text in ldata.tokens:
                if tok_type == "ws":
                    html_parts.append(escape(tok_text))
                else:
                    tok_html = escape(tok_text) or "&nbsp;"
                    html_parts.append(f'<span class="{tok_type}">{tok_html}</span>')
            ldata.html = "".join(html_parts)
            if ldata.context_list:
                encoded_contexts = [
                    encode_int(context_codes[c_context]) for c_context in ldata.context_list
                ]
                code_width = max(len(ec) for ec in encoded_contexts)
                # Fixed-width concatenation: the leading digit is the width.
                ldata.context_str = (
                    str(code_width)
                    + "".join(ec.ljust(code_width) for ec in encoded_contexts)
                )
            else:
                ldata.context_str = ""

            if ldata.short_annotations:
                # 202F is NARROW NO-BREAK SPACE.
                # 219B is RIGHTWARDS ARROW WITH STROKE.
                ldata.annotate = ", ".join(
                    f"{ldata.number} ↛ {d}"
                    for d in ldata.short_annotations
                )
            else:
                ldata.annotate = None

            if ldata.long_annotations:
                longs = ldata.long_annotations
                if len(longs) == 1:
                    ldata.annotate_long = longs[0]
                else:
                    ldata.annotate_long = "{:d} missed branches: {}".format(
                        len(longs),
                        ", ".join(
                            f"{num:d}) {ann_long}"
                            for num, ann_long in enumerate(longs, start=1)
                        ),
                    )
            else:
                ldata.annotate_long = None

            css_classes = []
            if ldata.category:
                css_classes.append(
                    self.template_globals["category"][ldata.category],  # type: ignore[index]
                )
            ldata.css_class = " ".join(css_classes) or "pln"

        html_path = os.path.join(self.directory, ftr.html_filename)
        html = self.source_tmpl.render({
            **file_data.__dict__,
            "contexts_json": contexts_json,
            "prev_html": prev_html,
            "next_html": next_html,
        })
        write_html(html_path, html)

        # Save this file's information for the index file.
        index_info: IndexInfoDict = {
            "nums": ftr.analysis.numbers,
            "html_filename": ftr.html_filename,
            "relative_filename": ftr.fr.relative_filename(),
        }
        self.file_summaries.append(index_info)
        self.incr.set_index_info(ftr.rootname, index_info)

    def index_file(self, first_html: str, final_html: str) -> None:
        """Write the index.html file for this report."""
        self.make_directory()
        index_tmpl = Templite(read_data("index.html"), self.template_globals)

        skipped_covered_msg = skipped_empty_msg = ""
        if self.skipped_covered_count:
            n = self.skipped_covered_count
            skipped_covered_msg = f"{n} file{plural(n)} skipped due to complete coverage."
        if self.skipped_empty_count:
            n = self.skipped_empty_count
            skipped_empty_msg = f"{n} empty file{plural(n)} skipped."

        html = index_tmpl.render({
            "files": self.file_summaries,
            "totals": self.totals,
            "skipped_covered_msg": skipped_covered_msg,
            "skipped_empty_msg": skipped_empty_msg,
            "first_html": first_html,
            "final_html": final_html,
        })

        index_file = os.path.join(self.directory, "index.html")
        write_html(index_file, html)

        print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}")
        self.coverage._message(f"Wrote HTML report to {print_href}")

        # Write the latest hashes for next time.
        self.incr.write()
|
||||
|
||||
|
||||
class IncrementalChecker:
    """Logic and data to support incremental reporting."""

    STATUS_FILE = "status.json"
    STATUS_FORMAT = 2
    NOTE = (
        "This file is an internal implementation detail to speed up HTML report"
        + " generation. Its format can change at any time. You might be looking"
        + " for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json"
    )

    # The data looks like:
    #
    # {
    #     "format": 2,
    #     "globals": "540ee119c15d52a68a53fe6f0897346d",
    #     "version": "4.0a1",
    #     "files": {
    #         "cogapp___init__": {
    #             "hash": "e45581a5b48f879f301c0f30bf77a50c",
    #             "index": {
    #                 "html_filename": "cogapp___init__.html",
    #                 "relative_filename": "cogapp/__init__",
    #                 "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
    #             }
    #         },
    #         ...
    #         "cogapp_whiteutils": {
    #             "hash": "8504bb427fc488c4176809ded0277d51",
    #             "index": {
    #                 "html_filename": "cogapp_whiteutils.html",
    #                 "relative_filename": "cogapp/whiteutils",
    #                 "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
    #             }
    #         }
    #     }
    # }

    def __init__(self, directory: str) -> None:
        self.directory = directory
        self.reset()

    def reset(self) -> None:
        """Initialize to empty. Causes all files to be reported."""
        self.globals = ""
        self.files: dict[str, FileInfoDict] = {}

    def read(self) -> None:
        """Read the information we stored last time.

        Any unreadable, wrong-format, or wrong-version status file resets
        the checker so every file is reported afresh.
        """
        usable = False
        try:
            status_file = os.path.join(self.directory, self.STATUS_FILE)
            with open(status_file) as fstatus:
                status = json.load(fstatus)
        except (OSError, ValueError):
            usable = False
        else:
            usable = True
            if status["format"] != self.STATUS_FORMAT:
                usable = False
            elif status["version"] != coverage.__version__:
                usable = False

        if usable:
            self.files = {}
            for filename, fileinfo in status["files"].items():
                # "nums" is stored as a plain list; rebuild the Numbers object.
                fileinfo["index"]["nums"] = Numbers(*fileinfo["index"]["nums"])
                self.files[filename] = fileinfo
            self.globals = status["globals"]
        else:
            self.reset()

    def write(self) -> None:
        """Write the current status."""
        status_file = os.path.join(self.directory, self.STATUS_FILE)
        files = {}
        for filename, fileinfo in self.files.items():
            index = fileinfo["index"]
            # Serialize Numbers back into a JSON-friendly list.
            index["nums"] = index["nums"].init_args()  # type: ignore[typeddict-item]
            files[filename] = fileinfo

        status = {
            "note": self.NOTE,
            "format": self.STATUS_FORMAT,
            "version": coverage.__version__,
            "globals": self.globals,
            "files": files,
        }
        with open(status_file, "w") as fout:
            json.dump(status, fout, separators=(",", ":"))

    def check_global_data(self, *data: Any) -> None:
        """Check the global data that can affect incremental reporting.

        If the combined hash of `data` differs from the stored one, all
        per-file state is discarded so every page is regenerated.
        """
        m = Hasher()
        for d in data:
            m.update(d)
        these_globals = m.hexdigest()
        if self.globals != these_globals:
            self.reset()
            self.globals = these_globals

    def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool:
        """Can we skip reporting this file?

        `data` is a CoverageData object, `fr` is a `FileReporter`, and
        `rootname` is the name being used for the file.
        """
        # The hash covers both the source text and its coverage data.
        m = Hasher()
        m.update(fr.source().encode("utf-8"))
        add_data_to_hash(data, fr.filename, m)
        this_hash = m.hexdigest()

        that_hash = self.file_hash(rootname)

        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            return True
        else:
            self.set_file_hash(rootname, this_hash)
            return False

    def file_hash(self, fname: str) -> str:
        """Get the hash of `fname`'s contents."""
        return self.files.get(fname, {}).get("hash", "")    # type: ignore[call-overload]

    def set_file_hash(self, fname: str, val: str) -> None:
        """Set the hash of `fname`'s contents."""
        self.files.setdefault(fname, {})["hash"] = val      # type: ignore[typeddict-item]

    def index_info(self, fname: str) -> IndexInfoDict:
        """Get the information for index.html for `fname`."""
        return self.files.get(fname, {}).get("index", {})   # type: ignore

    def set_index_info(self, fname: str, info: IndexInfoDict) -> None:
        """Set the information for index.html for `fname`."""
        self.files.setdefault(fname, {})["index"] = info    # type: ignore[typeddict-item]
|
||||
|
||||
|
||||
# Helpers for templates and generating HTML
|
||||
|
||||
def escape(t: str) -> str:
    """HTML-escape the text in `t`.

    This is only suitable for HTML text, not attributes.

    """
    # "&" must be converted first so already-escaped text isn't double-escaped.
    escaped = t.replace("&", "&amp;")
    return escaped.replace("<", "&lt;")
|
||||
|
||||
|
||||
def pair(ratio: tuple[int, int]) -> str:
    """Format a pair of numbers so JavaScript can read them in an attribute."""
    first, second = ratio
    return "{} {}".format(first, second)
|
||||
|
|
@ -0,0 +1,624 @@
|
|||
// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
// Coverage.py HTML report browser code.
|
||||
/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
|
||||
/*global coverage: true, document, window, $ */
|
||||
|
||||
// Namespace object holding all of the report browser code.
coverage = {};
|
||||
|
||||
// General helpers

// Return a debounced wrapper of `callback` that runs `wait` ms after the
// most recent call; earlier pending invocations are cancelled.
function debounce(callback, wait) {
    let timeoutId = null;
    return function(...args) {
        clearTimeout(timeoutId);
        timeoutId = setTimeout(() => {
            callback.apply(this, args);
        }, wait);
    };
};
|
||||
|
||||
// Return true if any part of `element` is inside the viewport, treating the
// top 30px as off-screen (the sticky header area).
function checkVisible(element) {
    const rect = element.getBoundingClientRect();
    const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight);
    const viewTop = 30;
    return !(rect.bottom < viewTop || rect.top >= viewBottom);
}
|
||||
|
||||
// Attach `fn` as a click handler to the first element matching selector
// `sel`, if such an element exists on the page.
function on_click(sel, fn) {
    const elt = document.querySelector(sel);
    if (elt) {
        elt.addEventListener("click", fn);
    }
}
|
||||
|
||||
// Helpers for table sorting

// Return the sortable value of a table cell: a lone <time> child's dateTime
// or a lone <data> child's value when present, otherwise the cell's text.
function getCellValue(row, column = 0) {
    const cell = row.cells[column]  // nosemgrep: eslint.detect-object-injection
    if (cell.childElementCount == 1) {
        const child = cell.firstElementChild
        if (child instanceof HTMLTimeElement && child.dateTime) {
            return child.dateTime
        } else if (child instanceof HTMLDataElement && child.value) {
            return child.value
        }
    }
    return cell.innerText || cell.textContent;
}
|
||||
|
||||
// Compare two table rows by the value in `column`: numerically when both
// values parse as numbers, otherwise a locale-aware natural string compare.
function rowComparator(rowA, rowB, column = 0) {
    let valueA = getCellValue(rowA, column);
    let valueB = getCellValue(rowB, column);
    if (!isNaN(valueA) && !isNaN(valueB)) {
        return valueA - valueB
    }
    return valueA.localeCompare(valueB, undefined, {numeric: true});
}
|
||||
|
||||
// Sort the table body rows by the column under header cell `th`, toggling
// between ascending and descending on repeated clicks.
function sortColumn(th) {
    // Get the current sorting direction of the selected header,
    // clear state on other headers and then set the new sorting direction
    const currentSortOrder = th.getAttribute("aria-sort");
    [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none"));
    if (currentSortOrder === "none") {
        th.setAttribute("aria-sort", th.dataset.defaultSortOrder || "ascending");
    } else {
        th.setAttribute("aria-sort", currentSortOrder === "ascending" ? "descending" : "ascending");
    }

    const column = [...th.parentElement.cells].indexOf(th)

    // Sort all rows and afterwards append them in order to move them in the DOM
    Array.from(th.closest("table").querySelectorAll("tbody tr"))
        .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (th.getAttribute("aria-sort") === "ascending" ? 1 : -1))
        .forEach(tr => tr.parentElement.appendChild(tr) );
}
|
||||
|
||||
// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key.
// NOTE: one document-level keypress listener is registered per shortcut element.
coverage.assign_shortkeys = function () {
    document.querySelectorAll("[data-shortcut]").forEach(element => {
        document.addEventListener("keypress", event => {
            if (event.target.tagName.toLowerCase() === "input") {
                return; // ignore keypress from search filter
            }
            if (event.key === element.dataset.shortcut) {
                element.click();
            }
        });
    });
};
|
||||
|
||||
// Create the events for the filter box.
// Hides index rows whose filename doesn't contain the filter text, and
// recomputes the footer totals over the visible rows only.
coverage.wire_up_filter = function () {
    // Cache elements.
    const table = document.querySelector("table.index");
    const table_body_rows = table.querySelectorAll("tbody tr");
    const no_rows = document.getElementById("no_rows");

    // Observe filter keyevents.
    document.getElementById("filter").addEventListener("input", debounce(event => {
        // Keep running total of each metric, first index contains number of shown rows
        const totals = new Array(table.rows[0].cells.length).fill(0);
        // Accumulate the percentage as fraction
        totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection

        // Hide / show elements.
        table_body_rows.forEach(row => {
            if (!row.cells[0].textContent.includes(event.target.value)) {
                // hide
                row.classList.add("hidden");
                return;
            }

            // show
            row.classList.remove("hidden");
            totals[0]++;

            for (let column = 1; column < totals.length; column++) {
                // Accumulate dynamic totals
                // Fix: "cell" was previously assigned without a declaration,
                // leaking an implicit global (an error in strict mode).
                const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection
                if (column === totals.length - 1) {
                    // Last column contains percentage
                    const [numer, denom] = cell.dataset.ratio.split(" ");
                    totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection
                    totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection
                } else {
                    totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection
                }
            }
        });

        // Show placeholder if no rows will be displayed.
        if (!totals[0]) {
            // Show placeholder, hide table.
            no_rows.style.display = "block";
            table.style.display = "none";
            return;
        }

        // Hide placeholder, show table.
        no_rows.style.display = null;
        table.style.display = null;

        const footer = table.tFoot.rows[0];
        // Calculate new dynamic sum values based on visible rows.
        for (let column = 1; column < totals.length; column++) {
            // Get footer cell element.
            const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection

            // Set value into dynamic footer cell element.
            if (column === totals.length - 1) {
                // Percentage column uses the numerator and denominator,
                // and adapts to the number of decimal places.
                const match = /\.([0-9]+)/.exec(cell.textContent);
                const places = match ? match[1].length : 0;
                const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection
                cell.dataset.ratio = `${numer} ${denom}`;
                // Check denom to prevent NaN if filtered files contain no statements
                cell.textContent = denom
                    ? `${(numer * 100 / denom).toFixed(places)}%`
                    : `${(100).toFixed(places)}%`;
            } else {
                cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection
            }
        }
    }));

    // Trigger change event on setup, to force filter on page refresh
    // (filter value may still be present).
    document.getElementById("filter").dispatchEvent(new Event("input"));
};
|
||||
|
||||
// localStorage key under which the index page's sort settings persist.
coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2";
|
||||
|
||||
// Loaded on index.html
// Wires up shortcuts, the filter box, column sorting, and restores/saves
// the sort settings via localStorage.
coverage.index_ready = function () {
    coverage.assign_shortkeys();
    coverage.wire_up_filter();
    document.querySelectorAll("[data-sortable] th[aria-sort]").forEach(
        th => th.addEventListener("click", e => sortColumn(e.target))
    );

    // Look for a localStorage item containing previous sort settings:
    const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE);

    if (stored_list) {
        const {column, direction} = JSON.parse(stored_list);
        const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column]; // nosemgrep: eslint.detect-object-injection
        // Set the opposite direction, then click() toggles it back to the
        // stored one while performing the actual sort.
        th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending");
        th.click()
    }

    // Watch for page unload events so we can save the final sort settings:
    window.addEventListener("unload", function () {
        const th = document.querySelector('[data-sortable] th[aria-sort="ascending"], [data-sortable] [aria-sort="descending"]');
        if (!th) {
            return;
        }
        localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({
            column: [...th.parentElement.cells].indexOf(th),
            direction: th.getAttribute("aria-sort"),
        }));
    });

    on_click(".button_prev_file", coverage.to_prev_file);
    on_click(".button_next_file", coverage.to_next_file);

    on_click(".button_show_hide_help", coverage.show_hide_help);
};
|
||||
|
||||
// -- pyfile stuff --

// localStorage key under which the per-line category filters persist.
coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";

// Loaded on each source-file page: restores the selected line, wires up all
// of the page's buttons, applies the saved line filters, and builds the
// scroll markers.
coverage.pyfile_ready = function () {
    // If we're directed to a particular line number, highlight the line.
    var frag = location.hash;
    if (frag.length > 2 && frag[1] === "t") {
        document.querySelector(frag).closest(".n").classList.add("highlight");
        coverage.set_sel(parseInt(frag.substr(2), 10));
    } else {
        coverage.set_sel(0);
    }

    on_click(".button_toggle_run", coverage.toggle_lines);
    on_click(".button_toggle_mis", coverage.toggle_lines);
    on_click(".button_toggle_exc", coverage.toggle_lines);
    on_click(".button_toggle_par", coverage.toggle_lines);

    on_click(".button_next_chunk", coverage.to_next_chunk_nicely);
    on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely);
    on_click(".button_top_of_page", coverage.to_top);
    on_click(".button_first_chunk", coverage.to_first_chunk);

    on_click(".button_prev_file", coverage.to_prev_file);
    on_click(".button_next_file", coverage.to_next_file);
    on_click(".button_to_index", coverage.to_index);

    on_click(".button_show_hide_help", coverage.show_hide_help);

    coverage.filters = undefined;
    try {
        coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE);
    } catch(err) {}

    if (coverage.filters) {
        coverage.filters = JSON.parse(coverage.filters);
    }
    else {
        coverage.filters = {run: false, exc: true, mis: true, par: true};
    }

    // Fix: the loop variable was previously undeclared ("for (cls in ...)"),
    // creating an implicit global (an error in strict mode).
    for (const cls in coverage.filters) {
        coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection
    }

    coverage.assign_shortkeys();
    coverage.init_scroll_markers();
    coverage.wire_up_sticky_header();

    document.querySelectorAll("[id^=ctxs]").forEach(
        cbox => cbox.addEventListener("click", coverage.expand_contexts)
    );

    // Rebuild scroll markers when the window height changes.
    window.addEventListener("resize", coverage.build_scroll_markers);
};
|
||||
|
||||
coverage.toggle_lines = function (event) {
|
||||
const btn = event.target.closest("button");
|
||||
const category = btn.value
|
||||
const show = !btn.classList.contains("show_" + category);
|
||||
coverage.set_line_visibilty(category, show);
|
||||
coverage.build_scroll_markers();
|
||||
coverage.filters[category] = show;
|
||||
try {
|
||||
localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters));
|
||||
} catch(err) {}
|
||||
};
|
||||
|
||||
coverage.set_line_visibilty = function (category, should_show) {
|
||||
const cls = "show_" + category;
|
||||
const btn = document.querySelector(".button_toggle_" + category);
|
||||
if (btn) {
|
||||
if (should_show) {
|
||||
document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls));
|
||||
btn.classList.add(cls);
|
||||
}
|
||||
else {
|
||||
document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls));
|
||||
btn.classList.remove(cls);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Return the nth line div.
|
||||
coverage.line_elt = function (n) {
|
||||
return document.getElementById("t" + n)?.closest("p");
|
||||
};
|
||||
|
||||
// Set the selection. b and e are line numbers.
|
||||
coverage.set_sel = function (b, e) {
|
||||
// The first line selected.
|
||||
coverage.sel_begin = b;
|
||||
// The next line not selected.
|
||||
coverage.sel_end = (e === undefined) ? b+1 : e;
|
||||
};
|
||||
|
||||
coverage.to_top = function () {
|
||||
coverage.set_sel(0, 1);
|
||||
coverage.scroll_window(0);
|
||||
};
|
||||
|
||||
coverage.to_first_chunk = function () {
|
||||
coverage.set_sel(0, 1);
|
||||
coverage.to_next_chunk();
|
||||
};
|
||||
|
||||
coverage.to_prev_file = function () {
|
||||
window.location = document.getElementById("prevFileLink").href;
|
||||
}
|
||||
|
||||
coverage.to_next_file = function () {
|
||||
window.location = document.getElementById("nextFileLink").href;
|
||||
}
|
||||
|
||||
coverage.to_index = function () {
|
||||
location.href = document.getElementById("indexLink").href;
|
||||
}
|
||||
|
||||
coverage.show_hide_help = function () {
|
||||
const helpCheck = document.getElementById("help_panel_state")
|
||||
helpCheck.checked = !helpCheck.checked;
|
||||
}
|
||||
|
||||
// Return a string indicating what kind of chunk this line belongs to,
|
||||
// or null if not a chunk.
|
||||
coverage.chunk_indicator = function (line_elt) {
|
||||
const classes = line_elt?.className;
|
||||
if (!classes) {
|
||||
return null;
|
||||
}
|
||||
const match = classes.match(/\bshow_\w+\b/);
|
||||
if (!match) {
|
||||
return null;
|
||||
}
|
||||
return match[0];
|
||||
};
|
||||
|
||||
coverage.to_next_chunk = function () {
|
||||
const c = coverage;
|
||||
|
||||
// Find the start of the next colored chunk.
|
||||
var probe = c.sel_end;
|
||||
var chunk_indicator, probe_line;
|
||||
while (true) {
|
||||
probe_line = c.line_elt(probe);
|
||||
if (!probe_line) {
|
||||
return;
|
||||
}
|
||||
chunk_indicator = c.chunk_indicator(probe_line);
|
||||
if (chunk_indicator) {
|
||||
break;
|
||||
}
|
||||
probe++;
|
||||
}
|
||||
|
||||
// There's a next chunk, `probe` points to it.
|
||||
var begin = probe;
|
||||
|
||||
// Find the end of this chunk.
|
||||
var next_indicator = chunk_indicator;
|
||||
while (next_indicator === chunk_indicator) {
|
||||
probe++;
|
||||
probe_line = c.line_elt(probe);
|
||||
next_indicator = c.chunk_indicator(probe_line);
|
||||
}
|
||||
c.set_sel(begin, probe);
|
||||
c.show_selection();
|
||||
};
|
||||
|
||||
coverage.to_prev_chunk = function () {
|
||||
const c = coverage;
|
||||
|
||||
// Find the end of the prev colored chunk.
|
||||
var probe = c.sel_begin-1;
|
||||
var probe_line = c.line_elt(probe);
|
||||
if (!probe_line) {
|
||||
return;
|
||||
}
|
||||
var chunk_indicator = c.chunk_indicator(probe_line);
|
||||
while (probe > 1 && !chunk_indicator) {
|
||||
probe--;
|
||||
probe_line = c.line_elt(probe);
|
||||
if (!probe_line) {
|
||||
return;
|
||||
}
|
||||
chunk_indicator = c.chunk_indicator(probe_line);
|
||||
}
|
||||
|
||||
// There's a prev chunk, `probe` points to its last line.
|
||||
var end = probe+1;
|
||||
|
||||
// Find the beginning of this chunk.
|
||||
var prev_indicator = chunk_indicator;
|
||||
while (prev_indicator === chunk_indicator) {
|
||||
probe--;
|
||||
if (probe <= 0) {
|
||||
return;
|
||||
}
|
||||
probe_line = c.line_elt(probe);
|
||||
prev_indicator = c.chunk_indicator(probe_line);
|
||||
}
|
||||
c.set_sel(probe+1, end);
|
||||
c.show_selection();
|
||||
};
|
||||
|
||||
// Returns 0, 1, or 2: how many of the two ends of the selection are on
|
||||
// the screen right now?
|
||||
coverage.selection_ends_on_screen = function () {
|
||||
if (coverage.sel_begin === 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const begin = coverage.line_elt(coverage.sel_begin);
|
||||
const end = coverage.line_elt(coverage.sel_end-1);
|
||||
|
||||
return (
|
||||
(checkVisible(begin) ? 1 : 0)
|
||||
+ (checkVisible(end) ? 1 : 0)
|
||||
);
|
||||
};
|
||||
|
||||
coverage.to_next_chunk_nicely = function () {
|
||||
if (coverage.selection_ends_on_screen() === 0) {
|
||||
// The selection is entirely off the screen:
|
||||
// Set the top line on the screen as selection.
|
||||
|
||||
// This will select the top-left of the viewport
|
||||
// As this is most likely the span with the line number we take the parent
|
||||
const line = document.elementFromPoint(0, 0).parentElement;
|
||||
if (line.parentElement !== document.getElementById("source")) {
|
||||
// The element is not a source line but the header or similar
|
||||
coverage.select_line_or_chunk(1);
|
||||
} else {
|
||||
// We extract the line number from the id
|
||||
coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
|
||||
}
|
||||
}
|
||||
coverage.to_next_chunk();
|
||||
};
|
||||
|
||||
coverage.to_prev_chunk_nicely = function () {
|
||||
if (coverage.selection_ends_on_screen() === 0) {
|
||||
// The selection is entirely off the screen:
|
||||
// Set the lowest line on the screen as selection.
|
||||
|
||||
// This will select the bottom-left of the viewport
|
||||
// As this is most likely the span with the line number we take the parent
|
||||
const line = document.elementFromPoint(document.documentElement.clientHeight-1, 0).parentElement;
|
||||
if (line.parentElement !== document.getElementById("source")) {
|
||||
// The element is not a source line but the header or similar
|
||||
coverage.select_line_or_chunk(coverage.lines_len);
|
||||
} else {
|
||||
// We extract the line number from the id
|
||||
coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
|
||||
}
|
||||
}
|
||||
coverage.to_prev_chunk();
|
||||
};
|
||||
|
||||
// Select line number lineno, or if it is in a colored chunk, select the
|
||||
// entire chunk
|
||||
coverage.select_line_or_chunk = function (lineno) {
|
||||
var c = coverage;
|
||||
var probe_line = c.line_elt(lineno);
|
||||
if (!probe_line) {
|
||||
return;
|
||||
}
|
||||
var the_indicator = c.chunk_indicator(probe_line);
|
||||
if (the_indicator) {
|
||||
// The line is in a highlighted chunk.
|
||||
// Search backward for the first line.
|
||||
var probe = lineno;
|
||||
var indicator = the_indicator;
|
||||
while (probe > 0 && indicator === the_indicator) {
|
||||
probe--;
|
||||
probe_line = c.line_elt(probe);
|
||||
if (!probe_line) {
|
||||
break;
|
||||
}
|
||||
indicator = c.chunk_indicator(probe_line);
|
||||
}
|
||||
var begin = probe + 1;
|
||||
|
||||
// Search forward for the last line.
|
||||
probe = lineno;
|
||||
indicator = the_indicator;
|
||||
while (indicator === the_indicator) {
|
||||
probe++;
|
||||
probe_line = c.line_elt(probe);
|
||||
indicator = c.chunk_indicator(probe_line);
|
||||
}
|
||||
|
||||
coverage.set_sel(begin, probe);
|
||||
}
|
||||
else {
|
||||
coverage.set_sel(lineno);
|
||||
}
|
||||
};
|
||||
|
||||
coverage.show_selection = function () {
|
||||
// Highlight the lines in the chunk
|
||||
document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight"));
|
||||
for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) {
|
||||
coverage.line_elt(probe).querySelector(".n").classList.add("highlight");
|
||||
}
|
||||
|
||||
coverage.scroll_to_selection();
|
||||
};
|
||||
|
||||
coverage.scroll_to_selection = function () {
|
||||
// Scroll the page if the chunk isn't fully visible.
|
||||
if (coverage.selection_ends_on_screen() < 2) {
|
||||
const element = coverage.line_elt(coverage.sel_begin);
|
||||
coverage.scroll_window(element.offsetTop - 60);
|
||||
}
|
||||
};
|
||||
|
||||
coverage.scroll_window = function (to_pos) {
|
||||
window.scroll({top: to_pos, behavior: "smooth"});
|
||||
};
|
||||
|
||||
coverage.init_scroll_markers = function () {
|
||||
// Init some variables
|
||||
coverage.lines_len = document.querySelectorAll("#source > p").length;
|
||||
|
||||
// Build html
|
||||
coverage.build_scroll_markers();
|
||||
};
|
||||
|
||||
coverage.build_scroll_markers = function () {
|
||||
const temp_scroll_marker = document.getElementById("scroll_marker")
|
||||
if (temp_scroll_marker) temp_scroll_marker.remove();
|
||||
// Don't build markers if the window has no scroll bar.
|
||||
if (document.body.scrollHeight <= window.innerHeight) {
|
||||
return;
|
||||
}
|
||||
|
||||
const marker_scale = window.innerHeight / document.body.scrollHeight;
|
||||
const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10);
|
||||
|
||||
let previous_line = -99, last_mark, last_top;
|
||||
|
||||
const scroll_marker = document.createElement("div");
|
||||
scroll_marker.id = "scroll_marker";
|
||||
document.getElementById("source").querySelectorAll(
|
||||
"p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par"
|
||||
).forEach(element => {
|
||||
const line_top = Math.floor(element.offsetTop * marker_scale);
|
||||
const line_number = parseInt(element.querySelector(".n a").id.substr(1));
|
||||
|
||||
if (line_number === previous_line + 1) {
|
||||
// If this solid missed block just make previous mark higher.
|
||||
last_mark.style.height = `${line_top + line_height - last_top}px`;
|
||||
} else {
|
||||
// Add colored line in scroll_marker block.
|
||||
last_mark = document.createElement("div");
|
||||
last_mark.id = `m${line_number}`;
|
||||
last_mark.classList.add("marker");
|
||||
last_mark.style.height = `${line_height}px`;
|
||||
last_mark.style.top = `${line_top}px`;
|
||||
scroll_marker.append(last_mark);
|
||||
last_top = line_top;
|
||||
}
|
||||
|
||||
previous_line = line_number;
|
||||
});
|
||||
|
||||
// Append last to prevent layout calculation
|
||||
document.body.append(scroll_marker);
|
||||
};
|
||||
|
||||
coverage.wire_up_sticky_header = function () {
|
||||
const header = document.querySelector("header");
|
||||
const header_bottom = (
|
||||
header.querySelector(".content h2").getBoundingClientRect().top -
|
||||
header.getBoundingClientRect().top
|
||||
);
|
||||
|
||||
function updateHeader() {
|
||||
if (window.scrollY > header_bottom) {
|
||||
header.classList.add("sticky");
|
||||
} else {
|
||||
header.classList.remove("sticky");
|
||||
}
|
||||
}
|
||||
|
||||
window.addEventListener("scroll", updateHeader);
|
||||
updateHeader();
|
||||
};
|
||||
|
||||
coverage.expand_contexts = function (e) {
|
||||
var ctxs = e.target.parentNode.querySelector(".ctxs");
|
||||
|
||||
if (!ctxs.classList.contains("expanded")) {
|
||||
var ctxs_text = ctxs.textContent;
|
||||
var width = Number(ctxs_text[0]);
|
||||
ctxs.textContent = "";
|
||||
for (var i = 1; i < ctxs_text.length; i += width) {
|
||||
key = ctxs_text.substring(i, i + width).trim();
|
||||
ctxs.appendChild(document.createTextNode(contexts[key]));
|
||||
ctxs.appendChild(document.createElement("br"));
|
||||
}
|
||||
ctxs.classList.add("expanded");
|
||||
}
|
||||
};
|
||||
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
if (document.body.classList.contains("indexfile")) {
|
||||
coverage.index_ready();
|
||||
} else {
|
||||
coverage.pyfile_ready();
|
||||
}
|
||||
});
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 1.7 KiB |
142
.venv/lib/python3.10/site-packages/coverage/htmlfiles/index.html
Normal file
142
.venv/lib/python3.10/site-packages/coverage/htmlfiles/index.html
Normal file
|
|
@ -0,0 +1,142 @@
|
|||
{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
|
||||
{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
|
||||
<title>{{ title|escape }}</title>
|
||||
<link rel="icon" sizes="32x32" href="favicon_32.png">
|
||||
<link rel="stylesheet" href="style.css" type="text/css">
|
||||
{% if extra_css %}
|
||||
<link rel="stylesheet" href="{{ extra_css }}" type="text/css">
|
||||
{% endif %}
|
||||
<script type="text/javascript" src="coverage_html.js" defer></script>
|
||||
</head>
|
||||
<body class="indexfile">
|
||||
|
||||
<header>
|
||||
<div class="content">
|
||||
<h1>{{ title|escape }}:
|
||||
<span class="pc_cov">{{totals.pc_covered_str}}%</span>
|
||||
</h1>
|
||||
|
||||
<aside id="help_panel_wrapper">
|
||||
<input id="help_panel_state" type="checkbox">
|
||||
<label for="help_panel_state">
|
||||
<img id="keyboard_icon" src="keybd_closed.png" alt="Show/hide keyboard shortcuts" />
|
||||
</label>
|
||||
<div id="help_panel">
|
||||
<p class="legend">Shortcuts on this page</p>
|
||||
<div class="keyhelp">
|
||||
<p>
|
||||
<kbd>n</kbd>
|
||||
<kbd>s</kbd>
|
||||
<kbd>m</kbd>
|
||||
<kbd>x</kbd>
|
||||
{% if has_arcs %}
|
||||
<kbd>b</kbd>
|
||||
<kbd>p</kbd>
|
||||
{% endif %}
|
||||
<kbd>c</kbd>
|
||||
change column sorting
|
||||
</p>
|
||||
<p>
|
||||
<kbd>[</kbd>
|
||||
<kbd>]</kbd>
|
||||
prev/next file
|
||||
</p>
|
||||
<p>
|
||||
<kbd>?</kbd> show/hide this help
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</aside>
|
||||
|
||||
<form id="filter_container">
|
||||
<input id="filter" type="text" value="" placeholder="filter..." />
|
||||
</form>
|
||||
|
||||
<p class="text">
|
||||
<a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
|
||||
created at {{ time_stamp }}
|
||||
</p>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<main id="index">
|
||||
<table class="index" data-sortable>
|
||||
<thead>
|
||||
{# The title="" attr doesn"t work in Safari. #}
|
||||
<tr class="tablehead" title="Click to sort">
|
||||
<th class="name left" aria-sort="none" data-shortcut="n">Module</th>
|
||||
<th aria-sort="none" data-default-sort-order="descending" data-shortcut="s">statements</th>
|
||||
<th aria-sort="none" data-default-sort-order="descending" data-shortcut="m">missing</th>
|
||||
<th aria-sort="none" data-default-sort-order="descending" data-shortcut="x">excluded</th>
|
||||
{% if has_arcs %}
|
||||
<th aria-sort="none" data-default-sort-order="descending" data-shortcut="b">branches</th>
|
||||
<th aria-sort="none" data-default-sort-order="descending" data-shortcut="p">partial</th>
|
||||
{% endif %}
|
||||
<th class="right" aria-sort="none" data-shortcut="c">coverage</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for file in files %}
|
||||
<tr class="file">
|
||||
<td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
|
||||
<td>{{file.nums.n_statements}}</td>
|
||||
<td>{{file.nums.n_missing}}</td>
|
||||
<td>{{file.nums.n_excluded}}</td>
|
||||
{% if has_arcs %}
|
||||
<td>{{file.nums.n_branches}}</td>
|
||||
<td>{{file.nums.n_partial_branches}}</td>
|
||||
{% endif %}
|
||||
<td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
<tfoot>
|
||||
<tr class="total">
|
||||
<td class="name left">Total</td>
|
||||
<td>{{totals.n_statements}}</td>
|
||||
<td>{{totals.n_missing}}</td>
|
||||
<td>{{totals.n_excluded}}</td>
|
||||
{% if has_arcs %}
|
||||
<td>{{totals.n_branches}}</td>
|
||||
<td>{{totals.n_partial_branches}}</td>
|
||||
{% endif %}
|
||||
<td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
|
||||
</tr>
|
||||
</tfoot>
|
||||
</table>
|
||||
|
||||
<p id="no_rows">
|
||||
No items found using the specified filter.
|
||||
</p>
|
||||
|
||||
{% if skipped_covered_msg %}
|
||||
<p>{{ skipped_covered_msg }}</p>
|
||||
{% endif %}
|
||||
{% if skipped_empty_msg %}
|
||||
<p>{{ skipped_empty_msg }}</p>
|
||||
{% endif %}
|
||||
</main>
|
||||
|
||||
<footer>
|
||||
<div class="content">
|
||||
<p>
|
||||
<a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
|
||||
created at {{ time_stamp }}
|
||||
</p>
|
||||
</div>
|
||||
<aside class="hidden">
|
||||
<a id="prevFileLink" class="nav" href="{{ final_html }}"/>
|
||||
<a id="nextFileLink" class="nav" href="{{ first_html }}"/>
|
||||
<button type="button" class="button_prev_file" data-shortcut="["/>
|
||||
<button type="button" class="button_next_file" data-shortcut="]"/>
|
||||
<button type="button" class="button_show_hide_help" data-shortcut="?"/>
|
||||
</aside>
|
||||
</footer>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 8.8 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 8.8 KiB |
|
|
@ -0,0 +1,149 @@
|
|||
{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
|
||||
{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
|
||||
<title>Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}%</title>
|
||||
<link rel="icon" sizes="32x32" href="favicon_32.png">
|
||||
<link rel="stylesheet" href="style.css" type="text/css">
|
||||
{% if extra_css %}
|
||||
<link rel="stylesheet" href="{{ extra_css }}" type="text/css">
|
||||
{% endif %}
|
||||
|
||||
{% if contexts_json %}
|
||||
<script type="text/javascript">
|
||||
contexts = {{ contexts_json }}
|
||||
</script>
|
||||
{% endif %}
|
||||
|
||||
<script type="text/javascript" src="coverage_html.js" defer></script>
|
||||
</head>
|
||||
<body class="pyfile">
|
||||
|
||||
<header>
|
||||
<div class="content">
|
||||
<h1>
|
||||
<span class="text">Coverage for </span><b>{{relative_filename|escape}}</b>:
|
||||
<span class="pc_cov">{{nums.pc_covered_str}}%</span>
|
||||
</h1>
|
||||
|
||||
<aside id="help_panel_wrapper">
|
||||
<input id="help_panel_state" type="checkbox">
|
||||
<label for="help_panel_state">
|
||||
<img id="keyboard_icon" src="keybd_closed.png" alt="Show/hide keyboard shortcuts" />
|
||||
</label>
|
||||
<div id="help_panel">
|
||||
<p class="legend">Shortcuts on this page</p>
|
||||
<div class="keyhelp">
|
||||
<p>
|
||||
<kbd>r</kbd>
|
||||
<kbd>m</kbd>
|
||||
<kbd>x</kbd>
|
||||
{% if has_arcs %}
|
||||
<kbd>p</kbd>
|
||||
{% endif %}
|
||||
toggle line displays
|
||||
</p>
|
||||
<p>
|
||||
<kbd>j</kbd>
|
||||
<kbd>k</kbd>
|
||||
next/prev highlighted chunk
|
||||
</p>
|
||||
<p>
|
||||
<kbd>0</kbd> (zero) top of page
|
||||
</p>
|
||||
<p>
|
||||
<kbd>1</kbd> (one) first highlighted chunk
|
||||
</p>
|
||||
<p>
|
||||
<kbd>[</kbd>
|
||||
<kbd>]</kbd>
|
||||
prev/next file
|
||||
</p>
|
||||
<p>
|
||||
<kbd>u</kbd> up to the index
|
||||
</p>
|
||||
<p>
|
||||
<kbd>?</kbd> show/hide this help
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</aside>
|
||||
|
||||
<h2>
|
||||
<span class="text">{{nums.n_statements}} statements </span>
|
||||
<button type="button" class="{{category.run}} button_toggle_run" value="run" data-shortcut="r" title="Toggle lines run">{{nums.n_executed}}<span class="text"> run</span></button>
|
||||
<button type="button" class="{{category.mis}} button_toggle_mis" value="mis" data-shortcut="m" title="Toggle lines missing">{{nums.n_missing}}<span class="text"> missing</span></button>
|
||||
<button type="button" class="{{category.exc}} button_toggle_exc" value="exc" data-shortcut="x" title="Toggle lines excluded">{{nums.n_excluded}}<span class="text"> excluded</span></button>
|
||||
{% if has_arcs %}
|
||||
<button type="button" class="{{category.par}} button_toggle_par" value="par" data-shortcut="p" title="Toggle lines partially run">{{nums.n_partial_branches}}<span class="text"> partial</span></button>
|
||||
{% endif %}
|
||||
</h2>
|
||||
|
||||
<p class="text">
|
||||
<a id="prevFileLink" class="nav" href="{{ prev_html }}">« prev</a>
|
||||
<a id="indexLink" class="nav" href="index.html">^ index</a>
|
||||
<a id="nextFileLink" class="nav" href="{{ next_html }}">» next</a>
|
||||
|
||||
<a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
|
||||
created at {{ time_stamp }}
|
||||
</p>
|
||||
|
||||
<aside class="hidden">
|
||||
<button type="button" class="button_next_chunk" data-shortcut="j"/>
|
||||
<button type="button" class="button_prev_chunk" data-shortcut="k"/>
|
||||
<button type="button" class="button_top_of_page" data-shortcut="0"/>
|
||||
<button type="button" class="button_first_chunk" data-shortcut="1"/>
|
||||
<button type="button" class="button_prev_file" data-shortcut="["/>
|
||||
<button type="button" class="button_next_file" data-shortcut="]"/>
|
||||
<button type="button" class="button_to_index" data-shortcut="u"/>
|
||||
<button type="button" class="button_show_hide_help" data-shortcut="?"/>
|
||||
</aside>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<main id="source">
|
||||
{% for line in lines -%}
|
||||
{% joined %}
|
||||
<p class="{{line.css_class}}">
|
||||
<span class="n"><a id="t{{line.number}}" href="#t{{line.number}}">{{line.number}}</a></span>
|
||||
<span class="t">{{line.html}} </span>
|
||||
{% if line.context_list %}
|
||||
<input type="checkbox" id="ctxs{{line.number}}" />
|
||||
{% endif %}
|
||||
{# Things that should float right in the line. #}
|
||||
<span class="r">
|
||||
{% if line.annotate %}
|
||||
<span class="annotate short">{{line.annotate}}</span>
|
||||
<span class="annotate long">{{line.annotate_long}}</span>
|
||||
{% endif %}
|
||||
{% if line.contexts %}
|
||||
<label for="ctxs{{line.number}}" class="ctx">{{ line.contexts_label }}</label>
|
||||
{% endif %}
|
||||
</span>
|
||||
{# Things that should appear below the line. #}
|
||||
{% if line.context_str %}
|
||||
<span class="ctxs">{{ line.context_str }}</span>
|
||||
{% endif %}
|
||||
</p>
|
||||
{% endjoined %}
|
||||
{% endfor %}
|
||||
</main>
|
||||
|
||||
<footer>
|
||||
<div class="content">
|
||||
<p>
|
||||
<a id="prevFileLink" class="nav" href="{{ prev_html }}">« prev</a>
|
||||
<a id="indexLink" class="nav" href="index.html">^ index</a>
|
||||
<a id="nextFileLink" class="nav" href="{{ next_html }}">» next</a>
|
||||
|
||||
<a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
|
||||
created at {{ time_stamp }}
|
||||
</p>
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
309
.venv/lib/python3.10/site-packages/coverage/htmlfiles/style.css
Normal file
309
.venv/lib/python3.10/site-packages/coverage/htmlfiles/style.css
Normal file
|
|
@ -0,0 +1,309 @@
|
|||
@charset "UTF-8";
|
||||
/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
|
||||
/* Don't edit this .css file. Edit the .scss file instead! */
|
||||
html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; }
|
||||
|
||||
body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } }
|
||||
|
||||
@media (prefers-color-scheme: dark) { body { color: #eee; } }
|
||||
|
||||
html > body { font-size: 16px; }
|
||||
|
||||
a:active, a:focus { outline: 2px dashed #007acc; }
|
||||
|
||||
p { font-size: .875em; line-height: 1.4em; }
|
||||
|
||||
table { border-collapse: collapse; }
|
||||
|
||||
td { vertical-align: top; }
|
||||
|
||||
table tr.hidden { display: none !important; }
|
||||
|
||||
p#no_rows { display: none; font-size: 1.2em; }
|
||||
|
||||
a.nav { text-decoration: none; color: inherit; }
|
||||
|
||||
a.nav:hover { text-decoration: underline; color: inherit; }
|
||||
|
||||
.hidden { display: none; }
|
||||
|
||||
header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header { background: black; } }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header { border-color: #333; } }
|
||||
|
||||
header .content { padding: 1rem 3.5rem; }
|
||||
|
||||
header h2 { margin-top: .5em; font-size: 1em; }
|
||||
|
||||
header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } }
|
||||
|
||||
header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; }
|
||||
|
||||
header.sticky .text { display: none; }
|
||||
|
||||
header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; }
|
||||
|
||||
header.sticky .content { padding: 0.5rem 3.5rem; }
|
||||
|
||||
header.sticky .content p { font-size: 1em; }
|
||||
|
||||
header.sticky ~ #source { padding-top: 6.5em; }
|
||||
|
||||
main { position: relative; z-index: 1; }
|
||||
|
||||
footer { margin: 1rem 3.5rem; }
|
||||
|
||||
footer .content { padding: 0; color: #666; font-style: italic; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { footer .content { color: #aaa; } }
|
||||
|
||||
#index { margin: 1rem 0 0 3.5rem; }
|
||||
|
||||
h1 { font-size: 1.25em; display: inline-block; }
|
||||
|
||||
#filter_container { float: right; margin: 0 2em 0 0; }
|
||||
|
||||
#filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } }
|
||||
|
||||
#filter_container input:focus { border-color: #007acc; }
|
||||
|
||||
header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button { border-color: #444; } }
|
||||
|
||||
header button:active, header button:focus { outline: 2px dashed #007acc; }
|
||||
|
||||
header button.run { background: #eeffee; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.run { background: #373d29; } }
|
||||
|
||||
header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } }
|
||||
|
||||
header button.mis { background: #ffeeee; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } }
|
||||
|
||||
header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } }
|
||||
|
||||
header button.exc { background: #f7f7f7; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.exc { background: #333; } }
|
||||
|
||||
header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } }
|
||||
|
||||
header button.par { background: #ffffd5; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.par { background: #650; } }
|
||||
|
||||
header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } }
|
||||
|
||||
#help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; }
|
||||
|
||||
#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; }
|
||||
|
||||
#help_panel_wrapper { float: right; position: relative; }
|
||||
|
||||
#keyboard_icon { margin: 5px; }
|
||||
|
||||
#help_panel_state { display: none; }
|
||||
|
||||
#help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; }
|
||||
|
||||
#help_panel .keyhelp p { margin-top: .75em; }
|
||||
|
||||
#help_panel .legend { font-style: italic; margin-bottom: 1em; }
|
||||
|
||||
.indexfile #help_panel { width: 25em; }
|
||||
|
||||
.pyfile #help_panel { width: 18em; }
|
||||
|
||||
#help_panel_state:checked ~ #help_panel { display: block; }
|
||||
|
||||
kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; }
|
||||
|
||||
#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; }
|
||||
|
||||
#source p { position: relative; white-space: pre; }
|
||||
|
||||
#source p * { box-sizing: border-box; }
|
||||
|
||||
#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .n { color: #777; } }
|
||||
|
||||
#source p .n.highlight { background: #ffdd00; }
|
||||
|
||||
#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } }
|
||||
|
||||
#source p .n a:hover { text-decoration: underline; color: #999; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } }
|
||||
|
||||
#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } }
|
||||
|
||||
#source p .t:hover { background: #f2f2f2; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } }
|
||||
|
||||
#source p .t:hover ~ .r .annotate.long { display: block; }
|
||||
|
||||
#source p .t .com { color: #008000; font-style: italic; line-height: 1px; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } }
|
||||
|
||||
#source p .t .key { font-weight: bold; line-height: 1px; }
|
||||
|
||||
#source p .t .str { color: #0451a5; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } }
|
||||
|
||||
#source p.mis .t { border-left: 0.2em solid #ff0000; }
|
||||
|
||||
#source p.mis.show_mis .t { background: #fdd; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } }
|
||||
|
||||
#source p.mis.show_mis .t:hover { background: #f2d2d2; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } }
|
||||
|
||||
#source p.run .t { border-left: 0.2em solid #00dd00; }
|
||||
|
||||
#source p.run.show_run .t { background: #dfd; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } }
|
||||
|
||||
#source p.run.show_run .t:hover { background: #d2f2d2; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } }
|
||||
|
||||
#source p.exc .t { border-left: 0.2em solid #808080; }
|
||||
|
||||
#source p.exc.show_exc .t { background: #eee; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } }
|
||||
|
||||
#source p.exc.show_exc .t:hover { background: #e2e2e2; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } }
|
||||
|
||||
#source p.par .t { border-left: 0.2em solid #bbbb00; }
|
||||
|
||||
#source p.par.show_par .t { background: #ffa; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } }
|
||||
|
||||
#source p.par.show_par .t:hover { background: #f2f2a2; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } }
|
||||
|
||||
#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; }
|
||||
|
||||
#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } }
|
||||
|
||||
#source p .annotate.short:hover ~ .long { display: block; }
|
||||
|
||||
#source p .annotate.long { width: 30em; right: 2.5em; }
|
||||
|
||||
#source p input { display: none; }
|
||||
|
||||
#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; }
|
||||
|
||||
#source p input ~ .r label.ctx::before { content: "▶ "; }
|
||||
|
||||
#source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } }
|
||||
|
||||
#source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } }
|
||||
|
||||
#source p input:checked ~ .r label.ctx::before { content: "▼ "; }
|
||||
|
||||
#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; }
|
||||
|
||||
#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } }
|
||||
|
||||
#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } }
|
||||
|
||||
#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; }
|
||||
|
||||
#index table.index { margin-left: -.5em; }
|
||||
|
||||
#index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } }
|
||||
|
||||
#index td.name, #index th.name { text-align: left; width: auto; }
|
||||
|
||||
#index th { font-style: italic; color: #333; cursor: pointer; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #index th { color: #ddd; } }
|
||||
|
||||
#index th:hover { background: #eee; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } }
|
||||
|
||||
#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } }
|
||||
|
||||
#index th[aria-sort="ascending"]::after { font-family: sans-serif; content: " ↑"; }
|
||||
|
||||
#index th[aria-sort="descending"]::after { font-family: sans-serif; content: " ↓"; }
|
||||
|
||||
#index td.name a { text-decoration: none; color: inherit; }
|
||||
|
||||
#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; }
|
||||
|
||||
#index tr.file:hover { background: #eee; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } }
|
||||
|
||||
#index tr.file:hover td.name { text-decoration: underline; color: inherit; }
|
||||
|
||||
#scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } }
|
||||
|
||||
#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; }
|
||||
|
||||
@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } }
|
||||
715
.venv/lib/python3.10/site-packages/coverage/htmlfiles/style.scss
Normal file
715
.venv/lib/python3.10/site-packages/coverage/htmlfiles/style.scss
Normal file
|
|
@ -0,0 +1,715 @@
|
|||
/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
|
||||
|
||||
// CSS styles for coverage.py HTML reports.
|
||||
|
||||
// When you edit this file, you need to run "make css" to get the CSS file
|
||||
// generated, and then check in both the .scss and the .css files.
|
||||
|
||||
// When working on the file, this command is useful:
|
||||
// sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css
|
||||
//
|
||||
// OR you can process sass purely in python with `pip install pysass`, then:
|
||||
// pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css
|
||||
|
||||
// Ignore this comment, it's for the CSS output file:
|
||||
/* Don't edit this .css file. Edit the .scss file instead! */
|
||||
|
||||
// Dimensions
|
||||
$left-gutter: 3.5rem;
|
||||
|
||||
//
|
||||
// Declare colors and variables
|
||||
//
|
||||
|
||||
$font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
|
||||
$font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace;
|
||||
|
||||
$off-button-lighten: 50%;
|
||||
$hover-dark-amt: 95%;
|
||||
|
||||
$focus-color: #007acc;
|
||||
|
||||
$mis-color: #ff0000;
|
||||
$run-color: #00dd00;
|
||||
$exc-color: #808080;
|
||||
$par-color: #bbbb00;
|
||||
|
||||
$light-bg: #fff;
|
||||
$light-fg: #000;
|
||||
$light-gray1: #f8f8f8;
|
||||
$light-gray2: #eee;
|
||||
$light-gray3: #ccc;
|
||||
$light-gray4: #999;
|
||||
$light-gray5: #666;
|
||||
$light-gray6: #333;
|
||||
$light-pln-bg: $light-bg;
|
||||
$light-mis-bg: #fdd;
|
||||
$light-run-bg: #dfd;
|
||||
$light-exc-bg: $light-gray2;
|
||||
$light-par-bg: #ffa;
|
||||
$light-token-com: #008000;
|
||||
$light-token-str: #0451a5;
|
||||
$light-context-bg-color: #d0e8ff;
|
||||
|
||||
$dark-bg: #1e1e1e;
|
||||
$dark-fg: #eee;
|
||||
$dark-gray1: #222;
|
||||
$dark-gray2: #333;
|
||||
$dark-gray3: #444;
|
||||
$dark-gray4: #777;
|
||||
$dark-gray5: #aaa;
|
||||
$dark-gray6: #ddd;
|
||||
$dark-pln-bg: $dark-bg;
|
||||
$dark-mis-bg: #4b1818;
|
||||
$dark-run-bg: #373d29;
|
||||
$dark-exc-bg: $dark-gray2;
|
||||
$dark-par-bg: #650;
|
||||
$dark-token-com: #6a9955;
|
||||
$dark-token-str: #9cdcfe;
|
||||
$dark-context-bg-color: #056;
|
||||
|
||||
//
|
||||
// Mixins and utilities
|
||||
//
|
||||
|
||||
@mixin background-dark($color) {
|
||||
@media (prefers-color-scheme: dark) {
|
||||
background: $color;
|
||||
}
|
||||
}
|
||||
@mixin color-dark($color) {
|
||||
@media (prefers-color-scheme: dark) {
|
||||
color: $color;
|
||||
}
|
||||
}
|
||||
@mixin border-color-dark($color) {
|
||||
@media (prefers-color-scheme: dark) {
|
||||
border-color: $color;
|
||||
}
|
||||
}
|
||||
|
||||
// Add visual outline to navigable elements on focus improve accessibility.
|
||||
@mixin focus-border {
|
||||
&:active, &:focus {
|
||||
outline: 2px dashed $focus-color;
|
||||
}
|
||||
}
|
||||
|
||||
// Page-wide styles
|
||||
html, body, h1, h2, h3, p, table, td, th {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
border: 0;
|
||||
font-weight: inherit;
|
||||
font-style: inherit;
|
||||
font-size: 100%;
|
||||
font-family: inherit;
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
// Set baseline grid to 16 pt.
|
||||
body {
|
||||
font-family: $font-normal;
|
||||
font-size: 1em;
|
||||
background: $light-bg;
|
||||
color: $light-fg;
|
||||
@include background-dark($dark-bg);
|
||||
@include color-dark($dark-fg);
|
||||
}
|
||||
|
||||
html>body {
|
||||
font-size: 16px;
|
||||
}
|
||||
|
||||
a {
|
||||
@include focus-border;
|
||||
}
|
||||
|
||||
p {
|
||||
font-size: .875em;
|
||||
line-height: 1.4em;
|
||||
}
|
||||
|
||||
table {
|
||||
border-collapse: collapse;
|
||||
}
|
||||
td {
|
||||
vertical-align: top;
|
||||
}
|
||||
table tr.hidden {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
p#no_rows {
|
||||
display: none;
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
a.nav {
|
||||
text-decoration: none;
|
||||
color: inherit;
|
||||
|
||||
&:hover {
|
||||
text-decoration: underline;
|
||||
color: inherit;
|
||||
}
|
||||
}
|
||||
|
||||
.hidden {
|
||||
display: none;
|
||||
}
|
||||
|
||||
// Page structure
|
||||
header {
|
||||
background: $light-gray1;
|
||||
@include background-dark(black);
|
||||
width: 100%;
|
||||
z-index: 2;
|
||||
border-bottom: 1px solid $light-gray3;
|
||||
@include border-color-dark($dark-gray2);
|
||||
|
||||
.content {
|
||||
padding: 1rem $left-gutter;
|
||||
}
|
||||
|
||||
h2 {
|
||||
margin-top: .5em;
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
p.text {
|
||||
margin: .5em 0 -.5em;
|
||||
color: $light-gray5;
|
||||
@include color-dark($dark-gray5);
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
&.sticky {
|
||||
position: fixed;
|
||||
left: 0;
|
||||
right: 0;
|
||||
height: 2.5em;
|
||||
|
||||
.text {
|
||||
display: none;
|
||||
}
|
||||
|
||||
h1, h2 {
|
||||
font-size: 1em;
|
||||
margin-top: 0;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
.content {
|
||||
padding: .5rem $left-gutter;
|
||||
p {
|
||||
font-size: 1em;
|
||||
}
|
||||
}
|
||||
|
||||
& ~ #source {
|
||||
padding-top: 6.5em;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
main {
|
||||
position: relative;
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
footer {
|
||||
margin: 1rem $left-gutter;
|
||||
|
||||
.content {
|
||||
padding: 0;
|
||||
color: $light-gray5;
|
||||
@include color-dark($dark-gray5);
|
||||
font-style: italic;
|
||||
}
|
||||
}
|
||||
|
||||
#index {
|
||||
margin: 1rem 0 0 $left-gutter;
|
||||
}
|
||||
|
||||
// Header styles
|
||||
|
||||
h1 {
|
||||
font-size: 1.25em;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
#filter_container {
|
||||
float: right;
|
||||
margin: 0 2em 0 0;
|
||||
|
||||
input {
|
||||
width: 10em;
|
||||
padding: 0.2em 0.5em;
|
||||
border: 2px solid $light-gray3;
|
||||
background: $light-bg;
|
||||
color: $light-fg;
|
||||
@include border-color-dark($dark-gray3);
|
||||
@include background-dark($dark-bg);
|
||||
@include color-dark($dark-fg);
|
||||
&:focus {
|
||||
border-color: $focus-color;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
header button {
|
||||
font-family: inherit;
|
||||
font-size: inherit;
|
||||
border: 1px solid;
|
||||
border-radius: .2em;
|
||||
color: inherit;
|
||||
padding: .1em .5em;
|
||||
margin: 1px calc(.1em + 1px);
|
||||
cursor: pointer;
|
||||
border-color: $light-gray3;
|
||||
@include border-color-dark($dark-gray3);
|
||||
@include focus-border;
|
||||
|
||||
&.run {
|
||||
background: mix($light-run-bg, $light-bg, $off-button-lighten);
|
||||
@include background-dark($dark-run-bg);
|
||||
&.show_run {
|
||||
background: $light-run-bg;
|
||||
@include background-dark($dark-run-bg);
|
||||
border: 2px solid $run-color;
|
||||
margin: 0 .1em;
|
||||
}
|
||||
}
|
||||
&.mis {
|
||||
background: mix($light-mis-bg, $light-bg, $off-button-lighten);
|
||||
@include background-dark($dark-mis-bg);
|
||||
&.show_mis {
|
||||
background: $light-mis-bg;
|
||||
@include background-dark($dark-mis-bg);
|
||||
border: 2px solid $mis-color;
|
||||
margin: 0 .1em;
|
||||
}
|
||||
}
|
||||
&.exc {
|
||||
background: mix($light-exc-bg, $light-bg, $off-button-lighten);
|
||||
@include background-dark($dark-exc-bg);
|
||||
&.show_exc {
|
||||
background: $light-exc-bg;
|
||||
@include background-dark($dark-exc-bg);
|
||||
border: 2px solid $exc-color;
|
||||
margin: 0 .1em;
|
||||
}
|
||||
}
|
||||
&.par {
|
||||
background: mix($light-par-bg, $light-bg, $off-button-lighten);
|
||||
@include background-dark($dark-par-bg);
|
||||
&.show_par {
|
||||
background: $light-par-bg;
|
||||
@include background-dark($dark-par-bg);
|
||||
border: 2px solid $par-color;
|
||||
margin: 0 .1em;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Yellow post-it things.
|
||||
%popup {
|
||||
display: none;
|
||||
position: absolute;
|
||||
z-index: 999;
|
||||
background: #ffffcc;
|
||||
border: 1px solid #888;
|
||||
border-radius: .2em;
|
||||
color: #333;
|
||||
padding: .25em .5em;
|
||||
}
|
||||
|
||||
// Yellow post-it's in the text listings.
|
||||
%in-text-popup {
|
||||
@extend %popup;
|
||||
white-space: normal;
|
||||
float: right;
|
||||
top: 1.75em;
|
||||
right: 1em;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
// Help panel
|
||||
#help_panel_wrapper {
|
||||
float: right;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
#keyboard_icon {
|
||||
margin: 5px;
|
||||
}
|
||||
|
||||
#help_panel_state {
|
||||
display: none;
|
||||
}
|
||||
|
||||
#help_panel {
|
||||
@extend %popup;
|
||||
top: 25px;
|
||||
right: 0;
|
||||
padding: .75em;
|
||||
border: 1px solid #883;
|
||||
|
||||
color: #333;
|
||||
|
||||
.keyhelp p {
|
||||
margin-top: .75em;
|
||||
}
|
||||
|
||||
.legend {
|
||||
font-style: italic;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
.indexfile & {
|
||||
width: 25em;
|
||||
}
|
||||
|
||||
.pyfile & {
|
||||
width: 18em;
|
||||
}
|
||||
|
||||
#help_panel_state:checked ~ & {
|
||||
display: block;
|
||||
}
|
||||
}
|
||||
|
||||
kbd {
|
||||
border: 1px solid black;
|
||||
border-color: #888 #333 #333 #888;
|
||||
padding: .1em .35em;
|
||||
font-family: $font-code;
|
||||
font-weight: bold;
|
||||
background: #eee;
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
// Source file styles
|
||||
|
||||
// The slim bar at the left edge of the source lines, colored by coverage.
|
||||
$border-indicator-width: .2em;
|
||||
|
||||
#source {
|
||||
padding: 1em 0 1em $left-gutter;
|
||||
font-family: $font-code;
|
||||
|
||||
p {
|
||||
// position relative makes position:absolute pop-ups appear in the right place.
|
||||
position: relative;
|
||||
white-space: pre;
|
||||
|
||||
* {
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.n {
|
||||
float: left;
|
||||
text-align: right;
|
||||
width: $left-gutter;
|
||||
box-sizing: border-box;
|
||||
margin-left: -$left-gutter;
|
||||
padding-right: 1em;
|
||||
color: $light-gray4;
|
||||
user-select: none;
|
||||
@include color-dark($dark-gray4);
|
||||
|
||||
&.highlight {
|
||||
background: #ffdd00;
|
||||
}
|
||||
|
||||
a {
|
||||
// Make anchors to the line scroll the line to be
|
||||
// visible beneath the fixed-position header.
|
||||
scroll-margin-top: 6em;
|
||||
text-decoration: none;
|
||||
color: $light-gray4;
|
||||
@include color-dark($dark-gray4);
|
||||
&:hover {
|
||||
text-decoration: underline;
|
||||
color: $light-gray4;
|
||||
@include color-dark($dark-gray4);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.t {
|
||||
display: inline-block;
|
||||
width: 100%;
|
||||
box-sizing: border-box;
|
||||
margin-left: -.5em;
|
||||
padding-left: .5em - $border-indicator-width;
|
||||
border-left: $border-indicator-width solid $light-bg;
|
||||
@include border-color-dark($dark-bg);
|
||||
|
||||
&:hover {
|
||||
background: mix($light-pln-bg, $light-fg, $hover-dark-amt);
|
||||
@include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt));
|
||||
|
||||
& ~ .r .annotate.long {
|
||||
display: block;
|
||||
}
|
||||
}
|
||||
|
||||
// Syntax coloring
|
||||
.com {
|
||||
color: $light-token-com;
|
||||
@include color-dark($dark-token-com);
|
||||
font-style: italic;
|
||||
line-height: 1px;
|
||||
}
|
||||
.key {
|
||||
font-weight: bold;
|
||||
line-height: 1px;
|
||||
}
|
||||
.str {
|
||||
color: $light-token-str;
|
||||
@include color-dark($dark-token-str);
|
||||
}
|
||||
}
|
||||
|
||||
&.mis {
|
||||
.t {
|
||||
border-left: $border-indicator-width solid $mis-color;
|
||||
}
|
||||
|
||||
&.show_mis .t {
|
||||
background: $light-mis-bg;
|
||||
@include background-dark($dark-mis-bg);
|
||||
|
||||
&:hover {
|
||||
background: mix($light-mis-bg, $light-fg, $hover-dark-amt);
|
||||
@include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
&.run {
|
||||
.t {
|
||||
border-left: $border-indicator-width solid $run-color;
|
||||
}
|
||||
|
||||
&.show_run .t {
|
||||
background: $light-run-bg;
|
||||
@include background-dark($dark-run-bg);
|
||||
|
||||
&:hover {
|
||||
background: mix($light-run-bg, $light-fg, $hover-dark-amt);
|
||||
@include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
&.exc {
|
||||
.t {
|
||||
border-left: $border-indicator-width solid $exc-color;
|
||||
}
|
||||
|
||||
&.show_exc .t {
|
||||
background: $light-exc-bg;
|
||||
@include background-dark($dark-exc-bg);
|
||||
|
||||
&:hover {
|
||||
background: mix($light-exc-bg, $light-fg, $hover-dark-amt);
|
||||
@include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
&.par {
|
||||
.t {
|
||||
border-left: $border-indicator-width solid $par-color;
|
||||
}
|
||||
|
||||
&.show_par .t {
|
||||
background: $light-par-bg;
|
||||
@include background-dark($dark-par-bg);
|
||||
|
||||
&:hover {
|
||||
background: mix($light-par-bg, $light-fg, $hover-dark-amt);
|
||||
@include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
.r {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 2.5em;
|
||||
font-family: $font-normal;
|
||||
}
|
||||
|
||||
.annotate {
|
||||
font-family: $font-normal;
|
||||
color: $light-gray5;
|
||||
@include color-dark($dark-gray6);
|
||||
padding-right: .5em;
|
||||
|
||||
&.short:hover ~ .long {
|
||||
display: block;
|
||||
}
|
||||
|
||||
&.long {
|
||||
@extend %in-text-popup;
|
||||
width: 30em;
|
||||
right: 2.5em;
|
||||
}
|
||||
}
|
||||
|
||||
input {
|
||||
display: none;
|
||||
|
||||
& ~ .r label.ctx {
|
||||
cursor: pointer;
|
||||
border-radius: .25em;
|
||||
&::before {
|
||||
content: "▶ ";
|
||||
}
|
||||
&:hover {
|
||||
background: mix($light-context-bg-color, $light-bg, $off-button-lighten);
|
||||
@include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten));
|
||||
color: $light-gray5;
|
||||
@include color-dark($dark-gray5);
|
||||
}
|
||||
}
|
||||
|
||||
&:checked ~ .r label.ctx {
|
||||
background: $light-context-bg-color;
|
||||
@include background-dark($dark-context-bg-color);
|
||||
color: $light-gray5;
|
||||
@include color-dark($dark-gray5);
|
||||
border-radius: .75em .75em 0 0;
|
||||
padding: 0 .5em;
|
||||
margin: -.25em 0;
|
||||
&::before {
|
||||
content: "▼ ";
|
||||
}
|
||||
}
|
||||
|
||||
&:checked ~ .ctxs {
|
||||
padding: .25em .5em;
|
||||
overflow-y: scroll;
|
||||
max-height: 10.5em;
|
||||
}
|
||||
}
|
||||
|
||||
label.ctx {
|
||||
color: $light-gray4;
|
||||
@include color-dark($dark-gray4);
|
||||
display: inline-block;
|
||||
padding: 0 .5em;
|
||||
font-size: .8333em; // 10/12
|
||||
}
|
||||
|
||||
.ctxs {
|
||||
display: block;
|
||||
max-height: 0;
|
||||
overflow-y: hidden;
|
||||
transition: all .2s;
|
||||
padding: 0 .5em;
|
||||
font-family: $font-normal;
|
||||
white-space: nowrap;
|
||||
background: $light-context-bg-color;
|
||||
@include background-dark($dark-context-bg-color);
|
||||
border-radius: .25em;
|
||||
margin-right: 1.75em;
|
||||
text-align: right;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// index styles
|
||||
#index {
|
||||
font-family: $font-code;
|
||||
font-size: 0.875em;
|
||||
|
||||
table.index {
|
||||
margin-left: -.5em;
|
||||
}
|
||||
td, th {
|
||||
text-align: right;
|
||||
width: 5em;
|
||||
padding: .25em .5em;
|
||||
border-bottom: 1px solid $light-gray2;
|
||||
@include border-color-dark($dark-gray2);
|
||||
&.name {
|
||||
text-align: left;
|
||||
width: auto;
|
||||
}
|
||||
}
|
||||
th {
|
||||
font-style: italic;
|
||||
color: $light-gray6;
|
||||
@include color-dark($dark-gray6);
|
||||
cursor: pointer;
|
||||
&:hover {
|
||||
background: $light-gray2;
|
||||
@include background-dark($dark-gray2);
|
||||
}
|
||||
&[aria-sort="ascending"], &[aria-sort="descending"] {
|
||||
white-space: nowrap;
|
||||
background: $light-gray2;
|
||||
@include background-dark($dark-gray2);
|
||||
padding-left: .5em;
|
||||
}
|
||||
&[aria-sort="ascending"]::after {
|
||||
font-family: sans-serif;
|
||||
content: " ↑";
|
||||
}
|
||||
&[aria-sort="descending"]::after {
|
||||
font-family: sans-serif;
|
||||
content: " ↓";
|
||||
}
|
||||
}
|
||||
td.name a {
|
||||
text-decoration: none;
|
||||
color: inherit;
|
||||
}
|
||||
|
||||
tr.total td,
|
||||
tr.total_dynamic td {
|
||||
font-weight: bold;
|
||||
border-top: 1px solid #ccc;
|
||||
border-bottom: none;
|
||||
}
|
||||
tr.file:hover {
|
||||
background: $light-gray2;
|
||||
@include background-dark($dark-gray2);
|
||||
td.name {
|
||||
text-decoration: underline;
|
||||
color: inherit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scroll marker styles
|
||||
#scroll_marker {
|
||||
position: fixed;
|
||||
z-index: 3;
|
||||
right: 0;
|
||||
top: 0;
|
||||
width: 16px;
|
||||
height: 100%;
|
||||
background: $light-bg;
|
||||
border-left: 1px solid $light-gray2;
|
||||
@include background-dark($dark-bg);
|
||||
@include border-color-dark($dark-gray2);
|
||||
will-change: transform; // for faster scrolling of fixed element in Chrome
|
||||
|
||||
.marker {
|
||||
background: $light-gray3;
|
||||
@include background-dark($dark-gray3);
|
||||
position: absolute;
|
||||
min-height: 3px;
|
||||
width: 100%;
|
||||
}
|
||||
}
|
||||
592
.venv/lib/python3.10/site-packages/coverage/inorout.py
Normal file
592
.venv/lib/python3.10/site-packages/coverage/inorout.py
Normal file
|
|
@ -0,0 +1,592 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Determining whether files are being measured/reported or not."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.util
|
||||
import inspect
|
||||
import itertools
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import sys
|
||||
import sysconfig
|
||||
import traceback
|
||||
|
||||
from types import FrameType, ModuleType
|
||||
from typing import (
|
||||
cast, Any, Iterable, TYPE_CHECKING,
|
||||
)
|
||||
|
||||
from coverage import env
|
||||
from coverage.disposition import FileDisposition, disposition_init
|
||||
from coverage.exceptions import CoverageException, PluginError
|
||||
from coverage.files import TreeMatcher, GlobMatcher, ModuleMatcher
|
||||
from coverage.files import prep_patterns, find_python_files, canonical_filename
|
||||
from coverage.misc import sys_modules_saved
|
||||
from coverage.python import source_for_file, source_for_morf
|
||||
from coverage.types import TFileDisposition, TMorf, TWarnFn, TDebugCtl
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage.config import CoverageConfig
|
||||
from coverage.plugin_support import Plugins
|
||||
|
||||
|
||||
# Pypy has some unusual stuff in the "stdlib". Consider those locations
|
||||
# when deciding where the stdlib is. These modules are not used for anything,
|
||||
# they are modules importable from the pypy lib directories, so that we can
|
||||
# find those directories.
|
||||
modules_we_happen_to_have: list[ModuleType] = [
|
||||
inspect, itertools, os, platform, re, sysconfig, traceback,
|
||||
]
|
||||
|
||||
if env.PYPY:
|
||||
try:
|
||||
import _structseq
|
||||
modules_we_happen_to_have.append(_structseq)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
try:
|
||||
import _pypy_irc_topic
|
||||
modules_we_happen_to_have.append(_pypy_irc_topic)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def canonical_path(morf: TMorf, directory: bool = False) -> str:
|
||||
"""Return the canonical path of the module or file `morf`.
|
||||
|
||||
If the module is a package, then return its directory. If it is a
|
||||
module, then return its file, unless `directory` is True, in which
|
||||
case return its enclosing directory.
|
||||
|
||||
"""
|
||||
morf_path = canonical_filename(source_for_morf(morf))
|
||||
if morf_path.endswith("__init__.py") or directory:
|
||||
morf_path = os.path.split(morf_path)[0]
|
||||
return morf_path
|
||||
|
||||
|
||||
def name_for_module(filename: str, frame: FrameType | None) -> str:
|
||||
"""Get the name of the module for a filename and frame.
|
||||
|
||||
For configurability's sake, we allow __main__ modules to be matched by
|
||||
their importable name.
|
||||
|
||||
If loaded via runpy (aka -m), we can usually recover the "original"
|
||||
full dotted module name, otherwise, we resort to interpreting the
|
||||
file name to get the module's name. In the case that the module name
|
||||
can't be determined, None is returned.
|
||||
|
||||
"""
|
||||
module_globals = frame.f_globals if frame is not None else {}
|
||||
dunder_name: str = module_globals.get("__name__", None)
|
||||
|
||||
if isinstance(dunder_name, str) and dunder_name != "__main__":
|
||||
# This is the usual case: an imported module.
|
||||
return dunder_name
|
||||
|
||||
spec = module_globals.get("__spec__", None)
|
||||
if spec:
|
||||
fullname = spec.name
|
||||
if isinstance(fullname, str) and fullname != "__main__":
|
||||
# Module loaded via: runpy -m
|
||||
return fullname
|
||||
|
||||
# Script as first argument to Python command line.
|
||||
inspectedname = inspect.getmodulename(filename)
|
||||
if inspectedname is not None:
|
||||
return inspectedname
|
||||
else:
|
||||
return dunder_name
|
||||
|
||||
|
||||
def module_is_namespace(mod: ModuleType) -> bool:
|
||||
"""Is the module object `mod` a PEP420 namespace module?"""
|
||||
return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None
|
||||
|
||||
|
||||
def module_has_file(mod: ModuleType) -> bool:
|
||||
"""Does the module object `mod` have an existing __file__ ?"""
|
||||
mod__file__ = getattr(mod, "__file__", None)
|
||||
if mod__file__ is None:
|
||||
return False
|
||||
return os.path.exists(mod__file__)
|
||||
|
||||
|
||||
def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]:
|
||||
"""Find the file and search path for `modulename`.
|
||||
|
||||
Returns:
|
||||
filename: The filename of the module, or None.
|
||||
path: A list (possibly empty) of directories to find submodules in.
|
||||
|
||||
"""
|
||||
filename = None
|
||||
path = []
|
||||
try:
|
||||
spec = importlib.util.find_spec(modulename)
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
if spec is not None:
|
||||
filename = spec.origin
|
||||
path = list(spec.submodule_search_locations or ())
|
||||
return filename, path
|
||||
|
||||
|
||||
def add_stdlib_paths(paths: set[str]) -> None:
|
||||
"""Add paths where the stdlib can be found to the set `paths`."""
|
||||
# Look at where some standard modules are located. That's the
|
||||
# indication for "installed with the interpreter". In some
|
||||
# environments (virtualenv, for example), these modules may be
|
||||
# spread across a few locations. Look at all the candidate modules
|
||||
# we've imported, and take all the different ones.
|
||||
for m in modules_we_happen_to_have:
|
||||
if hasattr(m, "__file__"):
|
||||
paths.add(canonical_path(m, directory=True))
|
||||
|
||||
|
||||
def add_third_party_paths(paths: set[str]) -> None:
|
||||
"""Add locations for third-party packages to the set `paths`."""
|
||||
# Get the paths that sysconfig knows about.
|
||||
scheme_names = set(sysconfig.get_scheme_names())
|
||||
|
||||
for scheme in scheme_names:
|
||||
# https://foss.heptapod.net/pypy/pypy/-/issues/3433
|
||||
better_scheme = "pypy_posix" if scheme == "pypy" else scheme
|
||||
if os.name in better_scheme.split("_"):
|
||||
config_paths = sysconfig.get_paths(scheme)
|
||||
for path_name in ["platlib", "purelib", "scripts"]:
|
||||
paths.add(config_paths[path_name])
|
||||
|
||||
|
||||
def add_coverage_paths(paths: set[str]) -> None:
|
||||
"""Add paths where coverage.py code can be found to the set `paths`."""
|
||||
cover_path = canonical_path(__file__, directory=True)
|
||||
paths.add(cover_path)
|
||||
if env.TESTING:
|
||||
# Don't include our own test code.
|
||||
paths.add(os.path.join(cover_path, "tests"))
|
||||
|
||||
|
||||
class InOrOut:
    """Machinery for determining what files to measure."""

    def __init__(
        self,
        config: CoverageConfig,
        warn: TWarnFn,
        debug: TDebugCtl | None,
        include_namespace_packages: bool,
    ) -> None:
        """Prepare the matchers used to decide which files are measured.

        `config` supplies source/include/omit settings, `warn` is the warning
        callback, `debug` the optional debug output control, and
        `include_namespace_packages` controls namespace-package discovery.
        """
        self.warn = warn
        self.debug = debug
        self.include_namespace_packages = include_namespace_packages

        # Split the configured "source" entries: existing directories are
        # measured as trees, anything else is assumed to be a package name.
        self.source: list[str] = []
        self.source_pkgs: list[str] = []
        self.source_pkgs.extend(config.source_pkgs)
        for src in config.source or []:
            if os.path.isdir(src):
                self.source.append(canonical_filename(src))
            else:
                self.source_pkgs.append(src)
        # Packages we haven't seen traced yet; names are removed as they are
        # matched, so leftovers can be warned about later.
        self.source_pkgs_unmatched = self.source_pkgs[:]

        self.include = prep_patterns(config.run_include)
        self.omit = prep_patterns(config.run_omit)

        # The directories for files considered "installed with the interpreter".
        self.pylib_paths: set[str] = set()
        if not config.cover_pylib:
            add_stdlib_paths(self.pylib_paths)

        # To avoid tracing the coverage.py code itself, we skip anything
        # located where we are.
        self.cover_paths: set[str] = set()
        add_coverage_paths(self.cover_paths)

        # Find where third-party packages are installed.
        self.third_paths: set[str] = set()
        add_third_party_paths(self.third_paths)

        def _debug(msg: str) -> None:
            # Write to the debug control only when one was provided.
            if self.debug:
                self.debug.write(msg)

        # The matchers for should_trace.

        # Generally useful information
        _debug("sys.path:" + "".join(f"\n {p}" for p in sys.path))

        # Create the matchers we need for should_trace
        self.source_match = None
        self.source_pkgs_match = None
        self.pylib_match = None
        self.include_match = self.omit_match = None

        if self.source or self.source_pkgs:
            against = []
            if self.source:
                self.source_match = TreeMatcher(self.source, "source")
                against.append(f"trees {self.source_match!r}")
            if self.source_pkgs:
                self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs")
                against.append(f"modules {self.source_pkgs_match!r}")
            _debug("Source matching against " + " and ".join(against))
        else:
            # No --source: the stdlib matcher (if any) is used to exclude
            # interpreter-installed files instead.
            if self.pylib_paths:
                self.pylib_match = TreeMatcher(self.pylib_paths, "pylib")
                _debug(f"Python stdlib matching: {self.pylib_match!r}")
        if self.include:
            self.include_match = GlobMatcher(self.include, "include")
            _debug(f"Include matching: {self.include_match!r}")
        if self.omit:
            self.omit_match = GlobMatcher(self.omit, "omit")
            _debug(f"Omit matching: {self.omit_match!r}")

        self.cover_match = TreeMatcher(self.cover_paths, "coverage")
        _debug(f"Coverage code matching: {self.cover_match!r}")

        self.third_match = TreeMatcher(self.third_paths, "third")
        _debug(f"Third-party lib matching: {self.third_match!r}")

        # Check if the source we want to measure has been installed as a
        # third-party package.
        # Is the source inside a third-party area?
        self.source_in_third_paths = set()
        # Importing the packages below is only for inspection; the saved
        # sys.modules snapshot ensures the imports are rolled back after.
        with sys_modules_saved():
            for pkg in self.source_pkgs:
                try:
                    modfile, path = file_and_path_for_module(pkg)
                    _debug(f"Imported source package {pkg!r} as {modfile!r}")
                except CoverageException as exc:
                    _debug(f"Couldn't import source package {pkg!r}: {exc}")
                    continue
                if modfile:
                    if self.third_match.match(modfile):
                        _debug(
                            f"Source in third-party: source_pkg {pkg!r} at {modfile!r}",
                        )
                        self.source_in_third_paths.add(canonical_path(source_for_file(modfile)))
                else:
                    # No module file (e.g. a namespace package): check each of
                    # its path directories against the third-party areas.
                    for pathdir in path:
                        if self.third_match.match(pathdir):
                            _debug(
                                f"Source in third-party: {pkg!r} path directory at {pathdir!r}",
                            )
                            self.source_in_third_paths.add(pathdir)

        for src in self.source:
            if self.third_match.match(src):
                _debug(f"Source in third-party: source directory {src!r}")
                self.source_in_third_paths.add(src)
        self.source_in_third_match = TreeMatcher(self.source_in_third_paths, "source_in_third")
        _debug(f"Source in third-party matching: {self.source_in_third_match}")

        # `plugins` is assigned later by the owner of this object —
        # NOTE(review): assumed set before should_trace is called; confirm
        # against the caller.
        self.plugins: Plugins
        self.disp_class: type[TFileDisposition] = FileDisposition
||||
    def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition:
        """Decide whether to trace execution in `filename`, with a reason.

        This function is called from the trace function. As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a FileDisposition object.

        """
        original_filename = filename
        disp = disposition_init(self.disp_class, filename)

        def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
            """Simple helper to make it easy to return NO."""
            # Mutates `disp` in place and hands it back for a one-line return.
            disp.trace = False
            disp.reason = reason
            return disp

        if original_filename.startswith("<"):
            return nope(disp, "original file name is not real")

        if frame is not None:
            # Compiled Python files have two file names: frame.f_code.co_filename is
            # the file name at the time the .pyc was compiled. The second name is
            # __file__, which is where the .pyc was actually loaded from. Since
            # .pyc files can be moved after compilation (for example, by being
            # installed), we look for __file__ in the frame and prefer it to the
            # co_filename value.
            dunder_file = frame.f_globals and frame.f_globals.get("__file__")
            if dunder_file:
                filename = source_for_file(dunder_file)
                if original_filename and not original_filename.startswith("<"):
                    orig = os.path.basename(original_filename)
                    if orig != os.path.basename(filename):
                        # Files shouldn't be renamed when moved. This happens when
                        # exec'ing code. If it seems like something is wrong with
                        # the frame's file name, then just use the original.
                        filename = original_filename

        if not filename:
            # Empty string is pretty useless.
            return nope(disp, "empty string isn't a file name")

        if filename.startswith("memory:"):
            return nope(disp, "memory isn't traceable")

        if filename.startswith("<"):
            # Lots of non-file execution is represented with artificial
            # file names like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>". Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return nope(disp, "file name is not real")

        canonical = canonical_filename(filename)
        disp.canonical_filename = canonical

        # Try the plugins, see if they have an opinion about the file.
        plugin = None
        for plugin in self.plugins.file_tracers:
            if not plugin._coverage_enabled:
                continue

            try:
                file_tracer = plugin.file_tracer(canonical)
                if file_tracer is not None:
                    # This plugin claims the file; record the tracer on disp.
                    file_tracer._coverage_plugin = plugin
                    disp.trace = True
                    disp.file_tracer = file_tracer
                    if file_tracer.has_dynamic_source_filename():
                        disp.has_dynamic_filename = True
                    else:
                        disp.source_filename = canonical_filename(
                            file_tracer.source_filename(),
                        )
                    break
            except Exception:
                # A misbehaving plugin is disabled rather than crashing
                # measurement; the traceback goes into the warning.
                plugin_name = plugin._coverage_plugin_name
                tb = traceback.format_exc()
                self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}")
                plugin._coverage_enabled = False
                continue
        else:
            # for/else: only runs when no plugin `break`ed out above.
            # No plugin wanted it: it's Python.
            disp.trace = True
            disp.source_filename = canonical

        if not disp.has_dynamic_filename:
            if not disp.source_filename:
                raise PluginError(
                    f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'",
                )
            reason = self.check_include_omit_etc(disp.source_filename, frame)
            if reason:
                nope(disp, reason)

        return disp
|
||||
    def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None:
        """Check a file name against the include, omit, etc, rules.

        Returns a string or None. String means, don't trace, and is the reason
        why. None means no reason found to not trace.

        """
        modulename = name_for_module(filename, frame)

        # If the user specified source or include, then that's authoritative
        # about the outer bound of what to measure and we don't have to apply
        # any canned exclusions. If they didn't, then we have to exclude the
        # stdlib and coverage.py directories.
        if self.source_match or self.source_pkgs_match:
            extra = ""
            ok = False
            if self.source_pkgs_match:
                if self.source_pkgs_match.match(modulename):
                    ok = True
                    # Record that this requested package has now been seen.
                    if modulename in self.source_pkgs_unmatched:
                        self.source_pkgs_unmatched.remove(modulename)
                else:
                    extra = f"module {modulename!r} "
            if not ok and self.source_match:
                if self.source_match.match(filename):
                    ok = True
            if not ok:
                return extra + "falls outside the --source spec"
            # Even inside --source, skip files that were installed into a
            # third-party area unless the source itself lives there.
            if self.third_match.match(filename) and not self.source_in_third_match.match(filename):
                return "inside --source, but is third-party"
        elif self.include_match:
            if not self.include_match.match(filename):
                return "falls outside the --include trees"
        else:
            # We exclude the coverage.py code itself, since a little of it
            # will be measured otherwise.
            if self.cover_match.match(filename):
                return "is part of coverage.py"

            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(filename):
                return "is in the stdlib"

            # Exclude anything in the third-party installation areas.
            if self.third_match.match(filename):
                return "is a third-party module"

        # Check the file against the omit pattern.
        if self.omit_match and self.omit_match.match(filename):
            return "is inside an --omit pattern"

        # No point tracing a file we can't later write to SQLite.
        try:
            filename.encode("utf-8")
        except UnicodeEncodeError:
            return "non-encodable filename"

        # No reason found to skip this file.
        return None
|
||||
def warn_conflicting_settings(self) -> None:
    """Warn if there are settings that conflict."""
    # --include has no effect once --source (directories or packages) is set.
    using_source = bool(self.source or self.source_pkgs)
    if self.include and using_source:
        self.warn("--include is ignored because --source is set", slug="include-ignored")
||||
def warn_already_imported_files(self) -> None:
    """Warn if files have already been imported that we will be measuring.

    Modules imported before measurement starts can never be fully traced, so
    each such file produces an "already-imported" warning (at most once per
    file name).
    """
    if self.include or self.source or self.source_pkgs:
        warned = set()
        for mod in list(sys.modules.values()):
            filename = getattr(mod, "__file__", None)
            if filename is None:
                continue
            if filename in warned:
                continue

            if len(getattr(mod, "__path__", ())) > 1:
                # A namespace package, which confuses this code, so ignore it.
                continue

            disp = self.should_trace(filename)
            if disp.has_dynamic_filename:
                # A plugin with dynamic filenames: the Python file
                # shouldn't cause a warning, since it won't be the subject
                # of tracing anyway.
                continue
            if disp.trace:
                # BUG FIX: the message previously contained no interpolation
                # ("... measured: (unknown)"), so the warning never named the
                # offending file. Include the file name.
                msg = f"Already imported a file that will be measured: {filename}"
                self.warn(msg, slug="already-imported")
                warned.add(filename)
            elif self.debug and self.debug.should("trace"):
                self.debug.write(
                    "Didn't trace already imported file {!r}: {}".format(
                        disp.original_filename, disp.reason,
                    ),
                )
|
||||
def warn_unimported_source(self) -> None:
    """Warn about source packages that were of interest, but never traced."""
    # Whatever remains in the "unmatched" list was requested but never seen.
    for unseen_pkg in self.source_pkgs_unmatched:
        self._warn_about_unmeasured_code(unseen_pkg)
|
||||
def _warn_about_unmeasured_code(self, pkg: str) -> None:
    """Warn about a package or module that we never traced.

    `pkg` is a string, the name of the package or module.

    """
    module = sys.modules.get(pkg)

    # Simplest explanation: it was never imported at all.
    if module is None:
        self.warn(f"Module {pkg} was never imported.", slug="module-not-imported")
        return

    # A namespace package. It's OK for this not to have been traced,
    # since there is no code directly in it.
    if module_is_namespace(module):
        return

    # No Python file behind it (e.g. an extension module): nothing to measure.
    if not module_has_file(module):
        self.warn(f"Module {pkg} has no Python source.", slug="module-not-python")
        return

    # The module was in sys.modules, and seems like a module with code, but
    # we never measured it. I guess that means it was imported before
    # coverage even started.
    self.warn(
        f"Module {pkg} was previously imported, but not measured",
        slug="module-not-measured",
    )
|
||||
def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]:
    """Find files in the areas of interest that might be untraced.

    Yields pairs: file path, and responsible plug-in name.
    """
    # Requested packages: walk the directory each imported package lives in.
    for pkg in self.source_pkgs:
        if pkg not in sys.modules:
            continue
        if not module_has_file(sys.modules[pkg]):
            continue
        pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__))
        yield from self._find_executable_files(canonical_path(pkg_file))

    # Requested source directories: walk them directly.
    for src_dir in self.source:
        yield from self._find_executable_files(src_dir)
|
||||
def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]:
    """Get executable files from the plugins."""
    for tracer_plugin in self.plugins.file_tracers:
        plugin_label = tracer_plugin._coverage_plugin_name
        for exec_file in tracer_plugin.find_executable_files(src_dir):
            yield exec_file, plugin_label
|
||||
def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]:
    """Find executable files in `src_dir`.

    Search for files in `src_dir` that can be executed because they
    are probably importable. Don't include ones that have been omitted
    by the configuration.

    Yield the file path, and the plugin name that handles the file.

    """
    # Plain Python files carry no plugin (None); plugin-discovered files
    # carry the name of the plugin that claimed them.
    python_candidates = (
        (candidate, None)
        for candidate in find_python_files(src_dir, self.include_namespace_packages)
    )
    plugin_candidates = self._find_plugin_files(src_dir)

    for raw_path, plugin_name in itertools.chain(python_candidates, plugin_candidates):
        found_path = canonical_filename(raw_path)
        # Turns out this file was omitted, so don't pull it back
        # in as un-executed.
        if self.omit_match and self.omit_match.match(found_path):
            continue
        yield found_path, plugin_name
|
||||
def sys_info(self) -> Iterable[tuple[str, Any]]:
    """Our information for Coverage.sys_info.

    Returns a list of (key, value) pairs.
    """
    info: list[tuple[str, Any]] = [
        ("coverage_paths", self.cover_paths),
        ("stdlib_paths", self.pylib_paths),
        ("third_party_paths", self.third_paths),
        ("source_in_third_party_paths", self.source_in_third_paths),
    ]

    # Each matcher reports its own details; unset matchers show "-none-".
    matcher_names = [
        "source_match", "source_pkgs_match",
        "include_match", "omit_match",
        "cover_match", "pylib_match", "third_match", "source_in_third_match",
    ]
    for matcher_name in matcher_names:
        matcher = getattr(self, matcher_name)
        info.append((matcher_name, matcher.info() if matcher else "-none-"))

    return info
|
||||
134
.venv/lib/python3.10/site-packages/coverage/jsonreport.py
Normal file
134
.venv/lib/python3.10/site-packages/coverage/jsonreport.py
Normal file
|
|
@ -0,0 +1,134 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Json reporting for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import sys
|
||||
|
||||
from typing import Any, IO, Iterable, TYPE_CHECKING
|
||||
|
||||
from coverage import __version__
|
||||
from coverage.report_core import get_analysis_to_report
|
||||
from coverage.results import Analysis, Numbers
|
||||
from coverage.types import TMorf, TLineNo
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
from coverage.data import CoverageData
|
||||
|
||||
|
||||
# "Version 1" had no format number at all.
|
||||
# 2: add the meta.format field.
|
||||
FORMAT_VERSION = 2
|
||||
|
||||
class JsonReporter:
    """A reporter for writing JSON coverage results."""

    report_type = "JSON report"

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = self.coverage.config
        # Running totals across all reported files.
        self.total = Numbers(self.config.precision)
        # The dict that will be serialized as the final JSON document.
        self.report_data: dict[str, Any] = {}

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Generate a json report for `morfs`.

        `morfs` is a list of modules or file names.

        `outfile` is a file object to write the json to.

        Returns the total percent covered (0 if nothing was measured).
        """
        outfile = outfile or sys.stdout
        coverage_data = self.coverage.get_data()
        coverage_data.set_query_contexts(self.config.report_contexts)
        self.report_data["meta"] = {
            "format": FORMAT_VERSION,
            "version": __version__,
            "timestamp": datetime.datetime.now().isoformat(),
            "branch_coverage": coverage_data.has_arcs(),
            "show_contexts": self.config.json_show_contexts,
        }

        # One entry per measured file, keyed by its relative file name.
        # report_one_file also accumulates into self.total as a side effect.
        measured_files = {}
        for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
            measured_files[file_reporter.relative_filename()] = self.report_one_file(
                coverage_data,
                analysis,
            )

        self.report_data["files"] = measured_files

        self.report_data["totals"] = {
            "covered_lines": self.total.n_executed,
            "num_statements": self.total.n_statements,
            "percent_covered": self.total.pc_covered,
            "percent_covered_display": self.total.pc_covered_str,
            "missing_lines": self.total.n_missing,
            "excluded_lines": self.total.n_excluded,
        }

        if coverage_data.has_arcs():
            self.report_data["totals"].update({
                "num_branches": self.total.n_branches,
                "num_partial_branches": self.total.n_partial_branches,
                "covered_branches": self.total.n_executed_branches,
                "missing_branches": self.total.n_missing_branches,
            })

        json.dump(
            self.report_data,
            outfile,
            indent=(4 if self.config.json_pretty_print else None),
        )

        # 0 when there were no statements at all, else the percent covered.
        return self.total.n_statements and self.total.pc_covered

    def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> dict[str, Any]:
        """Extract the relevant report data for a single file."""
        nums = analysis.numbers
        # Accumulate this file's numbers into the grand total.
        self.total += nums
        summary = {
            "covered_lines": nums.n_executed,
            "num_statements": nums.n_statements,
            "percent_covered": nums.pc_covered,
            "percent_covered_display": nums.pc_covered_str,
            "missing_lines": nums.n_missing,
            "excluded_lines": nums.n_excluded,
        }
        reported_file = {
            "executed_lines": sorted(analysis.executed),
            "summary": summary,
            "missing_lines": sorted(analysis.missing),
            "excluded_lines": sorted(analysis.excluded),
        }
        if self.config.json_show_contexts:
            reported_file["contexts"] = analysis.data.contexts_by_lineno(analysis.filename)
        if coverage_data.has_arcs():
            # `summary` is already referenced by reported_file, so updating it
            # here also updates the reported data.
            summary.update({
                "num_branches": nums.n_branches,
                "num_partial_branches": nums.n_partial_branches,
                "covered_branches": nums.n_executed_branches,
                "missing_branches": nums.n_missing_branches,
            })
            reported_file["executed_branches"] = list(
                _convert_branch_arcs(analysis.executed_branch_arcs()),
            )
            reported_file["missing_branches"] = list(
                _convert_branch_arcs(analysis.missing_branch_arcs()),
            )
        return reported_file
|
||||
|
||||
def _convert_branch_arcs(
|
||||
branch_arcs: dict[TLineNo, list[TLineNo]],
|
||||
) -> Iterable[tuple[TLineNo, TLineNo]]:
|
||||
"""Convert branch arcs to a list of two-element tuples."""
|
||||
for source, targets in branch_arcs.items():
|
||||
for target in targets:
|
||||
yield source, target
|
||||
130
.venv/lib/python3.10/site-packages/coverage/lcovreport.py
Normal file
130
.venv/lib/python3.10/site-packages/coverage/lcovreport.py
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""LCOV reporting for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import sys
|
||||
|
||||
from typing import IO, Iterable, TYPE_CHECKING
|
||||
|
||||
from coverage.plugin import FileReporter
|
||||
from coverage.report_core import get_analysis_to_report
|
||||
from coverage.results import Analysis, Numbers
|
||||
from coverage.types import TMorf
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
|
||||
|
||||
def line_hash(line: str) -> str:
    """Produce a hash of a source line for use in the LCOV file."""
    # genhtml chokes on base64 padding, so the trailing "=" signs are removed.
    digest = hashlib.md5(line.encode("utf-8")).digest()
    encoded = base64.b64encode(digest).decode("ascii")
    return encoded.rstrip("=")
|
||||
|
||||
class LcovReporter:
    """A reporter for writing LCOV coverage reports."""

    report_type = "LCOV report"

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        # Running totals across all reported files.
        self.total = Numbers(self.coverage.config.precision)

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Renders the full lcov report.

        `morfs` is a list of modules or filenames

        outfile is the file object to write the file into.

        Returns the total percent covered (0 if nothing was measured).
        """

        self.coverage.get_data()
        outfile = outfile or sys.stdout

        # One lcov record per measured file, all concatenated into `outfile`.
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.get_lcov(fr, analysis, outfile)

        return self.total.n_statements and self.total.pc_covered

    def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> None:
        """Produces the lcov data for a single file.

        This currently supports both line and branch coverage,
        however function coverage is not supported.
        """
        self.total += analysis.numbers

        outfile.write("TN:\n")
        outfile.write(f"SF:{fr.relative_filename()}\n")
        source_lines = fr.source().splitlines()
        for covered in sorted(analysis.executed):
            if covered in analysis.excluded:
                # Do not report excluded as executed
                continue
            # Note: Coverage.py currently only supports checking *if* a line
            # has been executed, not how many times, so we set this to 1 for
            # nice output even if it's technically incorrect.

            # The lines below calculate a 64-bit encoded md5 hash of the line
            # corresponding to the DA lines in the lcov file, for either case
            # of the line being covered or missed in coverage.py. The final two
            # characters of the encoding ("==") are removed from the hash to
            # allow genhtml to run on the resulting lcov file.
            if source_lines:
                # Executed line numbers can exceed the current source (e.g.
                # the file changed since measurement); stop at the end.
                if covered-1 >= len(source_lines):
                    break
                line = source_lines[covered-1]
            else:
                line = ""
            outfile.write(f"DA:{covered},1,{line_hash(line)}\n")

        for missed in sorted(analysis.missing):
            # We don't have to skip excluded lines here, because `missing`
            # already doesn't have them.
            assert source_lines
            line = source_lines[missed-1]
            outfile.write(f"DA:{missed},0,{line_hash(line)}\n")

        # LF/LH: total and hit line counts for this file.
        outfile.write(f"LF:{analysis.numbers.n_statements}\n")
        outfile.write(f"LH:{analysis.numbers.n_executed}\n")

        # More information dense branch coverage data.
        missing_arcs = analysis.missing_branch_arcs()
        executed_arcs = analysis.executed_branch_arcs()
        for block_number, block_line_number in enumerate(
            sorted(analysis.branch_stats().keys()),
        ):
            for branch_number, line_number in enumerate(
                sorted(missing_arcs[block_line_number]),
            ):
                # The exit branches have a negative line number,
                # this will not produce valid lcov. Setting
                # the line number of the exit branch to 0 will allow
                # for valid lcov, while preserving the data.
                line_number = max(line_number, 0)
                outfile.write(f"BRDA:{line_number},{block_number},{branch_number},-\n")

            # The start value below allows for the block number to be
            # preserved between these two for loops (stopping the loop from
            # resetting the value of the block number to 0).
            for branch_number, line_number in enumerate(
                sorted(executed_arcs[block_line_number]),
                start=len(missing_arcs[block_line_number]),
            ):
                line_number = max(line_number, 0)
                outfile.write(f"BRDA:{line_number},{block_number},{branch_number},1\n")

        # Summary of the branch coverage.
        if analysis.has_arcs():
            branch_stats = analysis.branch_stats()
            # BRF: branches found; BRH: branches hit (found minus missed).
            brf = sum(t for t, k in branch_stats.values())
            brh = brf - sum(t - k for t, k in branch_stats.values())
            outfile.write(f"BRF:{brf}\n")
            outfile.write(f"BRH:{brh}\n")

        outfile.write("end_of_record\n")
||||
403
.venv/lib/python3.10/site-packages/coverage/misc.py
Normal file
403
.venv/lib/python3.10/site-packages/coverage/misc.py
Normal file
|
|
@ -0,0 +1,403 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Miscellaneous stuff for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import datetime
|
||||
import errno
|
||||
import hashlib
|
||||
import importlib
|
||||
import importlib.util
|
||||
import inspect
|
||||
import locale
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
import types
|
||||
|
||||
from types import ModuleType
|
||||
from typing import (
|
||||
Any, Callable, IO, Iterable, Iterator, Mapping, NoReturn, Sequence, TypeVar,
|
||||
)
|
||||
|
||||
from coverage import env
|
||||
from coverage.exceptions import CoverageException
|
||||
from coverage.types import TArc
|
||||
|
||||
# In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of
|
||||
# other packages were importing the exceptions from misc, so import them here.
|
||||
# pylint: disable=unused-wildcard-import
|
||||
from coverage.exceptions import * # pylint: disable=wildcard-import
|
||||
|
||||
# Cache of module copies: each real module maps to its private clone.
ISOLATED_MODULES: dict[ModuleType, ModuleType] = {}


def isolate_module(mod: ModuleType) -> ModuleType:
    """Copy a module so that we are isolated from aggressive mocking.

    If a test suite mocks os.path.exists (for example), and then we need to use
    it during the test, everything will get tangled up if we use their mock.
    Making a copy of the module when we import it will isolate coverage.py from
    those complications.
    """
    copied = ISOLATED_MODULES.get(mod)
    if copied is None:
        copied = types.ModuleType(mod.__name__)
        # Register before copying attributes, so that modules that refer to
        # each other (os <-> os.path) don't recurse forever.
        ISOLATED_MODULES[mod] = copied
        for attr_name in dir(mod):
            attr_value = getattr(mod, attr_name)
            # Sub-modules get isolated too, so mocks on them can't reach us.
            if isinstance(attr_value, types.ModuleType):
                attr_value = isolate_module(attr_value)
            setattr(copied, attr_name, attr_value)
    return copied


os = isolate_module(os)
|
||||
|
||||
class SysModuleSaver:
    """Saves the contents of sys.modules, and removes new modules later."""

    def __init__(self) -> None:
        # Only the names matter; the module objects themselves are untouched.
        self.old_modules = set(sys.modules)

    def restore(self) -> None:
        """Remove any modules imported since this object started."""
        for added_name in set(sys.modules) - self.old_modules:
            del sys.modules[added_name]
|
||||
|
||||
@contextlib.contextmanager
def sys_modules_saved() -> Iterator[None]:
    """A context manager to remove any modules imported during a block."""
    snapshot = SysModuleSaver()
    try:
        yield
    finally:
        # Roll back even if the block raised.
        snapshot.restore()
||||
|
||||
|
||||
def import_third_party(modname: str) -> tuple[ModuleType, bool]:
    """Import a third-party module we need, but might not be installed.

    This also cleans out the module after the import, so that coverage won't
    appear to have imported it. This lets the third party use coverage for
    their own tests.

    Arguments:
        modname (str): the name of the module to import.

    Returns:
        The imported module, and a boolean indicating if the module could be imported.

    If the boolean is False, the module returned is not the one you want: don't use it.

    """
    # The sys.modules snapshot makes the import invisible afterward.
    with sys_modules_saved():
        try:
            imported = importlib.import_module(modname)
        except ImportError:
            # Hand back *something* module-shaped; the False flag tells the
            # caller not to use it.
            return sys, False
        return imported, True
||||
|
||||
|
||||
def nice_pair(pair: TArc) -> str:
    """Make a nice string representation of a pair of numbers.

    If the numbers are equal, just return the number, otherwise return the pair
    with a dash between them, indicating the range.

    """
    start, end = pair
    if start != end:
        return "%d-%d" % (start, end)
    return "%d" % start
||||
|
||||
|
||||
TSelf = TypeVar("TSelf")
TRetVal = TypeVar("TRetVal")


def expensive(fn: Callable[[TSelf], TRetVal]) -> Callable[[TSelf], TRetVal]:
    """A decorator to indicate that a method shouldn't be called more than once.

    Normally, this does nothing. During testing, this raises an exception if
    called more than once.

    """
    if not env.TESTING:
        return fn  # pragma: not testing

    guard_attr = "_once_" + fn.__name__

    def _wrapper(self: TSelf) -> TRetVal:
        # The guard attribute is stamped onto the instance on first call.
        if hasattr(self, guard_attr):
            raise AssertionError(f"Shouldn't have called {fn.__name__} more than once")
        setattr(self, guard_attr, True)
        return fn(self)

    return _wrapper
|
||||
|
||||
def bool_or_none(b: Any) -> bool | None:
    """Return bool(b), but preserve None."""
    # None means "not set" and must survive the conversion untouched.
    return None if b is None else bool(b)
||||
|
||||
|
||||
def join_regex(regexes: Iterable[str]) -> str:
    """Combine a series of regex strings into one that matches any of them."""
    rx_list = list(regexes)
    # A single pattern is passed through untouched; otherwise each pattern is
    # isolated in a non-capturing group before being OR'ed together.
    if len(rx_list) != 1:
        return "|".join(f"(?:{rx})" for rx in rx_list)
    return rx_list[0]
||||
|
||||
|
||||
def file_be_gone(path: str) -> None:
    """Remove a file, and don't get annoyed if it doesn't exist."""
    # ENOENT ("no such file") means the goal is already achieved; any other
    # OS-level failure still propagates to the caller.
    with contextlib.suppress(FileNotFoundError):
        os.remove(path)
|
||||
|
||||
def ensure_dir(directory: str) -> None:
    """Make sure the directory exists.

    If `directory` is None or empty, do nothing.
    """
    if not directory:
        return
    # exist_ok makes repeated calls and pre-existing directories harmless.
    os.makedirs(directory, exist_ok=True)
||||
|
||||
|
||||
def ensure_dir_for_file(path: str) -> None:
    """Make sure the directory for the path exists."""
    parent = os.path.dirname(path)
    ensure_dir(parent)
||||
|
||||
|
||||
def output_encoding(outfile: IO[str] | None = None) -> str:
    """Determine the encoding to use for output written to `outfile` or stdout."""
    stream = sys.stdout if outfile is None else outfile
    # Fall through a chain of candidates: the stream itself, the real stdout,
    # then the locale's preferred encoding.
    return (
        getattr(stream, "encoding", None)
        or getattr(sys.__stdout__, "encoding", None)
        or locale.getpreferredencoding()
    )
|
||||
|
||||
class Hasher:
    """Hashes Python data for fingerprinting."""

    def __init__(self) -> None:
        # SHA3-256 underneath; hexdigest() exposes a truncated hex form.
        self.hash = hashlib.new("sha3_256")

    def update(self, v: Any) -> None:
        """Add `v` to the hash, recursively if needed."""
        feed = self.hash.update
        # The value's type participates in the fingerprint, so "1" and 1 and
        # 1.0 all hash differently.
        feed(str(type(v)).encode("utf-8"))
        if isinstance(v, str):
            feed(v.encode("utf-8"))
        elif isinstance(v, bytes):
            feed(v)
        elif v is None:
            pass
        elif isinstance(v, (int, float)):
            feed(str(v).encode("utf-8"))
        elif isinstance(v, (tuple, list)):
            for element in v:
                self.update(element)
        elif isinstance(v, dict):
            # Walk keys in sorted order so dict insertion order can't matter.
            for key in sorted(v.keys()):
                self.update(key)
                self.update(v[key])
        else:
            # Arbitrary objects: fingerprint their public, non-callable
            # attributes in dir() order.
            for attr_name in dir(v):
                if attr_name.startswith("__"):
                    continue
                attr_value = getattr(v, attr_name)
                if inspect.isroutine(attr_value):
                    continue
                self.update(attr_name)
                self.update(attr_value)
        # Terminator so nested structures can't collide with flat sequences.
        feed(b".")

    def hexdigest(self) -> str:
        """Retrieve the hex digest of the hash."""
        # 32 hex chars (128 bits) is plenty for fingerprinting.
        return self.hash.hexdigest()[:32]
|
||||
|
||||
def _needs_to_implement(that: Any, func_name: str) -> NoReturn:
|
||||
"""Helper to raise NotImplementedError in interface stubs."""
|
||||
if hasattr(that, "_coverage_plugin_name"):
|
||||
thing = "Plugin"
|
||||
name = that._coverage_plugin_name
|
||||
else:
|
||||
thing = "Class"
|
||||
klass = that.__class__
|
||||
name = f"{klass.__module__}.{klass.__name__}"
|
||||
|
||||
raise NotImplementedError(
|
||||
f"{thing} {name!r} needs to implement {func_name}()",
|
||||
)
|
||||
|
||||
|
||||
class DefaultValue:
    """A sentinel object to use for unusual default-value needs.

    Construct with a string that will be used as the repr, for display in
    help and Sphinx output.

    """

    def __init__(self, display_as: str) -> None:
        # The text shown anywhere this sentinel gets displayed.
        self.display_as = display_as

    def __repr__(self) -> str:
        return self.display_as
|
||||
|
||||
|
||||
def substitute_variables(text: str, variables: Mapping[str, str]) -> str:
    """Substitute ``${VAR}`` variables in `text` with their values.

    Variables in the text can take a number of shell-inspired forms::

        $VAR
        ${VAR}
        ${VAR?}             strict: an error if VAR isn't defined.
        ${VAR-missing}      defaulted: "missing" if VAR isn't defined.
        $$                  just a dollar sign.

    `variables` is a dictionary of variable values.

    Returns the resulting text with values substituted.

    """
    dollar_pattern = r"""(?x)   # Use extended regex syntax
        \$                      # A dollar sign,
        (?:                     # then
            (?P<dollar>\$) |        # a dollar sign, or
            (?P<word1>\w+) |        # a plain word, or
            {                       # a {-wrapped
                (?P<word2>\w+)          # word,
                (?:
                    (?P<strict>\?) |        # with a strict marker
                    -(?P<defval>[^}]*)      # or a default value
                )?                      # maybe.
            }
        )
        """

    def dollar_replace(match: re.Match[str]) -> str:
        """Compute the replacement text for one matched $-expression."""
        # Exactly one of these three groups matched; take its text.
        word = next(g for g in match.group("dollar", "word1", "word2") if g)  # pragma: always breaks
        if word == "$":
            return "$"
        if word in variables:
            return variables[word]
        if match["strict"]:
            msg = f"Variable {word} is undefined: {text!r}"
            raise CoverageException(msg)
        return match["defval"]

    return re.sub(dollar_pattern, dollar_replace, text)
|
||||
|
||||
|
||||
def format_local_datetime(dt: datetime.datetime) -> str:
    """Return a string with local timezone representing the date."""
    local_dt = dt.astimezone()
    return local_dt.strftime("%Y-%m-%d %H:%M %z")
|
||||
|
||||
|
||||
def import_local_file(modname: str, modfile: str | None = None) -> ModuleType:
    """Import a local file as a module.

    Opens a file in the current directory named `modname`.py, imports it
    as `modname`, and returns the module object.  `modfile` is the file to
    import if it isn't in the current directory.

    """
    if modfile is None:
        modfile = modname + ".py"
    spec = importlib.util.spec_from_file_location(modname, modfile)
    assert spec is not None
    module = importlib.util.module_from_spec(spec)
    # Register before executing, so self-imports during exec_module resolve.
    sys.modules[modname] = module
    assert spec.loader is not None
    spec.loader.exec_module(module)
    return module
|
||||
|
||||
|
||||
def _human_key(s: str) -> tuple[list[str | int], str]:
|
||||
"""Turn a string into a list of string and number chunks.
|
||||
|
||||
"z23a" -> (["z", 23, "a"], "z23a")
|
||||
|
||||
The original string is appended as a last value to ensure the
|
||||
key is unique enough so that "x1y" and "x001y" can be distinguished.
|
||||
"""
|
||||
def tryint(s: str) -> str | int:
|
||||
"""If `s` is a number, return an int, else `s` unchanged."""
|
||||
try:
|
||||
return int(s)
|
||||
except ValueError:
|
||||
return s
|
||||
|
||||
return ([tryint(c) for c in re.split(r"(\d+)", s)], s)
|
||||
|
||||
def human_sorted(strings: Iterable[str]) -> list[str]:
    """Sort the given iterable of strings the way that humans expect.

    Numeric components in the strings are sorted as numbers.

    Returns the sorted list.

    """
    # _human_key splits each string into (text, number, text, ...) chunks.
    return sorted(strings, key=_human_key)
|
||||
|
||||
SortableItem = TypeVar("SortableItem", bound=Sequence[Any])
|
||||
|
||||
def human_sorted_items(
    items: Iterable[SortableItem],
    reverse: bool = False,
) -> list[SortableItem]:
    """Sort (string, ...) items the way humans expect.

    The elements of `items` can be any tuple/list.  They'll be sorted by the
    first element (a string), with ties broken by the remaining elements.

    Returns the sorted list of items.
    """
    def sort_key(item: SortableItem) -> tuple:
        # Human-order the leading string; remaining fields break ties.
        return (_human_key(item[0]), *item[1:])

    return sorted(items, key=sort_key, reverse=reverse)
|
||||
|
||||
|
||||
def plural(n: int, thing: str = "", things: str = "") -> str:
    """Pluralize a word.

    If n is 1, return `thing`.  Otherwise return `things`, or `thing` + "s".
    """
    if n != 1:
        return things or (thing + "s")
    return thing
|
||||
|
||||
|
||||
def stdout_link(text: str, url: str) -> str:
    """Format text+url as a clickable link for stdout.

    If attached to a terminal, use escape sequences.  Otherwise, just return
    the text.
    """
    on_terminal = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
    if not on_terminal:
        return text
    # OSC 8 hyperlink escape sequence.
    return f"\033]8;;{url}\a{text}\033]8;;\a"
|
||||
115
.venv/lib/python3.10/site-packages/coverage/multiproc.py
Normal file
115
.venv/lib/python3.10/site-packages/coverage/multiproc.py
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Monkey-patching to add multiprocessing support for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import multiprocessing
|
||||
import multiprocessing.process
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from typing import Any
|
||||
|
||||
from coverage.debug import DebugControl
|
||||
|
||||
# An attribute that will be set on the module to indicate that it has been
|
||||
# monkey-patched.
|
||||
PATCHED_MARKER = "_coverage$patched"
|
||||
|
||||
|
||||
# Keep references to the real Process class and its _bootstrap so the patch
# can delegate to the original behavior (and be applied only once).
OriginalProcess = multiprocessing.process.BaseProcess
original_bootstrap = OriginalProcess._bootstrap  # type: ignore[attr-defined]


class ProcessWithCoverage(OriginalProcess):             # pylint: disable=abstract-method
    """A replacement for multiprocess.Process that starts coverage."""

    def _bootstrap(self, *args, **kwargs):  # type: ignore[no-untyped-def]
        """Wrapper around _bootstrap to start coverage.

        Runs in the child process: starts a Coverage object before delegating
        to the original bootstrap, and stops/saves it afterwards.
        """
        debug: DebugControl | None = None
        try:
            from coverage import Coverage       # avoid circular import
            # auto_data=True and data_suffix=True make the child write its own
            # uniquely-named data file that `coverage combine` can pick up.
            cov = Coverage(data_suffix=True, auto_data=True)
            cov._warn_preimported_source = False
            cov.start()
            _debug = cov._debug
            assert _debug is not None
            # Only keep a debug handle if "multiproc" debugging is enabled.
            if _debug.should("multiproc"):
                debug = _debug
            if debug:
                debug.write("Calling multiprocessing bootstrap")
        except Exception:
            # Make start-up failures visible in the child before re-raising.
            print("Exception during multiprocessing bootstrap init:", file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            sys.stderr.flush()
            raise
        try:
            return original_bootstrap(self, *args, **kwargs)
        finally:
            # Always stop and save coverage data, even if the child's target
            # raised.
            if debug:
                debug.write("Finished multiprocessing bootstrap")
            try:
                cov.stop()
                cov.save()
            except Exception as exc:
                if debug:
                    debug.write("Exception during multiprocessing bootstrap cleanup", exc=exc)
                raise
            if debug:
                debug.write("Saved multiprocessing data")
|
||||
|
||||
class Stowaway:
    """An object to pickle, so when it is unpickled, it can apply the monkey-patch."""

    def __init__(self, rcfile: str) -> None:
        # The rcfile path the child process should configure coverage with.
        self.rcfile = rcfile

    def __getstate__(self) -> dict[str, str]:
        # Only the rcfile path travels across the pickle boundary.
        return {"rcfile": self.rcfile}

    def __setstate__(self, state: dict[str, str]) -> None:
        # Unpickling happens in the spawned child: re-apply the patch there.
        patch_multiprocessing(state["rcfile"])
|
||||
|
||||
|
||||
def patch_multiprocessing(rcfile: str) -> None:
    """Monkey-patch the multiprocessing module.

    This enables coverage measurement of processes started by multiprocessing.
    This involves aggressive monkey-patching.

    `rcfile` is the path to the rcfile being used.

    """

    # Idempotent: the marker attribute means we've already patched.
    if hasattr(multiprocessing, PATCHED_MARKER):
        return

    # Replace the bootstrap of every Process subclass with the coverage-aware
    # wrapper.
    OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap     # type: ignore[attr-defined]

    # Set the value in ProcessWithCoverage that will be pickled into the child
    # process.
    os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile)

    # When spawning processes rather than forking them, we have no state in the
    # new process.  We sneak in there with a Stowaway: we stuff one of our own
    # objects into the data that gets pickled and sent to the sub-process.  When
    # the Stowaway is unpickled, its __setstate__ method is called, which
    # re-applies the monkey-patch.
    # Windows only spawns, so this is needed to keep Windows working.
    try:
        from multiprocessing import spawn
        original_get_preparation_data = spawn.get_preparation_data
    except (ImportError, AttributeError):
        # Older/unusual multiprocessing layouts without spawn support: the
        # fork path above is still patched.
        pass
    else:
        def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]:
            """Get the original preparation data, and also insert our stowaway."""
            d = original_get_preparation_data(name)
            d["stowaway"] = Stowaway(rcfile)
            return d

        spawn.get_preparation_data = get_preparation_data_with_stowaway

    # Mark the module so a second call is a no-op.
    setattr(multiprocessing, PATCHED_MARKER, True)
|
||||
147
.venv/lib/python3.10/site-packages/coverage/numbits.py
Normal file
147
.venv/lib/python3.10/site-packages/coverage/numbits.py
Normal file
|
|
@ -0,0 +1,147 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""
|
||||
Functions to manipulate packed binary representations of number sets.
|
||||
|
||||
To save space, coverage stores sets of line numbers in SQLite using a packed
|
||||
binary representation called a numbits. A numbits is a set of positive
|
||||
integers.
|
||||
|
||||
A numbits is stored as a blob in the database. The exact meaning of the bytes
|
||||
in the blobs should be considered an implementation detail that might change in
|
||||
the future. Use these functions to work with those binary blobs of data.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
|
||||
from itertools import zip_longest
|
||||
from typing import Iterable
|
||||
|
||||
|
||||
def nums_to_numbits(nums: Iterable[int]) -> bytes:
    """Convert `nums` into a numbits.

    Arguments:
        nums: a reusable iterable of integers, the line numbers to store.

    Returns:
        A binary blob.
    """
    try:
        nbytes = max(nums) // 8 + 1
    except ValueError:
        # max() of an empty iterable: the empty set is the empty blob.
        return b""
    packed = bytearray(nbytes)
    for num in nums:
        byte_index, bit_index = divmod(num, 8)
        packed[byte_index] |= 1 << bit_index
    return bytes(packed)
|
||||
|
||||
|
||||
def numbits_to_nums(numbits: bytes) -> list[int]:
    """Convert a numbits into a list of numbers.

    Arguments:
        numbits: a binary blob, the packed number set.

    Returns:
        A list of ints.

    When registered as a SQLite function by :func:`register_sqlite_functions`,
    this returns a string, a JSON-encoded list of ints.

    """
    return [
        byte_index * 8 + bit_index
        for byte_index, byte in enumerate(numbits)
        for bit_index in range(8)
        if byte & (1 << bit_index)
    ]
|
||||
|
||||
|
||||
def numbits_union(numbits1: bytes, numbits2: bytes) -> bytes:
    """Compute the union of two numbits.

    Returns:
        A new numbits, the union of `numbits1` and `numbits2`.
    """
    # Pad the shorter blob with zero bytes so every byte has a partner.
    paired = zip_longest(numbits1, numbits2, fillvalue=0)
    return bytes(left | right for left, right in paired)
|
||||
|
||||
|
||||
def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes:
    """Compute the intersection of two numbits.

    Returns:
        A new numbits, the intersection `numbits1` and `numbits2`.
    """
    paired = zip_longest(numbits1, numbits2, fillvalue=0)
    result = bytes(left & right for left, right in paired)
    # Canonical form drops trailing zero bytes.
    return result.rstrip(b"\0")
|
||||
|
||||
|
||||
def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool:
    """Is there any number that appears in both numbits?

    Determine whether two number sets have a non-empty intersection.  This is
    faster than computing the intersection.

    Returns:
        A bool, True if there is any number in both `numbits1` and `numbits2`.
    """
    # Bytes beyond the shorter blob can't overlap, so plain zip suffices.
    return any(left & right for left, right in zip(numbits1, numbits2))
|
||||
|
||||
|
||||
def num_in_numbits(num: int, numbits: bytes) -> bool:
    """Does the integer `num` appear in `numbits`?

    Returns:
        A bool, True if `num` is a member of `numbits`.
    """
    byte_index, bit_index = divmod(num, 8)
    # A bit beyond the blob's length is absent by definition.
    if byte_index >= len(numbits):
        return False
    return bool(numbits[byte_index] & (1 << bit_index))
|
||||
|
||||
|
||||
def register_sqlite_functions(connection: sqlite3.Connection) -> None:
    """
    Define numbits functions in a SQLite connection.

    This defines these functions for use in SQLite statements:

    * :func:`numbits_union`
    * :func:`numbits_intersection`
    * :func:`numbits_any_intersection`
    * :func:`num_in_numbits`
    * :func:`numbits_to_nums`

    `connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>`
    object.  After creating the connection, pass it to this function to
    register the numbits functions.  Then you can use numbits functions in your
    queries::

        import sqlite3
        from coverage.numbits import register_sqlite_functions

        conn = sqlite3.connect("example.db")
        register_sqlite_functions(conn)
        c = conn.cursor()
        # Kind of a nonsense query:
        # Find all the files and contexts that executed line 47 in any file:
        c.execute(
            "select file_id, context_id from line_bits where num_in_numbits(?, numbits)",
            (47,)
        )
    """
    # The two-argument set operations are registered directly.
    for func_name, func in (
        ("numbits_union", numbits_union),
        ("numbits_intersection", numbits_intersection),
        ("numbits_any_intersection", numbits_any_intersection),
        ("num_in_numbits", num_in_numbits),
    ):
        connection.create_function(func_name, 2, func)
    # SQLite can't return a Python list, so this one is JSON-encoded.
    connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
|
||||
1365
.venv/lib/python3.10/site-packages/coverage/parser.py
Normal file
1365
.venv/lib/python3.10/site-packages/coverage/parser.py
Normal file
File diff suppressed because it is too large
Load diff
213
.venv/lib/python3.10/site-packages/coverage/phystokens.py
Normal file
213
.venv/lib/python3.10/site-packages/coverage/phystokens.py
Normal file
|
|
@ -0,0 +1,213 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Better tokenizing for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import io
|
||||
import keyword
|
||||
import re
|
||||
import sys
|
||||
import token
|
||||
import tokenize
|
||||
|
||||
from typing import Iterable
|
||||
|
||||
from coverage import env
|
||||
from coverage.types import TLineNo, TSourceTokenLines
|
||||
|
||||
|
||||
TokenInfos = Iterable[tokenize.TokenInfo]
|
||||
|
||||
|
||||
def _phys_tokens(toks: TokenInfos) -> TokenInfos:
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines.  This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    # State from the previous token, used to decide when a continuation
    # backslash needs to be synthesized.
    last_line: str | None = None
    last_lineno = -1
    last_ttext: str = ""
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            # First token that ends on a new line.
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash.  We probably have to inject a
                # backslash token into the stream.  Unfortunately, there's more
                # to figure out.  This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\":
                        # It's a multi-line string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield tokenize.TokenInfo(
                        99999, "\\\n",
                        (slineno, ccol), (slineno, ccol+2),
                        last_line,
                    )
            last_line = ltext
        # NEWLINE/NL tokens don't count as the "last text" for the backslash
        # check above.
        if ttype not in (tokenize.NEWLINE, tokenize.NL):
            last_ttext = ttext
        yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext)
        last_lineno = elineno
|
||||
|
||||
|
||||
class SoftKeywordFinder(ast.NodeVisitor):
    """Helper for finding lines with soft keywords, like match/case lines."""

    def __init__(self, source: str) -> None:
        # The set of line numbers that start with a soft keyword.
        self.soft_key_lines: set[TLineNo] = set()
        self.visit(ast.parse(source))

    if sys.version_info >= (3, 10):
        def visit_Match(self, node: ast.Match) -> None:
            """Invoked by ast.NodeVisitor.visit"""
            # The `match` line itself, plus every `case` pattern line.
            self.soft_key_lines.add(node.lineno)
            self.soft_key_lines.update(case.pattern.lineno for case in node.cases)
            self.generic_visit(node)

    if sys.version_info >= (3, 12):
        def visit_TypeAlias(self, node: ast.TypeAlias) -> None:
            """Invoked by ast.NodeVisitor.visit"""
            # `type X = ...` starts with the soft keyword `type`.
            self.soft_key_lines.add(node.lineno)
            self.generic_visit(node)
|
||||
|
||||
|
||||
def source_token_lines(source: str) -> TSourceTokenLines:
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing white space is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """

    # Token types that are pure whitespace/bookkeeping: they never become
    # visible (class, text) pairs.
    ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
    line: list[tuple[str, str]] = []
    col = 0

    # Normalize tabs and line endings so columns line up.
    source = source.expandtabs(8).replace("\r\n", "\n")
    tokgen = generate_tokens(source)

    if env.PYBEHAVIOR.soft_keywords:
        # Lines where soft keywords (match/case/type) really act as keywords.
        soft_key_lines = SoftKeywordFinder(source).soft_key_lines

    for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
        mark_start = True
        # Multi-line tokens (strings) are split so each physical line is
        # yielded separately.
        for part in re.split("(\n)", ttext):
            if part == "\n":
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == "":
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    # Insert explicit whitespace to reach the token's column.
                    line.append(("ws", " " * (scol - col)))
                    mark_start = False
                # Token class is the first three letters of the token name.
                tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
                if ttype == token.NAME:
                    if keyword.iskeyword(ttext):
                        # Hard keywords are always keywords.
                        tok_class = "key"
                    elif sys.version_info >= (3, 10):   # PYVERSIONS
                        # Need the version_info check to keep mypy from borking
                        # on issoftkeyword here.
                        if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
                            # Soft keywords appear at the start of the line,
                            # on lines that start match or case statements.
                            if len(line) == 0:
                                is_start_of_line = True
                            elif (len(line) == 1) and line[0][0] == "ws":
                                is_start_of_line = True
                            else:
                                is_start_of_line = False
                            if is_start_of_line and sline in soft_key_lines:
                                tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            # After the first physical line of a token, later parts start at
            # column 0.
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line
|
||||
|
||||
|
||||
class CachedTokenizer:
    """A one-element cache around tokenize.generate_tokens.

    When reporting, coverage.py tokenizes files twice, once to find the
    structure of the file, and once to syntax-color it.  Tokenizing is
    expensive, and easily cached.

    This is a one-element cache so that our twice-in-a-row tokenizing doesn't
    actually tokenize twice.

    """

    def __init__(self) -> None:
        # The most recently tokenized text, and its tokens.
        self.last_text: str | None = None
        self.last_tokens: list[tokenize.TokenInfo] = []

    def generate_tokens(self, text: str) -> TokenInfos:
        """A stand-in for `tokenize.generate_tokens`."""
        if text == self.last_text:
            return self.last_tokens
        self.last_text = text
        reader = io.StringIO(text).readline
        try:
            self.last_tokens = list(tokenize.generate_tokens(reader))
        except:
            # Invalidate the cache so a failed tokenization isn't reused.
            self.last_text = None
            raise
        return self.last_tokens
|
||||
|
||||
# Create our generate_tokens cache as a callable replacement function.
|
||||
generate_tokens = CachedTokenizer().generate_tokens
|
||||
|
||||
|
||||
def source_encoding(source: bytes) -> str:
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    # detect_encoding wants a readline-style callable over the raw bytes.
    line_iter = iter(source.splitlines(True))
    encoding, _ = tokenize.detect_encoding(line_iter.__next__)
    return encoding
|
||||
553
.venv/lib/python3.10/site-packages/coverage/plugin.py
Normal file
553
.venv/lib/python3.10/site-packages/coverage/plugin.py
Normal file
|
|
@ -0,0 +1,553 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""
|
||||
.. versionadded:: 4.0
|
||||
|
||||
Plug-in interfaces for coverage.py.
|
||||
|
||||
Coverage.py supports a few different kinds of plug-ins that change its
|
||||
behavior:
|
||||
|
||||
* File tracers implement tracing of non-Python file types.
|
||||
|
||||
* Configurers add custom configuration, using Python code to change the
|
||||
configuration.
|
||||
|
||||
* Dynamic context switchers decide when the dynamic context has changed, for
|
||||
example, to record what test function produced the coverage.
|
||||
|
||||
To write a coverage.py plug-in, create a module with a subclass of
|
||||
:class:`~coverage.CoveragePlugin`. You will override methods in your class to
|
||||
participate in various aspects of coverage.py's processing.
|
||||
Different types of plug-ins have to override different methods.
|
||||
|
||||
Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info`
|
||||
to provide debugging information about their operation.
|
||||
|
||||
Your module must also contain a ``coverage_init`` function that registers an
|
||||
instance of your plug-in class::
|
||||
|
||||
import coverage
|
||||
|
||||
class MyPlugin(coverage.CoveragePlugin):
|
||||
...
|
||||
|
||||
def coverage_init(reg, options):
|
||||
reg.add_file_tracer(MyPlugin())
|
||||
|
||||
You use the `reg` parameter passed to your ``coverage_init`` function to
|
||||
register your plug-in object. The registration method you call depends on
|
||||
what kind of plug-in it is.
|
||||
|
||||
If your plug-in takes options, the `options` parameter is a dictionary of your
|
||||
plug-in's options from the coverage.py configuration file. Use them however
|
||||
you want to configure your object before registering it.
|
||||
|
||||
Coverage.py will store its own information on your plug-in object, using
|
||||
attributes whose names start with ``_coverage_``. Don't be startled.
|
||||
|
||||
.. warning::
|
||||
Plug-ins are imported by coverage.py before it begins measuring code.
|
||||
If you write a plugin in your own project, it might import your product
|
||||
code before coverage.py can start measuring. This can result in your
|
||||
own code being reported as missing.
|
||||
|
||||
One solution is to put your plugins in your project tree, but not in
|
||||
your importable Python package.
|
||||
|
||||
|
||||
.. _file_tracer_plugins:
|
||||
|
||||
File Tracers
|
||||
============
|
||||
|
||||
File tracers implement measurement support for non-Python files. File tracers
|
||||
implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim
|
||||
files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report
|
||||
on those files.
|
||||
|
||||
In your ``coverage_init`` function, use the ``add_file_tracer`` method to
|
||||
register your file tracer.
|
||||
|
||||
|
||||
.. _configurer_plugins:
|
||||
|
||||
Configurers
|
||||
===========
|
||||
|
||||
.. versionadded:: 4.5
|
||||
|
||||
Configurers modify the configuration of coverage.py during start-up.
|
||||
Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to
|
||||
change the configuration.
|
||||
|
||||
In your ``coverage_init`` function, use the ``add_configurer`` method to
|
||||
register your configurer.
|
||||
|
||||
|
||||
.. _dynamic_context_plugins:
|
||||
|
||||
Dynamic Context Switchers
|
||||
=========================
|
||||
|
||||
.. versionadded:: 5.0
|
||||
|
||||
Dynamic context switcher plugins implement the
|
||||
:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute
|
||||
the context label for each measured frame.
|
||||
|
||||
Computed context labels are useful when you want to group measured data without
|
||||
modifying the source code.
|
||||
|
||||
For example, you could write a plugin that checks `frame.f_code` to inspect
|
||||
the currently executed method, and set the context label to a fully qualified
|
||||
method name if it's an instance method of `unittest.TestCase` and the method
|
||||
name starts with 'test'. Such a plugin would provide basic coverage grouping
|
||||
by test and could be used with test runners that have no built-in coveragepy
|
||||
support.
|
||||
|
||||
In your ``coverage_init`` function, use the ``add_dynamic_context`` method to
|
||||
register your dynamic context switcher.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
|
||||
from types import FrameType
|
||||
from typing import Any, Iterable
|
||||
|
||||
from coverage import files
|
||||
from coverage.misc import _needs_to_implement
|
||||
from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines
|
||||
|
||||
|
||||
class CoveragePlugin:
|
||||
"""Base class for coverage.py plug-ins."""
|
||||
|
||||
_coverage_plugin_name: str
|
||||
_coverage_enabled: bool
|
||||
|
||||
def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument
|
||||
"""Get a :class:`FileTracer` object for a file.
|
||||
|
||||
Plug-in type: file tracer.
|
||||
|
||||
Every Python source file is offered to your plug-in to give it a chance
|
||||
to take responsibility for tracing the file. If your plug-in can
|
||||
handle the file, it should return a :class:`FileTracer` object.
|
||||
Otherwise return None.
|
||||
|
||||
There is no way to register your plug-in for particular files.
|
||||
Instead, this method is invoked for all files as they are executed,
|
||||
and the plug-in decides whether it can trace the file or not.
|
||||
Be prepared for `filename` to refer to all kinds of files that have
|
||||
nothing to do with your plug-in.
|
||||
|
||||
The file name will be a Python file being executed. There are two
|
||||
broad categories of behavior for a plug-in, depending on the kind of
|
||||
files your plug-in supports:
|
||||
|
||||
* Static file names: each of your original source files has been
|
||||
converted into a distinct Python file. Your plug-in is invoked with
|
||||
the Python file name, and it maps it back to its original source
|
||||
file.
|
||||
|
||||
* Dynamic file names: all of your source files are executed by the same
|
||||
Python file. In this case, your plug-in implements
|
||||
:meth:`FileTracer.dynamic_source_filename` to provide the actual
|
||||
source file for each execution frame.
|
||||
|
||||
`filename` is a string, the path to the file being considered. This is
|
||||
the absolute real path to the file. If you are comparing to other
|
||||
paths, be sure to take this into account.
|
||||
|
||||
Returns a :class:`FileTracer` object to use to trace `filename`, or
|
||||
None if this plug-in cannot trace this file.
|
||||
|
||||
"""
|
||||
return None
|
||||
|
||||
def file_reporter(
|
||||
self,
|
||||
filename: str, # pylint: disable=unused-argument
|
||||
) -> FileReporter | str: # str should be Literal["python"]
|
||||
"""Get the :class:`FileReporter` class to use for a file.
|
||||
|
||||
Plug-in type: file tracer.
|
||||
|
||||
This will only be invoked if `filename` returns non-None from
|
||||
:meth:`file_tracer`. It's an error to return None from this method.
|
||||
|
||||
Returns a :class:`FileReporter` object to use to report on `filename`,
|
||||
or the string `"python"` to have coverage.py treat the file as Python.
|
||||
|
||||
"""
|
||||
_needs_to_implement(self, "file_reporter")
|
||||
|
||||
def dynamic_context(
    self,
    frame: FrameType,  # pylint: disable=unused-argument
) -> str | None:
    """Return a new dynamic context label for `frame`, or None.

    Plug-in type: dynamic context.

    Invoked for each frame while execution is outside a dynamic context,
    to ask whether a new one should start.  Returning a string sets that
    context label for this frame and deeper ones; the context ends when
    this frame returns.  Returning None starts no new context.

    """
    return None
|
||||
|
||||
def find_executable_files(
    self,
    src_dir: str,  # pylint: disable=unused-argument
) -> Iterable[str]:
    """Yield the executable files in `src_dir`, recursively.

    Plug-in type: file tracer.

    Executability is plug-in specific: generally, the files that would
    have been considered for coverage analysis had they been included
    automatically.  Returns or yields their paths, including files that
    actually ran.

    """
    return []
|
||||
|
||||
def configure(self, config: TConfigurable) -> None:
    """Modify the configuration of coverage.py.

    Plug-in type: configurer.

    Called during coverage.py start-up so the plug-in can adjust settings.
    `config` supports only `get_option` and `set_option`; call nothing
    else on it.  The default does nothing.

    """
|
||||
|
||||
def sys_info(self) -> Iterable[tuple[str, Any]]:
    """Return debugging information as `(name, value)` pairs.

    Plug-in type: any.

    Invoked for ``--debug=sys``; the plug-in may return any information it
    wants displayed.  The default reports nothing.

    """
    return []
|
||||
|
||||
|
||||
class CoveragePluginBase:
    """Base for plugin-produced objects; links back to the originating plugin."""

    # Assigned by the plugin machinery when the specialized object is created.
    _coverage_plugin: CoveragePlugin
|
||||
|
||||
|
||||
class FileTracer(CoveragePluginBase):
    """Execution-phase support for a file traced by a plug-in.

    File tracer plug-ins subclass FileTracer and return instances from their
    :meth:`~CoveragePlugin.file_tracer` method — and only from there.
    Construct it however you like; passing along the file name given to
    `file_tracer` is a natural choice.

    See :ref:`howitworks` for details of the different coverage.py phases.

    """

    def source_filename(self) -> str:
        """Return the source file name to credit with this execution.

        This may be any file name you like.  Mapping Python execution back
        to the original source file name is a key responsibility of a
        plug-in.  See :meth:`CoveragePlugin.file_tracer` for static versus
        dynamic file names.

        """
        _needs_to_implement(self, "source_filename")

    def has_dynamic_source_filename(self) -> bool:
        """Does this FileTracer have dynamic source file names?

        FileTracers can provide dynamically determined file names by
        implementing :meth:`dynamic_source_filename`, which is expensive to
        invoke.  Coverage.py uses this method's result to decide whether to
        bother calling it.  See :meth:`CoveragePlugin.file_tracer` for
        static versus dynamic file names.

        Returns True if :meth:`dynamic_source_filename` should be called.

        """
        return False

    def dynamic_source_filename(
        self,
        filename: str,  # pylint: disable=unused-argument
        frame: FrameType,  # pylint: disable=unused-argument
    ) -> str | None:
        """Return a dynamically computed source file name for `frame`.

        Some plug-ins need to compute the source file name per frame.  Not
        invoked when :meth:`has_dynamic_source_filename` returns False.

        Returns the source file name, or None if this frame shouldn't be
        measured.

        """
        return None

    def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
        """Return the inclusive (start, end) source line range for `frame`.

        For example, (5, 7) means lines 5, 6, and 7 were executed.  Return
        (-1, -1) to tell coverage.py this frame indicates no executed lines.

        The default credits exactly the frame's current line.

        """
        line = frame.f_lineno
        return line, line
|
||||
|
||||
|
||||
@functools.total_ordering
class FileReporter(CoveragePluginBase):
    """Support needed for files during the analysis and reporting phases.

    File tracer plug-ins subclass `FileReporter` and return instances from
    their :meth:`CoveragePlugin.file_reporter` method.

    Only :meth:`lines` must be overridden, to supply the set of executable
    lines in the file; every other method has a usable default.

    See :ref:`howitworks` for details of the different coverage.py phases.

    """

    def __init__(self, filename: str) -> None:
        """Store `filename`, the path to the file being reported.

        The path is available afterwards as the `.filename` attribute,
        which the base-class method implementations rely on.

        """
        self.filename = filename

    def __repr__(self) -> str:
        return "<{} filename={!r}>".format(self.__class__.__name__, self.filename)

    def relative_filename(self) -> str:
        """Return the project-relative file path shown in reports.

        The default maps `self.filename` through coverage.py's path
        handling; override only for unusual path syntaxes.

        """
        return files.relative_filename(self.filename)

    def source(self) -> str:
        """Return the file's source text as a Unicode string.

        The default reads `self.filename` as UTF-8 text.  Override it if
        your file isn't readable as a text file or needs another encoding.

        """
        with open(self.filename, encoding="utf-8") as src:
            return src.read()

    def lines(self) -> set[TLineNo]:
        """Return the set of possibly-executable line numbers in this file.

        Your plug-in must determine which lines could execute; subclasses
        are required to implement this.

        """
        _needs_to_implement(self, "lines")

    def excluded_lines(self) -> set[TLineNo]:
        """Return the set of excluded executable line numbers.

        How lines get excluded is entirely up to the plug-in.  The default
        excludes nothing.

        """
        return set()

    def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
        """Map recorded line numbers to reported line numbers.

        Some formats report lines differently than they record them: Python
        records the last line of a multi-line statement, but reports read
        better mentioning the first.  `lines` is an iterable of recorded
        integers; a set of adjusted integers is returned.  The default
        reports the numbers unchanged.

        """
        return {line for line in lines}

    def arcs(self) -> set[TArc]:
        """Return the possible execution arcs in this file.

        Each arc is a `(prev, next)` line-number pair meaning execution can
        transition from `prev` to `next`; branch coverage needs these.  The
        default is the empty set.

        """
        return set()

    def no_branch_lines(self) -> set[TLineNo]:
        """Return line numbers excused from branch coverage.

        How lines get excused is entirely up to the plug-in.  The default
        excuses nothing.

        """
        return set()

    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
        """Map recorded arcs to reported arcs.

        The arc analogue of :meth:`translate_lines`; `arcs` is an iterable
        of line-number pairs.  The default reports `arcs` unchanged.

        """
        return {arc for arc in arcs}

    def exit_counts(self) -> dict[TLineNo, int]:
        """Map each executable line number to its count of exits.

        Coverage.py treats lines with more than one exit as branches.
        (This interface is admittedly awkward and may be refactored; let
        the maintainers know if you implement it.)

        """
        return {}

    def missing_arc_description(
        self,
        start: TLineNo,
        end: TLineNo,
        executed_arcs: Iterable[TArc] | None = None,  # pylint: disable=unused-argument
    ) -> str:
        """Return an English sentence describing the missing arc start->end.

        Negative line numbers indicate entering or exiting code objects.
        `executed_arcs` is the set of arcs that did execute in this file.

        """
        return f"Line {start} didn't jump to line {end}"

    def source_token_lines(self) -> TSourceTokenLines:
        """Generate tokenized lines for syntax-colored reports.

        Each generated line is a list of `(token_class, token_text)` pairs.
        Token classes: ``"com"`` comment, ``"key"`` keyword, ``"nam"``
        name, ``"num"`` number, ``"op"`` operator, ``"str"`` string
        literal, ``"ws"`` white space, ``"txt"`` other text.
        Concatenating every token text and joining with newlines reproduces
        the original source.

        The default tags every line as ``"txt"``.

        """
        for text_line in self.source().splitlines():
            yield [("txt", text_line)]

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, FileReporter) and self.filename == other.filename

    def __lt__(self, other: Any) -> bool:
        return isinstance(other, FileReporter) and self.filename < other.filename

    # These objects are compared and ordered, never hashed.
    __hash__ = None  # type: ignore[assignment]
|
||||
297
.venv/lib/python3.10/site-packages/coverage/plugin_support.py
Normal file
297
.venv/lib/python3.10/site-packages/coverage/plugin_support.py
Normal file
|
|
@ -0,0 +1,297 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Support for plugins."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
from types import FrameType
|
||||
from typing import Any, Iterable, Iterator
|
||||
|
||||
from coverage.exceptions import PluginError
|
||||
from coverage.misc import isolate_module
|
||||
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
|
||||
from coverage.types import (
|
||||
TArc, TConfigurable, TDebugCtl, TLineNo, TPluginConfig, TSourceTokenLines,
|
||||
)
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
class Plugins:
    """The currently loaded collection of coverage.py plugins."""

    def __init__(self) -> None:
        # All plugins, in registration order.
        self.order: list[CoveragePlugin] = []
        # Maps "module.ClassName" plugin names to plugin objects.
        self.names: dict[str, CoveragePlugin] = {}
        # Plugins registered for each specialized role.
        self.file_tracers: list[CoveragePlugin] = []
        self.configurers: list[CoveragePlugin] = []
        self.context_switchers: list[CoveragePlugin] = []

        # The module whose coverage_init is currently running; used to name
        # plugins registered through _add_plugin.
        self.current_module: str | None = None
        # Default to None so _add_plugin works even when load_plugins
        # (which assigns self.debug) was never called on this instance.
        self.debug: TDebugCtl | None = None

    @classmethod
    def load_plugins(
        cls,
        modules: Iterable[str],
        config: TPluginConfig,
        debug: TDebugCtl | None = None,
    ) -> Plugins:
        """Load plugins from `modules`.

        Each module must define a `coverage_init` function, which is called
        with this Plugins object and the module's configured options.

        Returns a Plugins object with the loaded and configured plugins.

        """
        plugins = cls()
        plugins.debug = debug

        for module in modules:
            plugins.current_module = module
            __import__(module)
            mod = sys.modules[module]

            coverage_init = getattr(mod, "coverage_init", None)
            if not coverage_init:
                raise PluginError(
                    f"Plugin module {module!r} didn't define a coverage_init function",
                )

            options = config.get_plugin_options(module)
            coverage_init(plugins, options)

        plugins.current_module = None
        return plugins

    def add_file_tracer(self, plugin: CoveragePlugin) -> None:
        """Add a file tracer plugin.

        `plugin` is an instance of a third-party plugin class.  It must
        implement the :meth:`CoveragePlugin.file_tracer` method.

        """
        self._add_plugin(plugin, self.file_tracers)

    def add_configurer(self, plugin: CoveragePlugin) -> None:
        """Add a configuring plugin.

        `plugin` is an instance of a third-party plugin class.  It must
        implement the :meth:`CoveragePlugin.configure` method.

        """
        self._add_plugin(plugin, self.configurers)

    def add_dynamic_context(self, plugin: CoveragePlugin) -> None:
        """Add a dynamic context plugin.

        `plugin` is an instance of a third-party plugin class.  It must
        implement the :meth:`CoveragePlugin.dynamic_context` method.

        """
        self._add_plugin(plugin, self.context_switchers)

    def add_noop(self, plugin: CoveragePlugin) -> None:
        """Add a plugin that does nothing.

        This is only useful for testing the plugin support.

        """
        self._add_plugin(plugin, None)

    def _add_plugin(
        self,
        plugin: CoveragePlugin,
        specialized: list[CoveragePlugin] | None,
    ) -> None:
        """Add a plugin object.

        `plugin` is a :class:`CoveragePlugin` instance to add.  `specialized`
        is a role list to also append it to, or None for no specialized role.

        """
        plugin_name = f"{self.current_module}.{plugin.__class__.__name__}"
        if self.debug and self.debug.should("plugin"):
            self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}")
            labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug)
            # Wrap the plugin so every call through it is logged.
            plugin = DebugPluginWrapper(plugin, labelled)

        # Tag the plugin with its registration name and enabled state.
        plugin._coverage_plugin_name = plugin_name
        plugin._coverage_enabled = True
        self.order.append(plugin)
        self.names[plugin_name] = plugin
        if specialized is not None:
            specialized.append(plugin)

    def __bool__(self) -> bool:
        return bool(self.order)

    def __iter__(self) -> Iterator[CoveragePlugin]:
        return iter(self.order)

    def get(self, plugin_name: str) -> CoveragePlugin:
        """Return a plugin by name."""
        return self.names[plugin_name]
|
||||
|
||||
|
||||
class LabelledDebug:
    """A debug writer that prefixes each message with a stack of labels."""

    def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()):
        self.labels = [*prev_labels, label]
        self.debug = debug

    def add_label(self, label: str) -> LabelledDebug:
        """Return a new `LabelledDebug` with `label` appended to the stack."""
        return LabelledDebug(label, self.debug, self.labels)

    def message_prefix(self) -> str:
        """Combine the labels into an indented, colon-separated prefix."""
        parts = self.labels + [""]
        return ":\n".join(f"{' ' * depth}{text}" for depth, text in enumerate(parts))

    def write(self, message: str) -> None:
        """Write `message` through the underlying writer, with the prefix."""
        self.debug.write(f"{self.message_prefix()}{message}")
|
||||
|
||||
|
||||
class DebugPluginWrapper(CoveragePlugin):
    """Delegate every call to a wrapped plugin, logging it through debug."""

    def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None:
        super().__init__()
        self.plugin = plugin
        self.debug = debug

    def file_tracer(self, filename: str) -> FileTracer | None:
        tracer = self.plugin.file_tracer(filename)
        self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
        if not tracer:
            return tracer
        file_debug = self.debug.add_label(f"file {filename!r}")
        return DebugFileTracerWrapper(tracer, file_debug)

    def file_reporter(self, filename: str) -> FileReporter | str:
        reporter = self.plugin.file_reporter(filename)
        # The wrapped plugin must return a real FileReporter here, never "python".
        assert isinstance(reporter, FileReporter)
        self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
        if not reporter:
            return reporter
        file_debug = self.debug.add_label(f"file {filename!r}")
        return DebugFileReporterWrapper(filename, reporter, file_debug)

    def dynamic_context(self, frame: FrameType) -> str | None:
        context = self.plugin.dynamic_context(frame)
        self.debug.write(f"dynamic_context({frame!r}) --> {context!r}")
        return context

    def find_executable_files(self, src_dir: str) -> Iterable[str]:
        executable_files = self.plugin.find_executable_files(src_dir)
        self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}")
        return executable_files

    def configure(self, config: TConfigurable) -> None:
        self.debug.write(f"configure({config!r})")
        self.plugin.configure(config)

    def sys_info(self) -> Iterable[tuple[str, Any]]:
        # Not worth logging; just pass through.
        return self.plugin.sys_info()
|
||||
|
||||
|
||||
class DebugFileTracerWrapper(FileTracer):
    """Delegate to a wrapped `FileTracer`, logging every call through debug."""

    def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None:
        self.tracer = tracer
        self.debug = debug

    def _show_frame(self, frame: FrameType) -> str:
        """Identify `frame` briefly (basename@lineno) for debug messages."""
        return f"{os.path.basename(frame.f_code.co_filename)}@{frame.f_lineno}"

    def source_filename(self) -> str:
        result = self.tracer.source_filename()
        self.debug.write(f"source_filename() --> {result!r}")
        return result

    def has_dynamic_source_filename(self) -> bool:
        result = self.tracer.has_dynamic_source_filename()
        self.debug.write(f"has_dynamic_source_filename() --> {result!r}")
        return result

    def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None:
        result = self.tracer.dynamic_source_filename(filename, frame)
        self.debug.write(
            f"dynamic_source_filename({filename!r}, {self._show_frame(frame)}) --> {result!r}",
        )
        return result

    def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
        result = self.tracer.line_number_range(frame)
        self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {result!r}")
        return result
|
||||
|
||||
|
||||
class DebugFileReporterWrapper(FileReporter):
    """Delegate to a wrapped `FileReporter`, logging every call through debug."""

    def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None:
        super().__init__(filename)
        self.reporter = reporter
        self.debug = debug

    def _logged(self, label: str, value: Any) -> Any:
        """Write a debug line for `label` and pass `value` through."""
        self.debug.write(f"{label} --> {value!r}")
        return value

    def relative_filename(self) -> str:
        return self._logged("relative_filename()", self.reporter.relative_filename())

    def lines(self) -> set[TLineNo]:
        return self._logged("lines()", self.reporter.lines())

    def excluded_lines(self) -> set[TLineNo]:
        return self._logged("excluded_lines()", self.reporter.excluded_lines())

    def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
        return self._logged(
            f"translate_lines({lines!r})", self.reporter.translate_lines(lines),
        )

    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
        return self._logged(
            f"translate_arcs({arcs!r})", self.reporter.translate_arcs(arcs),
        )

    def no_branch_lines(self) -> set[TLineNo]:
        return self._logged("no_branch_lines()", self.reporter.no_branch_lines())

    def exit_counts(self) -> dict[TLineNo, int]:
        return self._logged("exit_counts()", self.reporter.exit_counts())

    def arcs(self) -> set[TArc]:
        return self._logged("arcs()", self.reporter.arcs())

    def source(self) -> str:
        text = self.reporter.source()
        # Source can be huge; log only its size.
        self.debug.write(f"source() --> {len(text)} chars")
        return text

    def source_token_lines(self) -> TSourceTokenLines:
        tokens = list(self.reporter.source_token_lines())
        self.debug.write(f"source_token_lines() --> {len(tokens)} tokens")
        return tokens
|
||||
1
.venv/lib/python3.10/site-packages/coverage/py.typed
Normal file
1
.venv/lib/python3.10/site-packages/coverage/py.typed
Normal file
|
|
@ -0,0 +1 @@
|
|||
# Marker file for PEP 561 to indicate that this package has type hints.
|
||||
256
.venv/lib/python3.10/site-packages/coverage/python.py
Normal file
256
.venv/lib/python3.10/site-packages/coverage/python.py
Normal file
|
|
@ -0,0 +1,256 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Python source expertise for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os.path
|
||||
import types
|
||||
import zipimport
|
||||
|
||||
from typing import Iterable, TYPE_CHECKING
|
||||
|
||||
from coverage import env
|
||||
from coverage.exceptions import CoverageException, NoSource
|
||||
from coverage.files import canonical_filename, relative_filename, zip_location
|
||||
from coverage.misc import expensive, isolate_module, join_regex
|
||||
from coverage.parser import PythonParser
|
||||
from coverage.phystokens import source_token_lines, source_encoding
|
||||
from coverage.plugin import FileReporter
|
||||
from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
def read_python_source(filename: str) -> bytes:
    """Read the raw Python source bytes from `filename`.

    Returns bytes, with Windows (CRLF) and old-Mac (CR) line endings
    normalized to Unix newlines.

    """
    with open(filename, "rb") as fobj:
        raw = fobj.read()

    return raw.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
|
||||
|
||||
|
||||
def get_python_source(filename: str) -> str:
    """Return the source code for `filename`, as unicode.

    Looks for the file on disk first (trying both .py and .pyw on Windows),
    then inside zip files.  Raises `NoSource` if nothing can be found.

    """
    base, ext = os.path.splitext(filename)
    if ext == ".py" and env.WINDOWS:
        exts = [".py", ".pyw"]
    else:
        exts = [ext]

    source_bytes: bytes | None
    for ext in exts:
        try_filename = base + ext
        if os.path.exists(try_filename):
            # A regular text file: open it.
            source_bytes = read_python_source(try_filename)
            break

        # Maybe it's in a zip file?
        source_bytes = get_zip_bytes(try_filename)
        if source_bytes is not None:
            break
    else:
        # Couldn't find source.  Name the file so the user knows what was
        # missing (the previous message had no placeholder at all).
        raise NoSource(f"No source for code: '{filename}'.")

    # Replace \f because of http://bugs.python.org/issue19035
    source_bytes = source_bytes.replace(b"\f", b" ")
    source = source_bytes.decode(source_encoding(source_bytes), "replace")

    # Python code should always end with a line with a newline.
    if source and source[-1] != "\n":
        source += "\n"

    return source
|
||||
|
||||
|
||||
def get_zip_bytes(filename: str) -> bytes | None:
    """Return the data of `filename` if it lives inside a zip file.

    Returns None when no zip file can be found or `filename` isn't in it.
    An empty file yields an empty bytestring, not None.

    """
    location = zip_location(filename)
    if location is None:
        return None

    zip_path, inner_name = location
    try:
        importer = zipimport.zipimporter(zip_path)
    except zipimport.ZipImportError:
        return None
    try:
        return importer.get_data(inner_name)
    except OSError:
        return None
|
||||
|
||||
|
||||
def source_for_file(filename: str) -> str:
    """Return the best-guess source file for traced file `filename`."""
    if filename.endswith(".py"):
        # A .py file is its own source.
        return filename

    if not filename.endswith((".pyc", ".pyo")):
        # Not bytecode either: just use the name as-is.
        return filename

    # Bytecode files probably have source files near them.
    py_filename = filename[:-1]
    if os.path.exists(py_filename):
        # Found a .py file, use that.
        return py_filename
    if env.WINDOWS:
        # On Windows, it could be a .pyw file.
        pyw_filename = py_filename + "w"
        if os.path.exists(pyw_filename):
            return pyw_filename
    # Didn't find source, but the .py name is still the best guess.
    return py_filename
|
||||
|
||||
|
||||
def source_for_morf(morf: TMorf) -> str:
    """Get the source filename for the module-or-file `morf`."""
    if hasattr(morf, "__file__") and morf.__file__:
        filename = morf.__file__
    elif isinstance(morf, types.ModuleType):
        # A module without __file__ (e.g. a PEP-420 namespace package)
        # can't be attributed to a source file.
        raise CoverageException(f"Module {morf} has no file")
    else:
        # Not a module: assume it's already a file name.
        filename = morf

    return source_for_file(filename)
|
||||
|
||||
|
||||
class PythonFileReporter(FileReporter):
|
||||
"""Report support for a Python file."""
|
||||
|
||||
def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None:
|
||||
self.coverage = coverage
|
||||
|
||||
filename = source_for_morf(morf)
|
||||
|
||||
fname = filename
|
||||
canonicalize = True
|
||||
if self.coverage is not None:
|
||||
if self.coverage.config.relative_files:
|
||||
canonicalize = False
|
||||
if canonicalize:
|
||||
fname = canonical_filename(filename)
|
||||
super().__init__(fname)
|
||||
|
||||
if hasattr(morf, "__name__"):
|
||||
name = morf.__name__.replace(".", os.sep)
|
||||
if os.path.basename(filename).startswith("__init__."):
|
||||
name += os.sep + "__init__"
|
||||
name += ".py"
|
||||
else:
|
||||
name = relative_filename(filename)
|
||||
self.relname = name
|
||||
|
||||
self._source: str | None = None
|
||||
self._parser: PythonParser | None = None
|
||||
self._excluded = None
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<PythonFileReporter {self.filename!r}>"
|
||||
|
||||
def relative_filename(self) -> str:
|
||||
return self.relname
|
||||
|
||||
@property
|
||||
def parser(self) -> PythonParser:
|
||||
"""Lazily create a :class:`PythonParser`."""
|
||||
assert self.coverage is not None
|
||||
if self._parser is None:
|
||||
self._parser = PythonParser(
|
||||
filename=self.filename,
|
||||
exclude=self.coverage._exclude_regex("exclude"),
|
||||
)
|
||||
self._parser.parse_source()
|
||||
return self._parser
|
||||
|
||||
def lines(self) -> set[TLineNo]:
|
||||
"""Return the line numbers of statements in the file."""
|
||||
return self.parser.statements
|
||||
|
||||
def excluded_lines(self) -> set[TLineNo]:
|
||||
"""Return the line numbers of statements in the file."""
|
||||
return self.parser.excluded
|
||||
|
||||
def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
|
||||
return self.parser.translate_lines(lines)
|
||||
|
||||
def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
|
||||
return self.parser.translate_arcs(arcs)
|
||||
|
||||
@expensive
|
||||
def no_branch_lines(self) -> set[TLineNo]:
|
||||
assert self.coverage is not None
|
||||
no_branch = self.parser.lines_matching(
|
||||
join_regex(self.coverage.config.partial_list),
|
||||
join_regex(self.coverage.config.partial_always_list),
|
||||
)
|
||||
return no_branch
|
||||
|
||||
@expensive
|
||||
def arcs(self) -> set[TArc]:
|
||||
return self.parser.arcs()
|
||||
|
||||
@expensive
|
||||
def exit_counts(self) -> dict[TLineNo, int]:
|
||||
return self.parser.exit_counts()
|
||||
|
||||
def missing_arc_description(
|
||||
self,
|
||||
start: TLineNo,
|
||||
end: TLineNo,
|
||||
executed_arcs: Iterable[TArc] | None = None,
|
||||
) -> str:
|
||||
return self.parser.missing_arc_description(start, end, executed_arcs)
|
||||
|
||||
def source(self) -> str:
    """Return the source text for the file, reading it once and caching."""
    if self._source is not None:
        return self._source
    self._source = get_python_source(self.filename)
    return self._source
|
||||
|
||||
def should_be_python(self) -> bool:
    """Does it seem like this file should contain Python?

    Used to decide whether a file reported as part of a program's execution
    was really likely to have contained Python in the first place.
    """
    extension = os.path.splitext(self.filename)[1]
    # Any *.py* extension counts as Python, as does having no extension at
    # all; everything else probably isn't Python.
    return extension.startswith(".py") or not extension
|
||||
|
||||
def source_token_lines(self) -> TSourceTokenLines:
    """Delegate tokenized-line generation to the module-level helper of the same name."""
    text = self.source()
    return source_token_lines(text)
|
||||
346
.venv/lib/python3.10/site-packages/coverage/pytracer.py
Normal file
346
.venv/lib/python3.10/site-packages/coverage/pytracer.py
Normal file
|
|
@ -0,0 +1,346 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Raw data collector for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import atexit
|
||||
import dis
|
||||
import itertools
|
||||
import sys
|
||||
import threading
|
||||
|
||||
from types import FrameType, ModuleType
|
||||
from typing import Any, Callable, Set, cast
|
||||
|
||||
from coverage import env
|
||||
from coverage.types import (
|
||||
TArc, TFileDisposition, TLineNo, TTraceData, TTraceFileData, TTraceFn,
|
||||
TracerCore, TWarnFn,
|
||||
)
|
||||
|
||||
# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
# PYVERSIONS: RESUME is new in Python3.11
RESUME = dis.opmap.get("RESUME")
RETURN_VALUE = dis.opmap["RETURN_VALUE"]
if RESUME is None:
    # Pre-3.11 interpreters: distinguish yields from returns by opcode.
    YIELD_VALUE = dis.opmap["YIELD_VALUE"]
    YIELD_FROM = dis.opmap["YIELD_FROM"]
    # Byte offset from f_lasti to the YIELD_FROM opcode; differs on PyPy.
    YIELD_FROM_OFFSET = 0 if env.PYPY else 2

# When running meta-coverage, this file can try to trace itself, which confuses
# everything. Don't trace ourselves.

# NOTE: rstrip("co") strips any trailing 'c'/'o' characters, mapping
# ".pyc"/".pyo" back to ".py" for the self-tracing check below.
THIS_FILE = __file__.rstrip("co")
|
||||
|
||||
class PyTracer(TracerCore):
    """Python implementation of the raw data tracer.

    Used when the C tracer is unavailable or when ``--timid`` forces the
    pure-Python implementation (see the comment below).
    """

    # Because of poor implementations of trace-function-manipulating tools,
    # the Python trace function must be kept very simple. In particular, there
    # must be only one function ever set as the trace function, both through
    # sys.settrace, and as the return value from the trace function. Put
    # another way, the trace function must always return itself. It cannot
    # swap in other functions, or return None to avoid tracing a particular
    # frame.
    #
    # The trace manipulator that introduced this restriction is DecoratorTools,
    # which sets a trace function, and then later restores the pre-existing one
    # by calling sys.settrace with a function it found in the current frame.
    #
    # Systems that use DecoratorTools (or similar trace manipulations) must use
    # PyTracer to get accurate results. The command-line --timid argument is
    # used to force the use of this tracer.

    # Class-wide counter so each tracer instance gets a unique id (used in log()).
    tracer_ids = itertools.count()

    def __init__(self) -> None:
        # Which tracer are we?
        self.id = next(self.tracer_ids)

        # Attributes set from the collector:
        self.data: TTraceData
        self.trace_arcs = False
        self.should_trace: Callable[[str, FrameType], TFileDisposition]
        self.should_trace_cache: dict[str, TFileDisposition | None]
        self.should_start_context: Callable[[FrameType], str | None] | None = None
        self.switch_context: Callable[[str | None], None] | None = None
        self.warn: TWarnFn

        # The threading module to use, if any.
        self.threading: ModuleType | None = None

        self.cur_file_data: TTraceFileData | None = None
        self.last_line: TLineNo = 0
        self.cur_file_name: str | None = None
        self.context: str | None = None
        self.started_context = False

        # The data_stack parallels the Python call stack. Each entry is
        # information about an active frame, a four-element tuple:
        # [0] The TTraceData for this frame's file. Could be None if we
        #       aren't tracing this frame.
        # [1] The current file name for the frame. None if we aren't tracing
        #       this frame.
        # [2] The last line number executed in this frame.
        # [3] Boolean: did this frame start a new context?
        self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = []
        self.thread: threading.Thread | None = None
        self.stopped = False
        self._activity = False

        self.in_atexit = False
        # On exit, self.in_atexit = True
        atexit.register(setattr, self, "in_atexit", True)

        # Cache a bound method on the instance, so that we don't have to
        # re-create a bound method object all the time.
        self._cached_bound_method_trace: TTraceFn = self._trace

    def __repr__(self) -> str:
        # Summarize how much data this tracer has collected so far.
        points = sum(len(v) for v in self.data.values())
        files = len(self.data)
        return f"<PyTracer at {id(self):#x}: {points} data points in {files} files>"

    def log(self, marker: str, *args: Any) -> None:
        """For hard-core logging of what this tracer is doing."""
        # Appends to a fixed debug file; the `if 0:` sections are optional
        # extra detail a developer can enable by hand.
        with open("/tmp/debug_trace.txt", "a") as f:
            f.write(f"{marker} {self.id}[{len(self.data_stack)}]")
            if 0:  # if you want thread ids..
                f.write(".{:x}.{:x}".format(  # type: ignore[unreachable]
                    self.thread.ident,
                    self.threading.current_thread().ident,
                ))
            f.write(" {}".format(" ".join(map(str, args))))
            if 0:  # if you want callers..
                f.write(" | ")  # type: ignore[unreachable]
                stack = " / ".join(
                    (fname or "???").rpartition("/")[-1]
                    for _, fname, _, _ in self.data_stack
                )
                f.write(stack)
            f.write("\n")

    def _trace(
        self,
        frame: FrameType,
        event: str,
        arg: Any,  # pylint: disable=unused-argument
        lineno: TLineNo | None = None,  # pylint: disable=unused-argument
    ) -> TTraceFn | None:
        """The trace function passed to sys.settrace.

        Handles "call", "line", and "return" events, recording lines or arcs
        into ``self.cur_file_data``, and always returns itself (see the class
        comment about trace-function-manipulating tools).
        """

        # Never trace our own module (meta-coverage protection).
        if THIS_FILE in frame.f_code.co_filename:
            return None

        # f = frame; code = f.f_code
        # self.log(":", f"{code.co_filename} {f.f_lineno} {code.co_name}()", event)

        if (self.stopped and sys.gettrace() == self._cached_bound_method_trace):  # pylint: disable=comparison-with-callable
            # The PyTrace.stop() method has been called, possibly by another
            # thread, let's deactivate ourselves now.
            if 0:
                f = frame  # type: ignore[unreachable]
                self.log("---\nX", f.f_code.co_filename, f.f_lineno)
                while f:
                    self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace)
                    f = f.f_back
            sys.settrace(None)
            try:
                self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
                    self.data_stack.pop()
                )
            except IndexError:
                self.log(
                    "Empty stack!",
                    frame.f_code.co_filename,
                    frame.f_lineno,
                    frame.f_code.co_name,
                )
                return None

        # if event != "call" and frame.f_code.co_filename != self.cur_file_name:
        #     self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)

        if event == "call":
            # Should we start a new context?
            if self.should_start_context and self.context is None:
                context_maybe = self.should_start_context(frame)
                if context_maybe is not None:
                    self.context = context_maybe
                    started_context = True
                    assert self.switch_context is not None
                    self.switch_context(self.context)
                else:
                    started_context = False
            else:
                started_context = False
            self.started_context = started_context

            # Entering a new frame. Decide if we should trace in this file.
            self._activity = True
            self.data_stack.append(
                (
                    self.cur_file_data,
                    self.cur_file_name,
                    self.last_line,
                    started_context,
                ),
            )

            # Improve tracing performance: when calling a function, both caller
            # and callee are often within the same file. if that's the case, we
            # don't have to re-check whether to trace the corresponding
            # function (which is a little bit expensive since it involves
            # dictionary lookups). This optimization is only correct if we
            # didn't start a context.
            filename = frame.f_code.co_filename
            if filename != self.cur_file_name or started_context:
                self.cur_file_name = filename
                disp = self.should_trace_cache.get(filename)
                if disp is None:
                    disp = self.should_trace(filename, frame)
                    self.should_trace_cache[filename] = disp

                self.cur_file_data = None
                if disp.trace:
                    tracename = disp.source_filename
                    assert tracename is not None
                    if tracename not in self.data:
                        self.data[tracename] = set()
                    self.cur_file_data = self.data[tracename]
                else:
                    frame.f_trace_lines = False
            elif not self.cur_file_data:
                frame.f_trace_lines = False

            # The call event is really a "start frame" event, and happens for
            # function calls and re-entering generators. The f_lasti field is
            # -1 for calls, and a real offset for generators. Use <0 as the
            # line number for calls, and the real line number for generators.
            if RESUME is not None:
                # The current opcode is guaranteed to be RESUME. The argument
                # determines what kind of resume it is.
                oparg = frame.f_code.co_code[frame.f_lasti + 1]
                real_call = (oparg == 0)
            else:
                real_call = (getattr(frame, "f_lasti", -1) < 0)
            if real_call:
                self.last_line = -frame.f_code.co_firstlineno
            else:
                self.last_line = frame.f_lineno

        elif event == "line":
            # Record an executed line.
            if self.cur_file_data is not None:
                flineno: TLineNo = frame.f_lineno

                if self.trace_arcs:
                    cast(Set[TArc], self.cur_file_data).add((self.last_line, flineno))
                else:
                    cast(Set[TLineNo], self.cur_file_data).add(flineno)
                self.last_line = flineno

        elif event == "return":
            if self.trace_arcs and self.cur_file_data:
                # Record an arc leaving the function, but beware that a
                # "return" event might just mean yielding from a generator.
                code = frame.f_code.co_code
                lasti = frame.f_lasti
                if RESUME is not None:
                    if len(code) == lasti + 2:
                        # A return from the end of a code object is a real return.
                        real_return = True
                    else:
                        # It is a real return if we aren't going to resume next.
                        if env.PYBEHAVIOR.lasti_is_yield:
                            lasti += 2
                        real_return = (code[lasti] != RESUME)
                else:
                    # Pre-3.11: inspect the opcode at f_lasti to distinguish
                    # returns from yields / yield-froms.
                    if code[lasti] == RETURN_VALUE:
                        real_return = True
                    elif code[lasti] == YIELD_VALUE:
                        real_return = False
                    elif len(code) <= lasti + YIELD_FROM_OFFSET:
                        real_return = True
                    elif code[lasti + YIELD_FROM_OFFSET] == YIELD_FROM:
                        real_return = False
                    else:
                        real_return = True
                if real_return:
                    first = frame.f_code.co_firstlineno
                    cast(Set[TArc], self.cur_file_data).add((self.last_line, -first))

            # Leaving this function, pop the filename stack.
            self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
                self.data_stack.pop()
            )
            # Leaving a context?
            if self.started_context:
                assert self.switch_context is not None
                self.context = None
                self.switch_context(None)
        return self._cached_bound_method_trace

    def start(self) -> TTraceFn:
        """Start this Tracer.

        Return a Python function suitable for use with sys.settrace().

        """
        self.stopped = False
        if self.threading:
            if self.thread is None:
                self.thread = self.threading.current_thread()

        sys.settrace(self._cached_bound_method_trace)
        return self._cached_bound_method_trace

    def stop(self) -> None:
        """Stop this Tracer."""
        # Get the active tracer callback before setting the stop flag to be
        # able to detect if the tracer was changed prior to stopping it.
        tf = sys.gettrace()

        # Set the stop flag. The actual call to sys.settrace(None) will happen
        # in the self._trace callback itself to make sure to call it from the
        # right thread.
        self.stopped = True

        if self.threading:
            assert self.thread is not None
            if self.thread.ident != self.threading.current_thread().ident:
                # Called on a different thread than started us: we can't unhook
                # ourselves, but we've set the flag that we should stop, so we
                # won't do any more tracing.
                #self.log("~", "stopping on different threads")
                return

        # PyPy clears the trace function before running atexit functions,
        # so don't warn if we are in atexit on PyPy and the trace function
        # has changed to None. Metacoverage also messes this up, so don't
        # warn if we are measuring ourselves.
        suppress_warning = (
            (env.PYPY and self.in_atexit and tf is None)
            or env.METACOV
        )
        if self.warn and not suppress_warning:
            if tf != self._cached_bound_method_trace:  # pylint: disable=comparison-with-callable
                self.warn(
                    "Trace function changed, data is likely wrong: " +
                    f"{tf!r} != {self._cached_bound_method_trace!r}",
                    slug="trace-changed",
                )

    def activity(self) -> bool:
        """Has there been any activity?"""
        return self._activity

    def reset_activity(self) -> None:
        """Reset the activity() flag."""
        self._activity = False

    def get_stats(self) -> dict[str, int] | None:
        """Return a dictionary of statistics, or None."""
        # The pure-Python tracer keeps no extra statistics.
        return None
|
||||
281
.venv/lib/python3.10/site-packages/coverage/report.py
Normal file
281
.venv/lib/python3.10/site-packages/coverage/report.py
Normal file
|
|
@ -0,0 +1,281 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Summary reporting"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
|
||||
from typing import Any, IO, Iterable, TYPE_CHECKING
|
||||
|
||||
from coverage.exceptions import ConfigError, NoDataError
|
||||
from coverage.misc import human_sorted_items
|
||||
from coverage.plugin import FileReporter
|
||||
from coverage.report_core import get_analysis_to_report
|
||||
from coverage.results import Analysis, Numbers
|
||||
from coverage.types import TMorf
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
|
||||
|
||||
class SummaryReporter:
    """A reporter for writing the summary report.

    Produces the text/markdown/total table printed by ``coverage report``.
    """

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = self.coverage.config
        # Whether branch data was measured; controls the Branch/BrPart columns.
        self.branches = coverage.get_data().has_arcs()
        self.outfile: IO[str] | None = None
        self.output_format = self.config.format or "text"
        if self.output_format not in {"text", "markdown", "total"}:
            raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
        # (FileReporter, Analysis) pairs accumulated by report_one_file().
        self.fr_analysis: list[tuple[FileReporter, Analysis]] = []
        self.skipped_count = 0
        self.empty_count = 0
        self.total = Numbers(precision=self.config.precision)

    def write(self, line: str) -> None:
        """Write a line to the output, adding a newline."""
        assert self.outfile is not None
        self.outfile.write(line.rstrip())
        self.outfile.write("\n")

    def write_items(self, items: Iterable[str]) -> None:
        """Write a list of strings, joined together."""
        self.write("".join(items))

    def _report_text(
        self,
        header: list[str],
        lines_values: list[list[Any]],
        total_line: list[Any],
        end_lines: list[str],
    ) -> None:
        """Internal method that prints report data in text format.

        `header` is a list with captions.
        `lines_values` is list of lists of sortable values.
        `total_line` is a list with values of the total line.
        `end_lines` is a list of ending lines with information about skipped files.

        """
        # Prepare the formatting strings, header, and column sorting.
        max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
        max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
        max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
        formats = dict(
            Name="{:{name_len}}",
            Stmts="{:>7}",
            Miss="{:>7}",
            Branch="{:>7}",
            BrPart="{:>7}",
            Cover="{:>{n}}",
            Missing="{:>10}",
        )
        header_items = [
            formats[item].format(item, name_len=max_name, n=max_n)
            for item in header
        ]
        header_str = "".join(header_items)
        rule = "-" * len(header_str)

        # Write the header
        self.write(header_str)
        self.write(rule)

        # Data rows get a "%" suffix on Cover and left-padded Missing text.
        formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}")
        for values in lines_values:
            # build string with line values
            line_items = [
                formats[item].format(str(value),
                name_len=max_name, n=max_n-1) for item, value in zip(header, values)
            ]
            self.write_items(line_items)

        # Write a TOTAL line
        if lines_values:
            self.write(rule)

        line_items = [
            formats[item].format(str(value),
            name_len=max_name, n=max_n-1) for item, value in zip(header, total_line)
        ]
        self.write_items(line_items)

        for end_line in end_lines:
            self.write(end_line)

    def _report_markdown(
        self,
        header: list[str],
        lines_values: list[list[Any]],
        total_line: list[Any],
        end_lines: list[str],
    ) -> None:
        """Internal method that prints report data in markdown format.

        `header` is a list with captions.
        `lines_values` is a sorted list of lists containing coverage information.
        `total_line` is a list with values of the total line.
        `end_lines` is a list of ending lines with information about skipped files.

        """
        # Prepare the formatting strings, header, and column sorting.
        # Underscores in names must be escaped for markdown.
        max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
        max_name = max(max_name, len("**TOTAL**")) + 1
        formats = dict(
            Name="| {:{name_len}}|",
            Stmts="{:>9} |",
            Miss="{:>9} |",
            Branch="{:>9} |",
            BrPart="{:>9} |",
            Cover="{:>{n}} |",
            Missing="{:>10} |",
        )
        max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
        header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
        header_str = "".join(header_items)
        # Markdown table separator row: left-aligned first column, right-aligned rest.
        rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] +
            ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]],
        )

        # Write the header
        self.write(header_str)
        self.write(rule_str)

        for values in lines_values:
            # build string with line values
            formats.update(dict(Cover="{:>{n}}% |"))
            line_items = [
                formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1)
                for item, value in zip(header, values)
            ]
            self.write_items(line_items)

        # Write the TOTAL line
        formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
        total_line_items: list[str] = []
        for item, value in zip(header, total_line):
            if value == "":
                insert = value
            elif item == "Cover":
                insert = f" **{value}%**"
            else:
                insert = f" **{value}**"
            # NOTE(review): `+=` with a str extends the list with its single
            # characters; write_items() joins them back, so the output is
            # unchanged, but `.append(...)` would state the intent clearly.
            total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
        self.write_items(total_line_items)
        for end_line in end_lines:
            self.write(end_line)

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
        """Writes a report summarizing coverage statistics per module.

        `outfile` is a text-mode file object to write the summary to.

        Returns the total percent covered.
        """
        self.outfile = outfile or sys.stdout

        self.coverage.get_data().set_query_contexts(self.config.report_contexts)
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.report_one_file(fr, analysis)

        if not self.total.n_files and not self.skipped_count:
            raise NoDataError("No data to report.")

        if self.output_format == "total":
            self.write(self.total.pc_covered_str)
        else:
            self.tabular_report()

        return self.total.pc_covered

    def tabular_report(self) -> None:
        """Writes tabular report formats."""
        # Prepare the header line and column sorting.
        header = ["Name", "Stmts", "Miss"]
        if self.branches:
            header += ["Branch", "BrPart"]
        header += ["Cover"]
        if self.config.show_missing:
            header += ["Missing"]

        # Map sort-option names to column indexes ("cover" sorts by the
        # numeric pc_covered appended as the last element of each row).
        column_order = dict(name=0, stmts=1, miss=2, cover=-1)
        if self.branches:
            column_order.update(dict(branch=3, brpart=4))

        # `lines_values` is list of lists of sortable values.
        lines_values = []

        for (fr, analysis) in self.fr_analysis:
            nums = analysis.numbers

            args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
            if self.branches:
                args += [nums.n_branches, nums.n_partial_branches]
            args += [nums.pc_covered_str]
            if self.config.show_missing:
                args += [analysis.missing_formatted(branches=True)]
            args += [nums.pc_covered]
            lines_values.append(args)

        # Line sorting.
        sort_option = (self.config.sort or "name").lower()
        reverse = False
        if sort_option[0] == "-":
            reverse = True
            sort_option = sort_option[1:]
        elif sort_option[0] == "+":
            sort_option = sort_option[1:]
        sort_idx = column_order.get(sort_option)
        if sort_idx is None:
            raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
        if sort_option == "name":
            lines_values = human_sorted_items(lines_values, reverse=reverse)
        else:
            lines_values.sort(
                key=lambda line: (line[sort_idx], line[0]),
                reverse=reverse,
            )

        # Calculate total if we had at least one file.
        total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
        if self.branches:
            total_line += [self.total.n_branches, self.total.n_partial_branches]
        total_line += [self.total.pc_covered_str]
        if self.config.show_missing:
            total_line += [""]

        # Create other final lines.
        end_lines = []
        if self.config.skip_covered and self.skipped_count:
            file_suffix = "s" if self.skipped_count>1 else ""
            end_lines.append(
                f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.",
            )
        if self.config.skip_empty and self.empty_count:
            file_suffix = "s" if self.empty_count > 1 else ""
            end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")

        if self.output_format == "markdown":
            formatter = self._report_markdown
        else:
            formatter = self._report_text
        formatter(header, lines_values, total_line, end_lines)

    def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
        """Report on just one file, the callback from report()."""
        nums = analysis.numbers
        self.total += nums

        no_missing_lines = (nums.n_missing == 0)
        no_missing_branches = (nums.n_partial_branches == 0)
        if self.config.skip_covered and no_missing_lines and no_missing_branches:
            # Don't report on 100% files.
            self.skipped_count += 1
        elif self.config.skip_empty and nums.n_statements == 0:
            # Don't report on empty files.
            self.empty_count += 1
        else:
            self.fr_analysis.append((fr, analysis))
|
||||
119
.venv/lib/python3.10/site-packages/coverage/report_core.py
Normal file
119
.venv/lib/python3.10/site-packages/coverage/report_core.py
Normal file
|
|
@ -0,0 +1,119 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Reporter foundation for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
|
||||
from typing import (
|
||||
Callable, Iterable, Iterator, IO, Protocol, TYPE_CHECKING,
|
||||
)
|
||||
|
||||
from coverage.exceptions import NoDataError, NotPython
|
||||
from coverage.files import prep_patterns, GlobMatcher
|
||||
from coverage.misc import ensure_dir_for_file, file_be_gone
|
||||
from coverage.plugin import FileReporter
|
||||
from coverage.results import Analysis
|
||||
from coverage.types import TMorf
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
|
||||
|
||||
class Reporter(Protocol):
    """What we expect of reporters.

    Structural protocol satisfied by the concrete reporter classes passed to
    ``render_report``.
    """

    # Human-readable name of the report kind, used in the "Wrote ... to ..."
    # message emitted by render_report().
    report_type: str

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Generate a report of `morfs`, written to `outfile`."""
|
||||
|
||||
|
||||
def render_report(
    output_path: str,
    reporter: Reporter,
    morfs: Iterable[TMorf] | None,
    msgfn: Callable[[str], None],
) -> float:
    """Run a one-file report generator, managing the output file.

    This function ensures the output file is ready to be written to. Then writes
    the report to it. Then closes the file and cleans up.

    `output_path` of "-" means standard output.  On failure, a partially
    written output file is deleted.  Returns the reporter's percent covered.
    """
    file_to_close = None
    delete_file = False

    if output_path == "-":
        outfile = sys.stdout
    else:
        # Ensure that the output directory is created; done here because this
        # report pre-opens the output file. HtmlReporter does this on its own
        # because its task is more complex, being multiple files.
        ensure_dir_for_file(output_path)
        outfile = open(output_path, "w", encoding="utf-8")
        file_to_close = outfile
        # Assume failure until the report completes successfully.
        delete_file = True

    try:
        ret = reporter.report(morfs, outfile=outfile)
        if file_to_close is not None:
            msgfn(f"Wrote {reporter.report_type} to {output_path}")
            # Success: keep the file.
            delete_file = False
        return ret
    finally:
        if file_to_close is not None:
            file_to_close.close()
            if delete_file:
                file_be_gone(output_path)  # pragma: part covered (doesn't return)
|
||||
|
||||
|
||||
def get_analysis_to_report(
    coverage: Coverage,
    morfs: Iterable[TMorf] | None,
) -> Iterator[tuple[FileReporter, Analysis]]:
    """Get the files to report on.

    For each morf in `morfs`, if it should be reported on (based on the omit
    and include configuration options), yield a pair, the `FileReporter` and
    `Analysis` for the morf.

    Raises NoDataError if no files remain after include/omit filtering.
    Parse failures are warned about or re-raised depending on the
    ``ignore_errors`` configuration setting.
    """
    file_reporters = coverage._get_file_reporters(morfs)
    config = coverage.config

    # Apply the include filter first, then the omit filter.
    if config.report_include:
        matcher = GlobMatcher(prep_patterns(config.report_include), "report_include")
        file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]

    if config.report_omit:
        matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit")
        file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]

    if not file_reporters:
        raise NoDataError("No data to report.")

    for fr in sorted(file_reporters):
        try:
            analysis = coverage._analyze(fr)
        except NotPython:
            # Only report errors for .py files, and only if we didn't
            # explicitly suppress those errors.
            # NotPython is only raised by PythonFileReporter, which has a
            # should_be_python() method.
            if fr.should_be_python():  # type: ignore[attr-defined]
                if config.ignore_errors:
                    msg = f"Couldn't parse Python file '{fr.filename}'"
                    coverage._warn(msg, slug="couldnt-parse")
                else:
                    raise
        except Exception as exc:
            if config.ignore_errors:
                msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
                coverage._warn(msg, slug="couldnt-parse")
            else:
                raise
        else:
            yield (fr, analysis)
|
||||
385
.venv/lib/python3.10/site-packages/coverage/results.py
Normal file
385
.venv/lib/python3.10/site-packages/coverage/results.py
Normal file
|
|
@ -0,0 +1,385 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Results of coverage measurement."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
|
||||
from typing import Callable, Iterable, TYPE_CHECKING
|
||||
|
||||
from coverage.debug import auto_repr
|
||||
from coverage.exceptions import ConfigError
|
||||
from coverage.misc import nice_pair
|
||||
from coverage.types import TArc, TLineNo
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage.data import CoverageData
|
||||
from coverage.plugin import FileReporter
|
||||
|
||||
|
||||
class Analysis:
|
||||
"""The results of analyzing a FileReporter."""
|
||||
|
||||
def __init__(
    self,
    data: CoverageData,
    precision: int,
    file_reporter: FileReporter,
    file_mapper: Callable[[str], str],
) -> None:
    """Analyze one file's coverage: statements, missing lines, and branch counts.

    `data` is the measured coverage data, `precision` the number of digits
    for percentages, `file_reporter` supplies the file's static analysis,
    and `file_mapper` canonicalizes the filename for data lookups.
    """
    self.data = data
    self.file_reporter = file_reporter
    self.filename = file_mapper(self.file_reporter.filename)
    self.statements = self.file_reporter.lines()
    self.excluded = self.file_reporter.excluded_lines()

    # Identify missing statements.
    executed: Iterable[TLineNo]
    executed = self.data.lines(self.filename) or []
    executed = self.file_reporter.translate_lines(executed)
    self.executed = executed
    self.missing = self.statements - self.executed

    if self.data.has_arcs():
        # Branch coverage was measured: compute arc-derived numbers.
        self._arc_possibilities = sorted(self.file_reporter.arcs())
        self.exit_counts = self.file_reporter.exit_counts()
        self.no_branch = self.file_reporter.no_branch_lines()
        n_branches = self._total_branches()
        mba = self.missing_branch_arcs()
        # A branch is only "partial" if its source line itself was executed.
        n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing)
        n_missing_branches = sum(len(v) for k,v in mba.items())
    else:
        # Line coverage only: branch-related values are empty/zero.
        self._arc_possibilities = []
        self.exit_counts = {}
        self.no_branch = set()
        n_branches = n_partial_branches = n_missing_branches = 0

    self.numbers = Numbers(
        precision=precision,
        n_files=1,
        n_statements=len(self.statements),
        n_excluded=len(self.excluded),
        n_missing=len(self.missing),
        n_branches=n_branches,
        n_partial_branches=n_partial_branches,
        n_missing_branches=n_missing_branches,
    )
|
||||
|
||||
def missing_formatted(self, branches: bool = False) -> str:
|
||||
"""The missing line numbers, formatted nicely.
|
||||
|
||||
Returns a string like "1-2, 5-11, 13-14".
|
||||
|
||||
If `branches` is true, includes the missing branch arcs also.
|
||||
|
||||
"""
|
||||
if branches and self.has_arcs():
|
||||
arcs = self.missing_branch_arcs().items()
|
||||
else:
|
||||
arcs = None
|
||||
|
||||
return format_lines(self.statements, self.missing, arcs=arcs)
|
||||
|
||||
def has_arcs(self) -> bool:
|
||||
"""Were arcs measured in this result?"""
|
||||
return self.data.has_arcs()
|
||||
|
||||
def arc_possibilities(self) -> list[TArc]:
|
||||
"""Returns a sorted list of the arcs in the code."""
|
||||
return self._arc_possibilities
|
||||
|
||||
def arcs_executed(self) -> list[TArc]:
|
||||
"""Returns a sorted list of the arcs actually executed in the code."""
|
||||
executed: Iterable[TArc]
|
||||
executed = self.data.arcs(self.filename) or []
|
||||
executed = self.file_reporter.translate_arcs(executed)
|
||||
return sorted(executed)
|
||||
|
||||
def arcs_missing(self) -> list[TArc]:
|
||||
"""Returns a sorted list of the un-executed arcs in the code."""
|
||||
possible = self.arc_possibilities()
|
||||
executed = self.arcs_executed()
|
||||
missing = (
|
||||
p for p in possible
|
||||
if p not in executed
|
||||
and p[0] not in self.no_branch
|
||||
and p[1] not in self.excluded
|
||||
)
|
||||
return sorted(missing)
|
||||
|
||||
def arcs_unpredicted(self) -> list[TArc]:
|
||||
"""Returns a sorted list of the executed arcs missing from the code."""
|
||||
possible = self.arc_possibilities()
|
||||
executed = self.arcs_executed()
|
||||
# Exclude arcs here which connect a line to itself. They can occur
|
||||
# in executed data in some cases. This is where they can cause
|
||||
# trouble, and here is where it's the least burden to remove them.
|
||||
# Also, generators can somehow cause arcs from "enter" to "exit", so
|
||||
# make sure we have at least one positive value.
|
||||
unpredicted = (
|
||||
e for e in executed
|
||||
if e not in possible
|
||||
and e[0] != e[1]
|
||||
and (e[0] > 0 or e[1] > 0)
|
||||
)
|
||||
return sorted(unpredicted)
|
||||
|
||||
def _branch_lines(self) -> list[TLineNo]:
|
||||
"""Returns a list of line numbers that have more than one exit."""
|
||||
return [l1 for l1,count in self.exit_counts.items() if count > 1]
|
||||
|
||||
def _total_branches(self) -> int:
|
||||
"""How many total branches are there?"""
|
||||
return sum(count for count in self.exit_counts.values() if count > 1)
|
||||
|
||||
def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
|
||||
"""Return arcs that weren't executed from branch lines.
|
||||
|
||||
Returns {l1:[l2a,l2b,...], ...}
|
||||
|
||||
"""
|
||||
missing = self.arcs_missing()
|
||||
branch_lines = set(self._branch_lines())
|
||||
mba = collections.defaultdict(list)
|
||||
for l1, l2 in missing:
|
||||
if l1 in branch_lines:
|
||||
mba[l1].append(l2)
|
||||
return mba
|
||||
|
||||
def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
|
||||
"""Return arcs that were executed from branch lines.
|
||||
|
||||
Returns {l1:[l2a,l2b,...], ...}
|
||||
|
||||
"""
|
||||
executed = self.arcs_executed()
|
||||
branch_lines = set(self._branch_lines())
|
||||
eba = collections.defaultdict(list)
|
||||
for l1, l2 in executed:
|
||||
if l1 in branch_lines:
|
||||
eba[l1].append(l2)
|
||||
return eba
|
||||
|
||||
def branch_stats(self) -> dict[TLineNo, tuple[int, int]]:
|
||||
"""Get stats about branches.
|
||||
|
||||
Returns a dict mapping line numbers to a tuple:
|
||||
(total_exits, taken_exits).
|
||||
"""
|
||||
|
||||
missing_arcs = self.missing_branch_arcs()
|
||||
stats = {}
|
||||
for lnum in self._branch_lines():
|
||||
exits = self.exit_counts[lnum]
|
||||
missing = len(missing_arcs[lnum])
|
||||
stats[lnum] = (exits, exits - missing)
|
||||
return stats
|
||||
|
||||
|
||||
class Numbers:
    """The numerical results of measuring coverage.

    This holds the basic statistics from `Analysis`, and is used to roll
    up statistics across files.

    """

    def __init__(
        self,
        precision: int = 0,
        n_files: int = 0,
        n_statements: int = 0,
        n_excluded: int = 0,
        n_missing: int = 0,
        n_branches: int = 0,
        n_partial_branches: int = 0,
        n_missing_branches: int = 0,
    ) -> None:
        assert 0 <= precision < 10
        self._precision = precision
        # The smallest displayable non-zero percentage, and its mirror
        # just below 100, used to avoid rounding to "0" or "100".
        self._near0 = 1.0 / 10**precision
        self._near100 = 100.0 - self._near0
        self.n_files = n_files
        self.n_statements = n_statements
        self.n_excluded = n_excluded
        self.n_missing = n_missing
        self.n_branches = n_branches
        self.n_partial_branches = n_partial_branches
        self.n_missing_branches = n_missing_branches

    __repr__ = auto_repr

    def init_args(self) -> list[int]:
        """Return a list for __init__(*args) to recreate this object."""
        return [
            self._precision,
            self.n_files,
            self.n_statements,
            self.n_excluded,
            self.n_missing,
            self.n_branches,
            self.n_partial_branches,
            self.n_missing_branches,
        ]

    @property
    def n_executed(self) -> int:
        """Returns the number of executed statements."""
        return self.n_statements - self.n_missing

    @property
    def n_executed_branches(self) -> int:
        """Returns the number of executed branches."""
        return self.n_branches - self.n_missing_branches

    @property
    def pc_covered(self) -> float:
        """Returns a single percentage value for coverage."""
        # An empty file counts as fully covered.
        if self.n_statements <= 0:
            return 100.0
        numerator, denominator = self.ratio_covered
        return (100.0 * numerator) / denominator

    @property
    def pc_covered_str(self) -> str:
        """Returns the percent covered, as a string, without a percent sign.

        Note that "0" is only returned when the value is truly zero, and "100"
        is only returned when the value is truly 100.  Rounding can never
        result in either "0" or "100".

        """
        return self.display_covered(self.pc_covered)

    def display_covered(self, pc: float) -> str:
        """Return a displayable total percentage, as a string.

        Note that "0" is only returned when the value is truly zero, and "100"
        is only returned when the value is truly 100.  Rounding can never
        result in either "0" or "100".

        """
        # Clamp values that would otherwise round down to 0 or up to 100.
        if 0 < pc < self._near0:
            pc = self._near0
        elif self._near100 < pc < 100:
            pc = self._near100
        else:
            pc = round(pc, self._precision)
        return "%.*f" % (self._precision, pc)

    def pc_str_width(self) -> int:
        """How many characters wide can pc_covered_str be?"""
        # "100" is three characters, plus a decimal point and the
        # fraction digits when precision is non-zero.
        if self._precision:
            return 4 + self._precision
        return 3

    @property
    def ratio_covered(self) -> tuple[int, int]:
        """Return a numerator and denominator for the coverage ratio."""
        covered = self.n_executed + self.n_executed_branches
        total = self.n_statements + self.n_branches
        return covered, total

    def __add__(self, other: Numbers) -> Numbers:
        combined = Numbers(precision=self._precision)
        combined.n_files = self.n_files + other.n_files
        combined.n_statements = self.n_statements + other.n_statements
        combined.n_excluded = self.n_excluded + other.n_excluded
        combined.n_missing = self.n_missing + other.n_missing
        combined.n_branches = self.n_branches + other.n_branches
        combined.n_partial_branches = self.n_partial_branches + other.n_partial_branches
        combined.n_missing_branches = self.n_missing_branches + other.n_missing_branches
        return combined

    def __radd__(self, other: int) -> Numbers:
        # Implementing 0+Numbers allows us to sum() a list of Numbers.
        assert other == 0    # we only ever call it this way.
        return self
|
||||
|
||||
|
||||
def _line_ranges(
|
||||
statements: Iterable[TLineNo],
|
||||
lines: Iterable[TLineNo],
|
||||
) -> list[tuple[TLineNo, TLineNo]]:
|
||||
"""Produce a list of ranges for `format_lines`."""
|
||||
statements = sorted(statements)
|
||||
lines = sorted(lines)
|
||||
|
||||
pairs = []
|
||||
start = None
|
||||
lidx = 0
|
||||
for stmt in statements:
|
||||
if lidx >= len(lines):
|
||||
break
|
||||
if stmt == lines[lidx]:
|
||||
lidx += 1
|
||||
if not start:
|
||||
start = stmt
|
||||
end = stmt
|
||||
elif start:
|
||||
pairs.append((start, end))
|
||||
start = None
|
||||
if start:
|
||||
pairs.append((start, end))
|
||||
return pairs
|
||||
|
||||
|
||||
def format_lines(
    statements: Iterable[TLineNo],
    lines: Iterable[TLineNo],
    arcs: Iterable[tuple[TLineNo, list[TLineNo]]] | None = None,
) -> str:
    """Nicely format a list of line numbers.

    Format a list of line numbers for printing by coalescing groups of lines as
    long as the lines represent consecutive statements.  This will coalesce
    even if there are gaps between statements.

    For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
    `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".

    Both `lines` and `statements` can be any iterable. All of the elements of
    `lines` must be in `statements`, and all of the values must be positive
    integers.

    If `arcs` is provided, they are (start,[end,end,end]) pairs that will be
    included in the output as long as start isn't in `lines`.

    """
    # Each item is (sort key, display text); sorting by starting line keeps
    # ranges and arc annotations in source order.
    items = [(rng[0], nice_pair(rng)) for rng in _line_ranges(statements, lines)]
    if arcs is not None:
        for origin, dests in sorted(arcs):
            for dest in sorted(dests):
                if origin not in lines and dest not in lines:
                    # A negative destination means leaving the function.
                    target = dest if dest > 0 else "exit"
                    items.append((origin, f"{origin}->{target}"))

    return ", ".join(text for _, text in sorted(items))
|
||||
|
||||
|
||||
def should_fail_under(total: float, fail_under: float, precision: int) -> bool:
    """Determine if a total should fail due to fail-under.

    `total` is a float, the coverage measurement total. `fail_under` is the
    fail_under setting to compare with. `precision` is the number of digits
    to consider after the decimal point.

    Returns True if the total should fail.

    """
    # We can never achieve higher than 100% coverage, or less than zero.
    if not (0 <= fail_under <= 100.0):
        raise ConfigError(
            f"fail_under={fail_under} is invalid. Must be between 0 and 100.",
        )

    # Special case for fail_under=100, it must really be 100:
    # no rounding leniency is allowed at the top.
    if fail_under == 100.0:
        return total != 100.0

    return round(total, precision) < fail_under
|
||||
1102
.venv/lib/python3.10/site-packages/coverage/sqldata.py
Normal file
1102
.venv/lib/python3.10/site-packages/coverage/sqldata.py
Normal file
File diff suppressed because it is too large
Load diff
230
.venv/lib/python3.10/site-packages/coverage/sqlitedb.py
Normal file
230
.venv/lib/python3.10/site-packages/coverage/sqlitedb.py
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""SQLite abstraction for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import re
|
||||
import sqlite3
|
||||
|
||||
from typing import cast, Any, Iterable, Iterator, Tuple
|
||||
|
||||
from coverage.debug import auto_repr, clipped_repr, exc_one_line
|
||||
from coverage.exceptions import DataError
|
||||
from coverage.types import TDebugCtl
|
||||
|
||||
|
||||
class SqliteDb:
    """A simple abstraction over a SQLite database.

    Use as a context manager, then you can use it like a
    :class:`python:sqlite3.Connection` object::

        with SqliteDb(filename, debug_control) as db:
            with db.execute("select a, b from some_table") as cur:
                for a, b in cur:
                    etc(a, b)

    """
    def __init__(self, filename: str, debug: TDebugCtl) -> None:
        self.debug = debug
        self.filename = filename
        # Depth of nested `with` blocks; the connection is opened on the
        # first entry and committed/closed when the outermost block exits.
        self.nest = 0
        self.con: sqlite3.Connection | None = None

    __repr__ = auto_repr

    def _connect(self) -> None:
        """Connect to the db and do universal initialization."""
        if self.con is not None:
            return

        # It can happen that Python switches threads while the tracer writes
        # data. The second thread will also try to write to the data,
        # effectively causing a nested context. However, given the idempotent
        # nature of the tracer operations, sharing a connection among threads
        # is not a problem.
        if self.debug.should("sql"):
            self.debug.write(f"Connecting to {self.filename!r}")
        try:
            self.con = sqlite3.connect(self.filename, check_same_thread=False)
        except sqlite3.Error as exc:
            raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc

        if self.debug.should("sql"):
            self.debug.write(f"Connected to {self.filename!r} as {self.con!r}")

        # NOTE(review): SQLite invokes REGEXP(pattern, string) for the SQL
        # `string REGEXP pattern` operator, so despite the parameter names,
        # `txt` receives the pattern here. Confirm before "fixing" the order.
        self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None)

        # Turning off journal_mode can speed up writing. It can't always be
        # disabled, so we have to be prepared for *-journal files elsewhere.
        # In Python 3.12+, we can change the config to allow journal_mode=off.
        if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"):
            # Turn off defensive mode, so that journal_mode=off can succeed.
            self.con.setconfig(  # type: ignore[attr-defined, unused-ignore]
                sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False,
            )

        # This pragma makes writing faster. It disables rollbacks, but we never need them.
        self.execute_void("pragma journal_mode=off")

        # This pragma makes writing faster. It can fail in unusual situations
        # (https://github.com/nedbat/coveragepy/issues/1646), so use fail_ok=True
        # to keep things going.
        self.execute_void("pragma synchronous=off", fail_ok=True)

    def close(self) -> None:
        """If needed, close the connection."""
        # In-memory databases lose their contents on close, so leave them open.
        if self.con is not None and self.filename != ":memory:":
            if self.debug.should("sql"):
                self.debug.write(f"Closing {self.con!r} on {self.filename!r}")
            self.con.close()
            self.con = None

    def __enter__(self) -> SqliteDb:
        # Only the outermost entry connects and starts a transaction.
        if self.nest == 0:
            self._connect()
            assert self.con is not None
            self.con.__enter__()
        self.nest += 1
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore[no-untyped-def]
        self.nest -= 1
        # Only the outermost exit commits and closes.
        if self.nest == 0:
            try:
                assert self.con is not None
                self.con.__exit__(exc_type, exc_value, traceback)
                self.close()
            except Exception as exc:
                if self.debug.should("sql"):
                    self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}")
                raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc

    def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor:
        """Same as :meth:`python:sqlite3.Connection.execute`."""
        if self.debug.should("sql"):
            tail = f" with {parameters!r}" if parameters else ""
            self.debug.write(f"Executing {sql!r}{tail}")
        try:
            assert self.con is not None
            try:
                return self.con.execute(sql, parameters)  # type: ignore[arg-type]
            except Exception:
                # In some cases, an error might happen that isn't really an
                # error.  Try again immediately.
                # https://github.com/nedbat/coveragepy/issues/1010
                return self.con.execute(sql, parameters)  # type: ignore[arg-type]
        except sqlite3.Error as exc:
            msg = str(exc)
            if self.filename != ":memory:":
                try:
                    # `execute` is the first thing we do with the database, so try
                    # hard to provide useful hints if something goes wrong now.
                    with open(self.filename, "rb") as bad_file:
                        cov4_sig = b"!coverage.py: This is a private format"
                        if bad_file.read(len(cov4_sig)) == cov4_sig:
                            msg = (
                                "Looks like a coverage 4.x data file. " +
                                "Are you mixing versions of coverage?"
                            )
                except Exception:
                    # Diagnosis is best-effort; never mask the original error.
                    pass
            if self.debug.should("sql"):
                self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}")
            raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc

    @contextlib.contextmanager
    def execute(
        self,
        sql: str,
        parameters: Iterable[Any] = (),
    ) -> Iterator[sqlite3.Cursor]:
        """Context managed :meth:`python:sqlite3.Connection.execute`.

        Use with a ``with`` statement to auto-close the returned cursor.
        """
        cur = self._execute(sql, parameters)
        try:
            yield cur
        finally:
            cur.close()

    def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None:
        """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor.

        If `fail_ok` is True, then SQLite errors are ignored.
        """
        try:
            # PyPy needs the .close() calls here, or sqlite gets twisted up:
            # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
            self._execute(sql, parameters).close()
        except DataError:
            if not fail_ok:
                raise

    def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int:
        """Like execute, but returns the lastrowid."""
        with self.execute(sql, parameters) as cur:
            assert cur.lastrowid is not None
            rowid: int = cur.lastrowid
        if self.debug.should("sqldata"):
            self.debug.write(f"Row id result: {rowid!r}")
        return rowid

    def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None:
        """Execute a statement and return the one row that results.

        This is like execute(sql, parameters).fetchone(), except it is
        correct in reading the entire result set.  This will raise an
        exception if more than one row results.

        Returns a row, or None if there were no rows.
        """
        with self.execute(sql, parameters) as cur:
            rows = list(cur)
        if len(rows) == 0:
            return None
        elif len(rows) == 1:
            return cast(Tuple[Any, ...], rows[0])
        else:
            raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")

    def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor:
        """Same as :meth:`python:sqlite3.Connection.executemany`."""
        if self.debug.should("sql"):
            final = ":" if self.debug.should("sqldata") else ""
            self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}")
            if self.debug.should("sqldata"):
                for i, row in enumerate(data):
                    self.debug.write(f"{i:4d}: {row!r}")
        assert self.con is not None
        try:
            return self.con.executemany(sql, data)
        except Exception:
            # In some cases, an error might happen that isn't really an
            # error.  Try again immediately.
            # https://github.com/nedbat/coveragepy/issues/1010
            return self.con.executemany(sql, data)

    def executemany_void(self, sql: str, data: Iterable[Any]) -> None:
        """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor."""
        # Materialize the iterable so we can both test and reuse it.
        data = list(data)
        if data:
            self._executemany(sql, data).close()

    def executescript(self, script: str) -> None:
        """Same as :meth:`python:sqlite3.Connection.executescript`."""
        if self.debug.should("sql"):
            self.debug.write("Executing script with {} chars: {}".format(
                len(script), clipped_repr(script, 100),
            ))
        assert self.con is not None
        self.con.executescript(script).close()

    def dump(self) -> str:
        """Return a multi-line string, the SQL dump of the database."""
        assert self.con is not None
        return "\n".join(self.con.iterdump())
|
||||
423
.venv/lib/python3.10/site-packages/coverage/sysmon.py
Normal file
423
.venv/lib/python3.10/site-packages/coverage/sysmon.py
Normal file
|
|
@ -0,0 +1,423 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Callback functions and support for sys.monitoring data collection."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import inspect
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from dataclasses import dataclass
|
||||
from types import CodeType, FrameType
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Set,
|
||||
TYPE_CHECKING,
|
||||
cast,
|
||||
)
|
||||
|
||||
from coverage.debug import short_filename, short_stack
|
||||
from coverage.types import (
|
||||
AnyCallable,
|
||||
TArc,
|
||||
TFileDisposition,
|
||||
TLineNo,
|
||||
TTraceData,
|
||||
TTraceFileData,
|
||||
TracerCore,
|
||||
TWarnFn,
|
||||
)
|
||||
|
||||
# pylint: disable=unused-argument

# When True, every monitoring callback is logged to disk (debugging aid only).
LOG = False

# This module will be imported in all versions of Python, but only used in 3.12+
# It will be type-checked for 3.12, but not for earlier versions.
sys_monitoring = getattr(sys, "monitoring", None)

if TYPE_CHECKING:
    assert sys_monitoring is not None
    # I want to say this but it's not allowed:
    # MonitorReturn = Literal[sys.monitoring.DISABLE] | None
    MonitorReturn = Any


if LOG:  # pragma: debugging

    class LoggingWrapper:
        """Wrap a namespace to log all its functions."""

        def __init__(self, wrapped: Any, namespace: str) -> None:
            self.wrapped = wrapped
            self.namespace = namespace

        def __getattr__(self, name: str) -> Callable[..., Any]:
            # Delegate to the wrapped namespace, logging the call first.
            def _wrapped(*args: Any, **kwargs: Any) -> Any:
                log(f"{self.namespace}.{name}{args}{kwargs}")
                return getattr(self.wrapped, name)(*args, **kwargs)

            return _wrapped

    sys_monitoring = LoggingWrapper(sys_monitoring, "sys.monitoring")
    assert sys_monitoring is not None

    short_stack = functools.partial(
        short_stack, full=True, short_filenames=True, frame_ids=True,
    )
    # Thread ids we've already announced in the log.
    seen_threads: set[int] = set()

    def log(msg: str) -> None:
        """Write a message to our detailed debugging log(s)."""
        # Thread ids are reused across processes?
        # Make a shorter number more likely to be unique.
        pid = os.getpid()
        tid = cast(int, threading.current_thread().ident)
        tslug = f"{(pid * tid) % 9_999_991:07d}"
        if tid not in seen_threads:
            seen_threads.add(tid)
            # Recursive call: announce a new thread once, with its stack.
            log(f"New thread {tid} {tslug}:\n{short_stack()}")
        # log_seq = int(os.getenv("PANSEQ", "0"))
        # root = f"/tmp/pan.{log_seq:03d}"
        for filename in [
            "/tmp/foo.out",
            # f"{root}.out",
            # f"{root}-{pid}.out",
            # f"{root}-{pid}-{tslug}.out",
        ]:
            with open(filename, "a") as f:
                print(f"{pid}:{tslug}: {msg}", file=f, flush=True)

    def arg_repr(arg: Any) -> str:
        """Make a customized repr for logged values."""
        # Code objects get a compact identity + name + location form.
        if isinstance(arg, CodeType):
            return (
                f"<code @{id(arg):#x}"
                + f" name={arg.co_name},"
                + f" file={short_filename(arg.co_filename)!r}#{arg.co_firstlineno}>"
            )
        return repr(arg)

    def panopticon(*names: str | None) -> AnyCallable:
        """Decorate a function to log its calls."""

        def _decorator(method: AnyCallable) -> AnyCallable:
            @functools.wraps(method)
            def _wrapped(self: Any, *args: Any) -> Any:
                try:
                    # log(f"{method.__name__}() stack:\n{short_stack()}")
                    # `names` labels the positional args; None means "don't log".
                    args_reprs = []
                    for name, arg in zip(names, args):
                        if name is None:
                            continue
                        args_reprs.append(f"{name}={arg_repr(arg)}")
                    log(f"{id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
                    ret = method(self, *args)
                    # log(f"  end {id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
                    return ret
                except Exception as exc:
                    # Log the failure, try to stop monitoring, then re-raise.
                    log(f"!!{exc.__class__.__name__}: {exc}")
                    log("".join(traceback.format_exception(exc)))  # pylint: disable=[no-value-for-parameter]
                    try:
                        assert sys_monitoring is not None
                        sys_monitoring.set_events(sys.monitoring.COVERAGE_ID, 0)
                    except ValueError:
                        # We might have already shut off monitoring.
                        log("oops, shutting off events with disabled tool id")
                    raise

            return _wrapped

        return _decorator

else:

    def log(msg: str) -> None:
        """Write a message to our detailed debugging log(s), but not really."""

    def panopticon(*names: str | None) -> AnyCallable:
        """Decorate a function to log its calls, but not really."""

        def _decorator(meth: AnyCallable) -> AnyCallable:
            return meth

        return _decorator
|
||||
|
||||
|
||||
@dataclass
class CodeInfo:
    """The information we want about each code object."""

    # Whether this code object's file is being measured at all.
    tracing: bool
    # The per-file set of collected data points (the set stored in the
    # tracer's `data` mapping), or None when the file isn't traced.
    file_data: TTraceFileData | None
    # TODO: what is byte_to_line for?
    # Maps bytecode offsets to source line numbers (see bytes_to_lines);
    # presumably for arc tracing — confirm once the consumer is visible.
    byte_to_line: dict[int, int] | None
|
||||
|
||||
|
||||
def bytes_to_lines(code: CodeType) -> dict[int, int]:
|
||||
"""Make a dict mapping byte code offsets to line numbers."""
|
||||
b2l = {}
|
||||
for bstart, bend, lineno in code.co_lines():
|
||||
if lineno is not None:
|
||||
for boffset in range(bstart, bend, 2):
|
||||
b2l[boffset] = lineno
|
||||
return b2l
|
||||
|
||||
|
||||
class SysMonitor(TracerCore):
|
||||
"""Python implementation of the raw data tracer for PEP669 implementations."""
|
||||
|
||||
# One of these will be used across threads. Be careful.
|
||||
|
||||
def __init__(self, tool_id: int) -> None:
|
||||
# Attributes set from the collector:
|
||||
self.data: TTraceData
|
||||
self.trace_arcs = False
|
||||
self.should_trace: Callable[[str, FrameType], TFileDisposition]
|
||||
self.should_trace_cache: dict[str, TFileDisposition | None]
|
||||
# TODO: should_start_context and switch_context are unused!
|
||||
# Change tests/testenv.py:DYN_CONTEXTS when this is updated.
|
||||
self.should_start_context: Callable[[FrameType], str | None] | None = None
|
||||
self.switch_context: Callable[[str | None], None] | None = None
|
||||
# TODO: warn is unused.
|
||||
self.warn: TWarnFn
|
||||
|
||||
self.myid = tool_id
|
||||
|
||||
# Map id(code_object) -> CodeInfo
|
||||
self.code_infos: dict[int, CodeInfo] = {}
|
||||
# A list of code_objects, just to keep them alive so that id's are
|
||||
# useful as identity.
|
||||
self.code_objects: list[CodeType] = []
|
||||
self.last_lines: dict[FrameType, int] = {}
|
||||
# Map id(code_object) -> code_object
|
||||
self.local_event_codes: dict[int, CodeType] = {}
|
||||
self.sysmon_on = False
|
||||
|
||||
self.stats = {
|
||||
"starts": 0,
|
||||
}
|
||||
|
||||
self.stopped = False
|
||||
self._activity = False
|
||||
|
||||
def __repr__(self) -> str:
|
||||
points = sum(len(v) for v in self.data.values())
|
||||
files = len(self.data)
|
||||
return f"<SysMonitor at {id(self):#x}: {points} data points in {files} files>"
|
||||
|
||||
@panopticon()
|
||||
def start(self) -> None:
|
||||
"""Start this Tracer."""
|
||||
self.stopped = False
|
||||
|
||||
assert sys_monitoring is not None
|
||||
sys_monitoring.use_tool_id(self.myid, "coverage.py")
|
||||
register = functools.partial(sys_monitoring.register_callback, self.myid)
|
||||
events = sys_monitoring.events
|
||||
if self.trace_arcs:
|
||||
sys_monitoring.set_events(
|
||||
self.myid,
|
||||
events.PY_START | events.PY_UNWIND,
|
||||
)
|
||||
register(events.PY_START, self.sysmon_py_start)
|
||||
register(events.PY_RESUME, self.sysmon_py_resume_arcs)
|
||||
register(events.PY_RETURN, self.sysmon_py_return_arcs)
|
||||
register(events.PY_UNWIND, self.sysmon_py_unwind_arcs)
|
||||
register(events.LINE, self.sysmon_line_arcs)
|
||||
else:
|
||||
sys_monitoring.set_events(self.myid, events.PY_START)
|
||||
register(events.PY_START, self.sysmon_py_start)
|
||||
register(events.LINE, self.sysmon_line_lines)
|
||||
sys_monitoring.restart_events()
|
||||
self.sysmon_on = True
|
||||
|
||||
@panopticon()
|
||||
def stop(self) -> None:
|
||||
"""Stop this Tracer."""
|
||||
if not self.sysmon_on:
|
||||
# In forking situations, we might try to stop when we are not
|
||||
# started. Do nothing in that case.
|
||||
return
|
||||
assert sys_monitoring is not None
|
||||
sys_monitoring.set_events(self.myid, 0)
|
||||
self.sysmon_on = False
|
||||
for code in self.local_event_codes.values():
|
||||
sys_monitoring.set_local_events(self.myid, code, 0)
|
||||
self.local_event_codes = {}
|
||||
sys_monitoring.free_tool_id(self.myid)
|
||||
|
||||
@panopticon()
|
||||
def post_fork(self) -> None:
|
||||
"""The process has forked, clean up as needed."""
|
||||
self.stop()
|
||||
|
||||
def activity(self) -> bool:
|
||||
"""Has there been any activity?"""
|
||||
return self._activity
|
||||
|
||||
def reset_activity(self) -> None:
|
||||
"""Reset the activity() flag."""
|
||||
self._activity = False
|
||||
|
||||
def get_stats(self) -> dict[str, int] | None:
|
||||
"""Return a dictionary of statistics, or None."""
|
||||
return None
|
||||
|
||||
# The number of frames in callers_frame takes @panopticon into account.
|
||||
if LOG:
|
||||
|
||||
def callers_frame(self) -> FrameType:
|
||||
"""Get the frame of the Python code we're monitoring."""
|
||||
return (
|
||||
inspect.currentframe().f_back.f_back.f_back # type: ignore[union-attr,return-value]
|
||||
)
|
||||
|
||||
else:
|
||||
|
||||
def callers_frame(self) -> FrameType:
|
||||
"""Get the frame of the Python code we're monitoring."""
|
||||
return inspect.currentframe().f_back.f_back # type: ignore[union-attr,return-value]
|
||||
|
||||
    @panopticon("code", "@")
    def sysmon_py_start(self, code: CodeType, instruction_offset: int) -> MonitorReturn:
        """Handle sys.monitoring.events.PY_START events.

        Decides (and caches, per code object) whether this file should be
        traced, and if so registers the local events we need on `code`.
        """
        # Entering a new frame.  Decide if we should trace in this file.
        self._activity = True
        self.stats["starts"] += 1

        code_info = self.code_infos.get(id(code))
        tracing_code: bool | None = None
        file_data: TTraceFileData | None = None
        if code_info is not None:
            tracing_code = code_info.tracing
            file_data = code_info.file_data

        if tracing_code is None:
            # First time we've seen this code object: consult (and fill)
            # the per-filename should_trace cache.
            filename = code.co_filename
            disp = self.should_trace_cache.get(filename)
            if disp is None:
                frame = inspect.currentframe().f_back  # type: ignore[union-attr]
                if LOG:
                    # @panopticon adds a frame.
                    frame = frame.f_back  # type: ignore[union-attr]
                disp = self.should_trace(filename, frame)  # type: ignore[arg-type]
                self.should_trace_cache[filename] = disp

            tracing_code = disp.trace
            if tracing_code:
                tracename = disp.source_filename
                assert tracename is not None
                if tracename not in self.data:
                    self.data[tracename] = set()
                file_data = self.data[tracename]
                b2l = bytes_to_lines(code)
            else:
                file_data = None
                b2l = None

            self.code_infos[id(code)] = CodeInfo(
                tracing=tracing_code,
                file_data=file_data,
                byte_to_line=b2l,
            )
            self.code_objects.append(code)

            if tracing_code:
                events = sys.monitoring.events
                if self.sysmon_on:
                    assert sys_monitoring is not None
                    sys_monitoring.set_local_events(
                        self.myid,
                        code,
                        events.PY_RETURN
                        #
                        | events.PY_RESUME
                        # | events.PY_YIELD
                        | events.LINE,
                        # | events.BRANCH
                        # | events.JUMP
                    )
                    # Remembered so stop() can clear these local events.
                    self.local_event_codes[id(code)] = code

        if tracing_code and self.trace_arcs:
            # Seed arc tracking: -co_firstlineno is the synthetic "entered
            # the function" marker used by the arc handlers.
            frame = self.callers_frame()
            self.last_lines[frame] = -code.co_firstlineno
            return None
        else:
            # Not measuring this code object: turn off its PY_START events.
            return sys.monitoring.DISABLE
||||
    @panopticon("code", "@")
    def sysmon_py_resume_arcs(
        self, code: CodeType, instruction_offset: int,
    ) -> MonitorReturn:
        """Handle sys.monitoring.events.PY_RESUME events for branch coverage."""
        # Restart arc tracking from the line the frame resumes on.
        frame = self.callers_frame()
        self.last_lines[frame] = frame.f_lineno
||||
    @panopticon("code", "@", None)
    def sysmon_py_return_arcs(
        self, code: CodeType, instruction_offset: int, retval: object,
    ) -> MonitorReturn:
        """Handle sys.monitoring.events.PY_RETURN events for branch coverage."""
        frame = self.callers_frame()
        code_info = self.code_infos.get(id(code))
        if code_info is not None and code_info.file_data is not None:
            last_line = self.last_lines.get(frame)
            if last_line is not None:
                # -co_firstlineno is the synthetic "left the function" marker,
                # matching the entry marker set in sysmon_py_start.
                arc = (last_line, -code.co_firstlineno)
                # log(f"adding {arc=}")
                cast(Set[TArc], code_info.file_data).add(arc)

        # Leaving this function, no need for the frame any more.
        self.last_lines.pop(frame, None)
||||
    @panopticon("code", "@", "exc")
    def sysmon_py_unwind_arcs(
        self, code: CodeType, instruction_offset: int, exception: BaseException,
    ) -> MonitorReturn:
        """Handle sys.monitoring.events.PY_UNWIND events for branch coverage."""
        frame = self.callers_frame()
        # Leaving this function.  Pop unconditionally: the frame is going
        # away even when we decide not to record an arc below.
        last_line = self.last_lines.pop(frame, None)
        if isinstance(exception, GeneratorExit):
            # We don't want to count generator exits as arcs.
            return
        code_info = self.code_infos.get(id(code))
        if code_info is not None and code_info.file_data is not None:
            if last_line is not None:
                arc = (last_line, -code.co_firstlineno)
                # log(f"adding {arc=}")
                cast(Set[TArc], code_info.file_data).add(arc)
||||
    @panopticon("code", "line")
    def sysmon_line_lines(self, code: CodeType, line_number: int) -> MonitorReturn:
        """Handle sys.monitoring.events.LINE events for line coverage."""
        code_info = self.code_infos[id(code)]
        if code_info.file_data is not None:
            cast(Set[TLineNo], code_info.file_data).add(line_number)
            # log(f"adding {line_number=}")
        # Once recorded, DISABLE turns off further LINE events for this
        # exact code location (sys.monitoring semantics).
        return sys.monitoring.DISABLE
||||
    @panopticon("code", "line")
    def sysmon_line_arcs(self, code: CodeType, line_number: int) -> MonitorReturn:
        """Handle sys.monitoring.events.LINE events for branch coverage."""
        code_info = self.code_infos[id(code)]
        ret = None
        if code_info.file_data is not None:
            frame = self.callers_frame()
            last_line = self.last_lines.get(frame)
            if last_line is not None:
                arc = (last_line, line_number)
                cast(Set[TArc], code_info.file_data).add(arc)
                # log(f"adding {arc=}")
            self.last_lines[frame] = line_number
        # ret stays None (never DISABLE): presumably because the same line
        # can start many different arcs, every LINE event is needed.
        return ret
||||
306
.venv/lib/python3.10/site-packages/coverage/templite.py
Normal file
306
.venv/lib/python3.10/site-packages/coverage/templite.py
Normal file
|
|
@ -0,0 +1,306 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""A simple Python template renderer, for a nano-subset of Django syntax.
|
||||
|
||||
For a detailed discussion of this code, see this chapter from 500 Lines:
|
||||
http://aosabook.org/en/500L/a-template-engine.html
|
||||
|
||||
"""
|
||||
|
||||
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
|
||||
from typing import (
|
||||
Any, Callable, Dict, NoReturn, cast,
|
||||
)
|
||||
|
||||
|
||||
class TempliteSyntaxError(ValueError):
    """Raised when a template has a syntax error."""
|
||||
|
||||
class TempliteValueError(ValueError):
    """Raised when an expression won't evaluate in a template."""
|
||||
|
||||
class CodeBuilder:
    """Build source code conveniently."""

    INDENT_STEP = 4  # PEP8 says so!

    def __init__(self, indent: int = 0) -> None:
        # Chunks are either literal strings or nested CodeBuilders.
        self.code: list[str | CodeBuilder] = []
        self.indent_level = indent

    def __str__(self) -> str:
        # Flatten everything, recursing into nested builders via their
        # own __str__.
        return "".join(str(chunk) for chunk in self.code)

    def add_line(self, line: str) -> None:
        """Add a line of source to the code.

        Indentation and newline will be added for you, don't provide them.

        """
        self.code.extend([" " * self.indent_level, line, "\n"])

    def add_section(self) -> CodeBuilder:
        """Add a section, a sub-CodeBuilder."""
        sub_builder = CodeBuilder(self.indent_level)
        self.code.append(sub_builder)
        return sub_builder

    def indent(self) -> None:
        """Increase the current indent for following lines."""
        self.indent_level = self.indent_level + self.INDENT_STEP

    def dedent(self) -> None:
        """Decrease the current indent for following lines."""
        self.indent_level = self.indent_level - self.INDENT_STEP

    def get_globals(self) -> dict[str, Any]:
        """Execute the code, and return a dict of globals it defines."""
        # A check that the caller really finished all the blocks they started.
        assert self.indent_level == 0
        # Get the Python source as a single string, then execute it to
        # populate (and return) a fresh global namespace.
        namespace: dict[str, Any] = {}
        exec(str(self), namespace)
        return namespace
|
||||
|
||||
class Templite:
    """A simple template renderer, for a nano-subset of Django syntax.

    Supported constructs are extended variable access::

        {{var.modifier.modifier|filter|filter}}

    loops::

        {% for var in list %}...{% endfor %}

    and ifs::

        {% if var %}...{% endif %}

    Comments are within curly-hash markers::

        {# This will be ignored #}

    Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped
    and joined.  Be careful, this could join words together!

    Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
    which will collapse the white space following the tag.

    Construct a Templite with the template text, then use `render` against a
    dictionary context to create a finished string::

        templite = Templite('''
            <h1>Hello {{name|upper}}!</h1>
            {% for topic in topics %}
                <p>You are interested in {{topic}}.</p>
            {% endfor %}
            ''',
            {"upper": str.upper},
        )
        text = templite.render({
            "name": "Ned",
            "topics": ["Python", "Geometry", "Juggling"],
        })

    """
    def __init__(self, text: str, *contexts: dict[str, Any]) -> None:
        """Construct a Templite with the given `text`.

        `contexts` are dictionaries of values to use for future renderings.
        These are good for filters and global values.

        """
        self.context = {}
        for context in contexts:
            self.context.update(context)

        self.all_vars: set[str] = set()
        self.loop_vars: set[str] = set()

        # We construct a function in source form, then compile it and hold onto
        # it, and execute it to render the template.
        code = CodeBuilder()

        code.add_line("def render_function(context, do_dots):")
        code.indent()
        # vars_code is filled in at the end, once all_vars is known.
        vars_code = code.add_section()
        code.add_line("result = []")
        code.add_line("append_result = result.append")
        code.add_line("extend_result = result.extend")
        code.add_line("to_str = str")

        buffered: list[str] = []

        def flush_output() -> None:
            """Force `buffered` to the code builder."""
            if len(buffered) == 1:
                code.add_line("append_result(%s)" % buffered[0])
            elif len(buffered) > 1:
                code.add_line("extend_result([%s])" % ", ".join(buffered))
            del buffered[:]

        ops_stack = []

        # Split the text to form a list of tokens.
        tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)

        squash = in_joined = False

        for token in tokens:
            if token.startswith("{"):
                start, end = 2, -2
                squash = (token[-3] == "-")
                if squash:
                    end = -3

                if token.startswith("{#"):
                    # Comment: ignore it and move on.
                    continue
                elif token.startswith("{{"):
                    # An expression to evaluate.
                    expr = self._expr_code(token[start:end].strip())
                    buffered.append("to_str(%s)" % expr)
                else:
                    # token.startswith("{%")
                    # Action tag: split into words and parse further.
                    flush_output()

                    words = token[start:end].strip().split()
                    if words[0] == "if":
                        # An if statement: evaluate the expression to determine if.
                        if len(words) != 2:
                            self._syntax_error("Don't understand if", token)
                        ops_stack.append("if")
                        code.add_line("if %s:" % self._expr_code(words[1]))
                        code.indent()
                    elif words[0] == "for":
                        # A loop: iterate over expression result.
                        if len(words) != 4 or words[2] != "in":
                            self._syntax_error("Don't understand for", token)
                        ops_stack.append("for")
                        self._variable(words[1], self.loop_vars)
                        code.add_line(
                            f"for c_{words[1]} in {self._expr_code(words[3])}:",
                        )
                        code.indent()
                    elif words[0] == "joined":
                        ops_stack.append("joined")
                        in_joined = True
                    elif words[0].startswith("end"):
                        # Endsomething.  Pop the ops stack.
                        if len(words) != 1:
                            self._syntax_error("Don't understand end", token)
                        end_what = words[0][3:]
                        if not ops_stack:
                            self._syntax_error("Too many ends", token)
                        start_what = ops_stack.pop()
                        if start_what != end_what:
                            self._syntax_error("Mismatched end tag", end_what)
                        if end_what == "joined":
                            in_joined = False
                        else:
                            code.dedent()
                    else:
                        self._syntax_error("Don't understand tag", words[0])
            else:
                # Literal content.  If it isn't empty, output it.
                if in_joined:
                    token = re.sub(r"\s*\n\s*", "", token.strip())
                elif squash:
                    token = token.lstrip()
                if token:
                    buffered.append(repr(token))

        if ops_stack:
            self._syntax_error("Unmatched action tag", ops_stack[-1])

        flush_output()

        # Prologue: pull every non-loop variable out of the context once.
        for var_name in self.all_vars - self.loop_vars:
            vars_code.add_line(f"c_{var_name} = context[{var_name!r}]")

        code.add_line("return ''.join(result)")
        code.dedent()
        self._render_function = cast(
            Callable[
                [Dict[str, Any], Callable[..., Any]],
                str,
            ],
            code.get_globals()["render_function"],
        )

    def _expr_code(self, expr: str) -> str:
        """Generate a Python expression for `expr`."""
        if "|" in expr:
            pipes = expr.split("|")
            code = self._expr_code(pipes[0])
            for func in pipes[1:]:
                self._variable(func, self.all_vars)
                code = f"c_{func}({code})"
        elif "." in expr:
            dots = expr.split(".")
            code = self._expr_code(dots[0])
            args = ", ".join(repr(d) for d in dots[1:])
            code = f"do_dots({code}, {args})"
        else:
            self._variable(expr, self.all_vars)
            code = "c_%s" % expr
        return code

    def _syntax_error(self, msg: str, thing: Any) -> NoReturn:
        """Raise a syntax error using `msg`, and showing `thing`."""
        raise TempliteSyntaxError(f"{msg}: {thing!r}")

    def _variable(self, name: str, vars_set: set[str]) -> None:
        """Track that `name` is used as a variable.

        Adds the name to `vars_set`, a set of variable names.

        Raises a syntax error if `name` is not a valid name.

        """
        if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
            self._syntax_error("Not a valid name", name)
        vars_set.add(name)

    def render(self, context: dict[str, Any] | None = None) -> str:
        """Render this template by applying it to `context`.

        `context` is a dictionary of values to use in this rendering.

        """
        # Make the complete context we'll use.
        render_context = dict(self.context)
        if context:
            render_context.update(context)
        return self._render_function(render_context, self._do_dots)

    def _do_dots(self, value: Any, *dots: str) -> Any:
        """Evaluate dotted expressions at run-time."""
        for dot in dots:
            try:
                value = getattr(value, dot)
            except AttributeError:
                try:
                    value = value[dot]
                except (TypeError, KeyError) as exc:
                    raise TempliteValueError(
                        f"Couldn't evaluate {value!r}.{dot}",
                    ) from exc
            if callable(value):
                value = value()
        return value
||||
208
.venv/lib/python3.10/site-packages/coverage/tomlconfig.py
Normal file
208
.venv/lib/python3.10/site-packages/coverage/tomlconfig.py
Normal file
|
|
@ -0,0 +1,208 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""TOML configuration support for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from typing import Any, Callable, Iterable, TypeVar
|
||||
|
||||
from coverage import env
|
||||
from coverage.exceptions import ConfigError
|
||||
from coverage.misc import import_third_party, substitute_variables
|
||||
from coverage.types import TConfigSectionOut, TConfigValueOut
|
||||
|
||||
|
||||
if env.PYVERSION >= (3, 11, 0, "alpha", 7):
|
||||
import tomllib # pylint: disable=import-error
|
||||
has_tomllib = True
|
||||
else:
|
||||
# TOML support on Python 3.10 and below is an install-time extra option.
|
||||
tomllib, has_tomllib = import_third_party("tomli")
|
||||
|
||||
|
||||
class TomlDecodeError(Exception):
    """An exception class that exists even when toml isn't installed."""
|
||||
|
||||
TWant = TypeVar("TWant")
|
||||
|
||||
class TomlConfigParser:
    """TOML file reading with the interface of HandyConfigParser."""

    # This class has the same interface as config.HandyConfigParser, no
    # need for docstrings.
    # pylint: disable=missing-function-docstring

    def __init__(self, our_file: bool) -> None:
        # our_file: when True, the file is meant specifically for coverage,
        # so not being able to parse TOML at all is an error (see read()).
        self.our_file = our_file
        self.data: dict[str, Any] = {}

    def read(self, filenames: Iterable[str]) -> list[str]:
        # RawConfigParser takes a filename or list of filenames, but we only
        # ever call this with a single filename.
        assert isinstance(filenames, (bytes, str, os.PathLike))
        filename = os.fspath(filenames)

        try:
            with open(filename, encoding='utf-8') as fp:
                toml_text = fp.read()
        except OSError:
            return []
        if has_tomllib:
            try:
                self.data = tomllib.loads(toml_text)
            except tomllib.TOMLDecodeError as err:
                raise TomlDecodeError(str(err)) from err
            return [filename]
        else:
            # No TOML parser available: only complain if the file looks like
            # it carries coverage settings (or is our own config file).
            has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE)
            if self.our_file or has_toml:
                # Looks like they meant to read TOML, but we can't read it.
                msg = "Can't read {!r} without TOML support. Install with [toml] extra"
                raise ConfigError(msg.format(filename))
            return []

    def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]:
        """Get a section from the data.

        Arguments:
            section (str): A section name, which can be dotted.

        Returns:
            name (str): the actual name of the section that was found, if any,
                or None.
            data (str): the dict of data in the section, or None if not found.

        """
        prefixes = ["tool.coverage."]
        for prefix in prefixes:
            real_section = prefix + section
            parts = real_section.split(".")
            try:
                # Walk the nested TOML tables one dotted component at a time.
                data = self.data[parts[0]]
                for part in parts[1:]:
                    data = data[part]
            except KeyError:
                continue
            break
        else:
            return None, None
        return real_section, data

    def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]:
        """Like .get, but returns the real section name and the value."""
        name, data = self._get_section(section)
        if data is None:
            raise ConfigError(f"No section: {section!r}")
        assert name is not None
        try:
            value = data[option]
        except KeyError:
            raise ConfigError(f"No option {option!r} in section: {name!r}") from None
        return name, value

    def _get_single(self, section: str, option: str) -> Any:
        """Get a single-valued option.

        Performs environment substitution if the value is a string. Other types
        will be converted later as needed.

        Returns a (real section name, value) pair, like `_get`.
        """
        name, value = self._get(section, option)
        if isinstance(value, str):
            value = substitute_variables(value, os.environ)
        return name, value

    def has_option(self, section: str, option: str) -> bool:
        _, data = self._get_section(section)
        if data is None:
            return False
        return option in data

    def real_section(self, section: str) -> str | None:
        name, _ = self._get_section(section)
        return name

    def has_section(self, section: str) -> bool:
        name, _ = self._get_section(section)
        return bool(name)

    def options(self, section: str) -> list[str]:
        _, data = self._get_section(section)
        if data is None:
            raise ConfigError(f"No section: {section!r}")
        return list(data.keys())

    def get_section(self, section: str) -> TConfigSectionOut:
        _, data = self._get_section(section)
        return data or {}

    def get(self, section: str, option: str) -> Any:
        _, value = self._get_single(section, option)
        return value

    def _check_type(
        self,
        section: str,
        option: str,
        value: Any,
        type_: type[TWant],
        converter: Callable[[Any], TWant] | None,
        type_desc: str,
    ) -> TWant:
        """Check that `value` has the type we want, converting if needed.

        Returns the resulting value of the desired type.
        """
        if isinstance(value, type_):
            return value
        # Strings can be converted (e.g. after environment substitution).
        if isinstance(value, str) and converter is not None:
            try:
                return converter(value)
            except Exception as e:
                raise ValueError(
                    f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}",
                ) from e
        raise ValueError(
            f"Option [{section}]{option} is not {type_desc}: {value!r}",
        )

    def getboolean(self, section: str, option: str) -> bool:
        name, value = self._get_single(section, option)
        bool_strings = {"true": True, "false": False}
        return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean")

    def _get_list(self, section: str, option: str) -> tuple[str, list[str]]:
        """Get a list of strings, substituting environment variables in the elements."""
        name, values = self._get(section, option)
        values = self._check_type(name, option, values, list, None, "a list")
        values = [substitute_variables(value, os.environ) for value in values]
        return name, values

    def getlist(self, section: str, option: str) -> list[str]:
        _, values = self._get_list(section, option)
        return values

    def getregexlist(self, section: str, option: str) -> list[str]:
        name, values = self._get_list(section, option)
        for value in values:
            value = value.strip()
            try:
                # Validate each regex now so the error names the bad option.
                re.compile(value)
            except re.error as e:
                raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e
        return values

    def getint(self, section: str, option: str) -> int:
        name, value = self._get_single(section, option)
        return self._check_type(name, option, value, int, int, "an integer")

    def getfloat(self, section: str, option: str) -> float:
        name, value = self._get_single(section, option)
        if isinstance(value, int):
            value = float(value)
        return self._check_type(name, option, value, float, float, "a float")
BIN
.venv/lib/python3.10/site-packages/coverage/tracer.cpython-310-darwin.so
Executable file
BIN
.venv/lib/python3.10/site-packages/coverage/tracer.cpython-310-darwin.so
Executable file
Binary file not shown.
39
.venv/lib/python3.10/site-packages/coverage/tracer.pyi
Normal file
39
.venv/lib/python3.10/site-packages/coverage/tracer.pyi
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Typing information for the constructs from our .c files."""
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from coverage.types import TFileDisposition, TTraceData, TTraceFn, TracerCore
|
||||
|
||||
class CFileDisposition(TFileDisposition):
    """CFileDisposition is in ctracer/filedisp.c"""
    # Attributes are typed Any because the authoritative definitions are in C.
    canonical_filename: Any
    file_tracer: Any
    has_dynamic_filename: Any
    original_filename: Any
    reason: Any
    source_filename: Any
    trace: Any
    def __init__(self) -> None: ...
|
||||
class CTracer(TracerCore):
    """CTracer is in ctracer/tracer.c"""
    # Attributes are typed Any because the authoritative definitions are in C.
    check_include: Any
    concur_id_func: Any
    data: TTraceData
    disable_plugin: Any
    file_tracers: Any
    should_start_context: Any
    should_trace: Any
    should_trace_cache: Any
    switch_context: Any
    trace_arcs: Any
    warn: Any
    def __init__(self) -> None: ...
    def activity(self) -> bool: ...
    def get_stats(self) -> Dict[str, int]: ...
    def reset_activity(self) -> Any: ...
    def start(self) -> TTraceFn: ...
    def stop(self) -> None: ...
194
.venv/lib/python3.10/site-packages/coverage/types.py
Normal file
194
.venv/lib/python3.10/site-packages/coverage/types.py
Normal file
|
|
@ -0,0 +1,194 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""
|
||||
Types for use throughout coverage.py.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import pathlib
|
||||
|
||||
from types import FrameType, ModuleType
|
||||
from typing import (
|
||||
Any, Callable, Dict, Iterable, List, Mapping, Optional, Protocol,
|
||||
Set, Tuple, Type, Union, TYPE_CHECKING,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage.plugin import FileTracer
|
||||
|
||||
|
||||
AnyCallable = Callable[..., Any]
|
||||
|
||||
## File paths
|
||||
|
||||
# For arguments that are file paths:
|
||||
if TYPE_CHECKING:
|
||||
FilePath = Union[str, os.PathLike[str]]
|
||||
else:
|
||||
# PathLike < python3.9 doesn't support subscription
|
||||
FilePath = Union[str, os.PathLike]
|
||||
# For testing FilePath arguments
|
||||
FilePathClasses = [str, pathlib.Path]
|
||||
FilePathType = Union[Type[str], Type[pathlib.Path]]
|
||||
|
||||
## Python tracing
|
||||
|
||||
class TTraceFn(Protocol):
    """A Python trace function."""
    # Same call shape as a sys.settrace callback, plus an extra optional
    # `lineno` argument.
    def __call__(
        self,
        frame: FrameType,
        event: str,
        arg: Any,
        lineno: TLineNo | None = None,  # Our own twist, see collector.py
    ) -> TTraceFn | None:
        ...
|
||||
## Coverage.py tracing
|
||||
|
||||
# Line numbers are pervasive enough that they deserve their own type.
|
||||
TLineNo = int
|
||||
|
||||
TArc = Tuple[TLineNo, TLineNo]
|
||||
|
||||
class TFileDisposition(Protocol):
    """A simple value type for recording what to do with a file."""

    original_filename: str
    canonical_filename: str
    source_filename: str | None
    # NOTE(review): `reason` appears to explain why `trace` is False —
    # confirm against the should_trace implementation.
    trace: bool
    reason: str
    file_tracer: FileTracer | None
    has_dynamic_filename: bool
||||
|
||||
# When collecting data, we use a dictionary with a few possible shapes. The
|
||||
# keys are always file names.
|
||||
# - If measuring line coverage, the values are sets of line numbers.
|
||||
# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs
|
||||
# of line numbers).
|
||||
# - If measuring arcs in the C tracer, the values are sets of packed arcs (two
|
||||
# line numbers combined into one integer).
|
||||
|
||||
TTraceFileData = Union[Set[TLineNo], Set[TArc], Set[int]]
|
||||
|
||||
TTraceData = Dict[str, TTraceFileData]
|
||||
|
||||
class TracerCore(Protocol):
    """Anything that can report on Python execution."""

    # NOTE(review): these attributes look like they are assigned by the
    # collector before start() is called — confirm in collector.py.
    data: TTraceData
    trace_arcs: bool
    should_trace: Callable[[str, FrameType], TFileDisposition]
    should_trace_cache: Mapping[str, TFileDisposition | None]
    should_start_context: Callable[[FrameType], str | None] | None
    switch_context: Callable[[str | None], None] | None
    warn: TWarnFn

    def __init__(self) -> None:
        ...

    def start(self) -> TTraceFn | None:
        """Start this tracer, return a trace function if based on sys.settrace."""

    def stop(self) -> None:
        """Stop this tracer."""

    def activity(self) -> bool:
        """Has there been any activity?"""

    def reset_activity(self) -> None:
        """Reset the activity() flag."""

    def get_stats(self) -> dict[str, int] | None:
        """Return a dictionary of statistics, or None."""
|
||||
|
||||
## Coverage
|
||||
|
||||
# Many places use kwargs as Coverage kwargs.
|
||||
TCovKwargs = Any
|
||||
|
||||
|
||||
## Configuration
|
||||
|
||||
# One value read from a config file.
|
||||
TConfigValueIn = Optional[Union[bool, int, float, str, Iterable[str]]]
|
||||
TConfigValueOut = Optional[Union[bool, int, float, str, List[str]]]
|
||||
# An entire config section, mapping option names to values.
|
||||
TConfigSectionIn = Mapping[str, TConfigValueIn]
|
||||
TConfigSectionOut = Mapping[str, TConfigValueOut]
|
||||
|
||||
class TConfigurable(Protocol):
    """Something that can proxy to the coverage configuration settings."""

    def get_option(self, option_name: str) -> TConfigValueOut | None:
        """Get an option from the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name. For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        Returns the value of the option.

        """

    def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
        """Set an option in the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name, in the same ``"section:option"`` form that `get_option`
        uses. For example, the ``branch`` option in the ``[run]`` section of
        the config file would be indicated with `"run:branch"`.

        `value` is the new value for the option.

        """
|
||||
class TPluginConfig(Protocol):
    """Something that can provide options to a plugin."""

    def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
        """Get the options for a plugin.

        `plugin` is the name of the plugin; returns its config section as
        an option-name-to-value mapping.
        """
|
||||
|
||||
## Parsing
|
||||
|
||||
TMorf = Union[ModuleType, str]
|
||||
|
||||
TSourceTokenLines = Iterable[List[Tuple[str, str]]]
|
||||
|
||||
## Plugins
|
||||
|
||||
class TPlugin(Protocol):
    """What all plugins have in common."""
    # NOTE(review): the underscore-prefixed names suggest these are set on
    # plugin objects by the plugin machinery, not by plugin authors — confirm.
    _coverage_plugin_name: str
    _coverage_enabled: bool
|
||||
|
||||
## Debugging
|
||||
|
||||
class TWarnFn(Protocol):
    """A callable warn() function."""
    # `slug` names the warning category; `once` suppresses repeats.
    # NOTE(review): semantics inferred from parameter names — confirm against
    # the Coverage._warn implementation.
    def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None:
        ...
|
||||
|
||||
class TDebugCtl(Protocol):
    """A DebugControl object, or something like it."""

    def should(self, option: str) -> bool:
        """Decide whether to output debug information in category `option`."""

    def write(self, msg: str) -> None:
        """Write a line of debug output."""
|
||||
|
||||
class TWritable(Protocol):
    """Anything that can be written to."""

    def write(self, msg: str) -> None:
        """Write a message."""
50
.venv/lib/python3.10/site-packages/coverage/version.py
Normal file
50
.venv/lib/python3.10/site-packages/coverage/version.py
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""The version and URL for coverage.py"""
|
||||
# This file is exec'ed in setup.py, don't import anything!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# version_info: same semantics as sys.version_info.
|
||||
# _dev: the .devN suffix if any.
|
||||
version_info = (7, 4, 4, "final", 0)
|
||||
_dev = 0
|
||||
|
||||
|
||||
def _make_version(
|
||||
major: int,
|
||||
minor: int,
|
||||
micro: int,
|
||||
releaselevel: str = "final",
|
||||
serial: int = 0,
|
||||
dev: int = 0,
|
||||
) -> str:
|
||||
"""Create a readable version string from version_info tuple components."""
|
||||
assert releaselevel in ["alpha", "beta", "candidate", "final"]
|
||||
version = "%d.%d.%d" % (major, minor, micro)
|
||||
if releaselevel != "final":
|
||||
short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel]
|
||||
version += f"{short}{serial}"
|
||||
if dev != 0:
|
||||
version += f".dev{dev}"
|
||||
return version
|
||||
|
||||
|
||||
def _make_url(
    major: int,
    minor: int,
    micro: int,
    releaselevel: str,
    serial: int = 0,
    dev: int = 0,
) -> str:
    """Make the URL people should start at for this version of coverage.py."""
    base = "https://coverage.readthedocs.io/en/"
    version_text = _make_version(major, minor, micro, releaselevel, serial, dev)
    return base + version_text
||||
# The public version string and documentation URL, derived from version_info
# plus the _dev suffix defined above.
__version__ = _make_version(*version_info, _dev)
__url__ = _make_url(*version_info, _dev)
260
.venv/lib/python3.10/site-packages/coverage/xmlreport.py
Normal file
260
.venv/lib/python3.10/site-packages/coverage/xmlreport.py
Normal file
|
|
@ -0,0 +1,260 @@
|
|||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""XML reporting for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import time
|
||||
import xml.dom.minidom
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, IO, Iterable, TYPE_CHECKING
|
||||
|
||||
from coverage import __version__, files
|
||||
from coverage.misc import isolate_module, human_sorted, human_sorted_items
|
||||
from coverage.plugin import FileReporter
|
||||
from coverage.report_core import get_analysis_to_report
|
||||
from coverage.results import Analysis
|
||||
from coverage.types import TMorf
|
||||
from coverage.version import __url__
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
|
||||
# NOTE(review): isolate_module presumably gives this module its own reference
# to `os` so patching by measured code doesn't affect reporting — confirm in
# coverage.misc.
os = isolate_module(os)


# The Cobertura DTD this report is modeled on; emitted as an XML comment.
DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd"
||||
|
||||
def rate(hit: int, num: int) -> str:
    """Return the fraction of `hit`/`num`, as a string.

    A zero denominator counts as full coverage ("1").
    """
    if num:
        return "%.4g" % (hit / num)
    return "1"
||||
@dataclass
class PackageData:
    """Data we keep about each "package" (in Java terms)."""
    # Built <class> DOM elements, keyed by the file's relative name.
    elements: dict[str, xml.dom.minidom.Element]
    # Number of executed statements, summed over the package's files.
    hits: int
    # Total number of statements, summed over the package's files.
    lines: int
    # Number of taken branches, summed over the package's files.
    br_hits: int
    # Total number of branches, summed over the package's files.
    branches: int
||||
def appendChild(parent: Any, child: Any) -> None:
    """Append a child to a parent, in a way mypy will shut up about."""
    add = parent.appendChild
    add(child)
||||
class XmlReporter:
    """A reporter for writing Cobertura-style XML coverage results."""

    # Label used when describing this report kind.
    report_type = "XML report"

    def __init__(self, coverage: Coverage) -> None:
        """Prepare to report on `coverage`, a Coverage instance."""
        self.coverage = coverage
        self.config = self.coverage.config

        # Roots that file names are made relative to; grown in xml_file() for
        # files that don't match a configured source path.
        self.source_paths = set()
        if self.config.source:
            for src in self.config.source:
                # Only paths that exist on disk are usable as roots.
                if os.path.exists(src):
                    if self.config.relative_files:
                        # Keep the configured path as-is, minus trailing
                        # slashes/backslashes.
                        src = src.rstrip(r"\/")
                    else:
                        src = files.canonical_filename(src)
                    self.source_paths.add(src)
        # Per-package accumulated data, keyed by dotted package name.
        self.packages: dict[str, PackageData] = {}
        # The DOM document being built; assigned in report().
        self.xml_out: xml.dom.minidom.Document

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
        """Generate a Cobertura-compatible XML report for `morfs`.

        `morfs` is a list of modules or file names.

        `outfile` is a file object to write the XML to.

        Returns the total coverage percentage, counting lines and branches
        together.
        """
        # Initial setup.
        outfile = outfile or sys.stdout
        has_arcs = self.coverage.get_data().has_arcs()

        # Create the DOM that will store the data.
        impl = xml.dom.minidom.getDOMImplementation()
        assert impl is not None
        self.xml_out = impl.createDocument(None, "coverage", None)

        # Write header stuff.
        xcoverage = self.xml_out.documentElement
        xcoverage.setAttribute("version", __version__)
        # Timestamp attribute is in milliseconds since the epoch.
        xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
        xcoverage.appendChild(self.xml_out.createComment(
            f" Generated by coverage.py: {__url__} ",
        ))
        xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} "))

        # Call xml_file for each file in the data.  This populates
        # self.packages and self.source_paths as a side effect.
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.xml_file(fr, analysis, has_arcs)

        xsources = self.xml_out.createElement("sources")
        xcoverage.appendChild(xsources)

        # Populate the XML DOM with the source info.
        for path in human_sorted(self.source_paths):
            xsource = self.xml_out.createElement("source")
            appendChild(xsources, xsource)
            txt = self.xml_out.createTextNode(path)
            appendChild(xsource, txt)

        # Running totals over all packages: lines and branches.
        lnum_tot, lhits_tot = 0, 0
        bnum_tot, bhits_tot = 0, 0

        xpackages = self.xml_out.createElement("packages")
        xcoverage.appendChild(xpackages)

        # Populate the XML DOM with the package info.
        for pkg_name, pkg_data in human_sorted_items(self.packages.items()):
            xpackage = self.xml_out.createElement("package")
            appendChild(xpackages, xpackage)
            xclasses = self.xml_out.createElement("classes")
            appendChild(xpackage, xclasses)
            # Attach the per-file <class> elements built by xml_file().
            for _, class_elt in human_sorted_items(pkg_data.elements.items()):
                appendChild(xclasses, class_elt)
            xpackage.setAttribute("name", pkg_name.replace(os.sep, "."))
            xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines))
            if has_arcs:
                branch_rate = rate(pkg_data.br_hits, pkg_data.branches)
            else:
                branch_rate = "0"
            xpackage.setAttribute("branch-rate", branch_rate)
            xpackage.setAttribute("complexity", "0")

            lhits_tot += pkg_data.hits
            lnum_tot += pkg_data.lines
            bhits_tot += pkg_data.br_hits
            bnum_tot += pkg_data.branches

        xcoverage.setAttribute("lines-valid", str(lnum_tot))
        xcoverage.setAttribute("lines-covered", str(lhits_tot))
        xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
        if has_arcs:
            xcoverage.setAttribute("branches-valid", str(bnum_tot))
            xcoverage.setAttribute("branches-covered", str(bhits_tot))
            xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
        else:
            xcoverage.setAttribute("branches-covered", "0")
            xcoverage.setAttribute("branches-valid", "0")
            xcoverage.setAttribute("branch-rate", "0")
        xcoverage.setAttribute("complexity", "0")

        # Write the output file.
        outfile.write(serialize_xml(self.xml_out))

        # Return the total percentage: covered lines plus covered branches,
        # over total lines plus total branches.
        denom = lnum_tot + bnum_tot
        if denom == 0:
            pct = 0.0
        else:
            pct = 100.0 * (lhits_tot + bhits_tot) / denom
        return pct

    def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None:
        """Add to the XML report for a single file.

        Builds one <class> element for `fr` and folds its statistics into the
        owning entry of self.packages.
        """

        # Optionally skip files with no statements at all.
        if self.config.skip_empty:
            if analysis.numbers.n_statements == 0:
                return

        # Create the "lines" and "package" XML elements, which
        # are populated later.  Note that a package == a directory.
        filename = fr.filename.replace("\\", "/")
        # Find a configured source path this file lives under, to compute its
        # relative name.
        for source_path in self.source_paths:
            if not self.config.relative_files:
                source_path = files.canonical_filename(source_path)
            if filename.startswith(source_path.replace("\\", "/") + "/"):
                rel_name = filename[len(source_path)+1:]
                break
        else:
            # No configured root matched: use the reporter's own relative name
            # and remember the implied root for the <sources> section.
            rel_name = fr.relative_filename().replace("\\", "/")
            self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))

        dirname = os.path.dirname(rel_name) or "."
        # Truncate the directory path to the configured package depth.
        dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
        package_name = dirname.replace("/", ".")

        package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0))

        xclass: xml.dom.minidom.Element = self.xml_out.createElement("class")

        appendChild(xclass, self.xml_out.createElement("methods"))

        xlines = self.xml_out.createElement("lines")
        appendChild(xclass, xlines)

        xclass.setAttribute("name", os.path.relpath(rel_name, dirname))
        xclass.setAttribute("filename", rel_name.replace("\\", "/"))
        xclass.setAttribute("complexity", "0")

        branch_stats = analysis.branch_stats()
        missing_branch_arcs = analysis.missing_branch_arcs()

        # For each statement, create an XML "line" element.
        for line in sorted(analysis.statements):
            xline = self.xml_out.createElement("line")
            xline.setAttribute("number", str(line))

            # Q: can we get info about the number of times a statement is
            # executed?  If so, that should be recorded here.
            # For now "hits" is just 0 (missed) or 1 (executed).
            xline.setAttribute("hits", str(int(line not in analysis.missing)))

            if has_arcs:
                if line in branch_stats:
                    total, taken = branch_stats[line]
                    xline.setAttribute("branch", "true")
                    xline.setAttribute(
                        "condition-coverage",
                        "%d%% (%d/%d)" % (100*taken//total, taken, total),
                    )
                if line in missing_branch_arcs:
                    # Negative destinations mean the branch exits the function.
                    annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
                    xline.setAttribute("missing-branches", ",".join(annlines))
            appendChild(xlines, xline)

        class_lines = len(analysis.statements)
        class_hits = class_lines - len(analysis.missing)

        if has_arcs:
            # branch_stats values are (total, taken) pairs per line.
            class_branches = sum(t for t, k in branch_stats.values())
            missing_branches = sum(t - k for t, k in branch_stats.values())
            class_br_hits = class_branches - missing_branches
        else:
            class_branches = 0
            class_br_hits = 0

        # Finalize the statistics that are collected in the XML DOM.
        xclass.setAttribute("line-rate", rate(class_hits, class_lines))
        if has_arcs:
            branch_rate = rate(class_br_hits, class_branches)
        else:
            branch_rate = "0"
        xclass.setAttribute("branch-rate", branch_rate)

        # Fold this file's element and totals into its package.
        package.elements[rel_name] = xclass
        package.hits += class_hits
        package.lines += class_lines
        package.br_hits += class_br_hits
        package.branches += class_branches
||||
def serialize_xml(dom: xml.dom.minidom.Document) -> str:
    """Serialize a minidom node to XML."""
    text = dom.toprettyxml()
    return text
Loading…
Add table
Add a link
Reference in a new issue