diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index a2172ab84..0c0d2bd8c 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -51,7 +51,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3
+ uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -62,7 +62,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
- uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3
+ uses: github/codeql-action/autobuild@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -76,4 +76,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3
+ uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index ba81d4ad1..2841c55d7 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -54,6 +54,7 @@ jobs:
- "**.c"
- ".github/workflows/coverage.yml"
- "tox.ini"
+ - "metacov.ini"
- "requirements/*.pip"
- "tests/gold/**"
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 99c70bbb9..312c23b13 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -29,7 +29,7 @@ jobs:
persist-credentials: false
- name: 'Dependency Review'
- uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1
+ uses: actions/dependency-review-action@bc41886e18ea39df68b1b1245f4184881938e050 # v4.7.2
with:
base-ref: ${{ github.event.pull_request.base.sha || 'master' }}
head-ref: ${{ github.event.pull_request.head.sha || github.ref }}
diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml
index 67c7608ef..94e4243e0 100644
--- a/.github/workflows/quality.yml
+++ b/.github/workflows/quality.yml
@@ -173,7 +173,7 @@ jobs:
persist-credentials: false
- name: Install the latest version of uv
- uses: astral-sh/setup-uv@d9e0f98d3fc6adb07d1e3d37f3043649ddad06a1 #v6.5.0
+ uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b #v6.6.0
with:
enable-cache: false
diff --git a/CHANGES.rst b/CHANGES.rst
index f264538ee..a115f44cb 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -22,6 +22,22 @@ upgrading your version of coverage.py.
.. start-releases
+.. _changes_7-10-6:
+
+Version 7.10.6 — 2025-08-29
+---------------------------
+
+- Fix: ``source`` directories were not properly communicated to subprocesses
+ that ran in different directories, as reported in `issue 1499`_. This is now
+ fixed.
+
+- Performance: `Alex Gaynor continues fine-tuning <pull 2038_>`_ the speed of
+ combination, especially with many contexts.
+
+.. _issue 1499: https://github.com/nedbat/coveragepy/issues/1499
+.. _pull 2038: https://github.com/nedbat/coveragepy/pull/2038
+
+
.. _changes_7-10-5:
Version 7.10.5 — 2025-08-23
diff --git a/coverage/config.py b/coverage/config.py
index 7cde8ec42..82e56fd3b 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -274,10 +274,15 @@ def __init__(self) -> None:
"patch",
}
+ # File paths to make absolute during serialization.
+ # The pairs are (config_key, must_exist).
SERIALIZE_ABSPATH = {
- "data_file",
- "debug_file",
- "source_dirs",
+ ("data_file", False),
+ ("debug_file", False),
+ # `source` can be directories or modules, so don't abspath it if it
+ # doesn't exist.
+ ("source", True),
+ ("source_dirs", False),
}
def from_args(self, **kwargs: TConfigValueIn) -> None:
@@ -569,12 +574,13 @@ def serialize(self) -> str:
deserialized config will refer to the same files.
"""
data = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
- for k in self.SERIALIZE_ABSPATH:
+ for k, must_exist in self.SERIALIZE_ABSPATH:
+ abs_fn = abs_path_if_exists if must_exist else os.path.abspath
v = data[k]
if isinstance(v, list):
- v = list(map(os.path.abspath, v))
+ v = list(map(abs_fn, v))
elif isinstance(v, str):
- v = os.path.abspath(v)
+ v = abs_fn(v)
data[k] = v
return base64.b64encode(json.dumps(data).encode()).decode()
@@ -584,6 +590,14 @@ def process_file_value(path: str) -> str:
return os.path.expanduser(path)
+def abs_path_if_exists(path: str) -> str:
+ """os.path.abspath, but only if the path exists."""
+ if os.path.exists(path):
+ return os.path.abspath(path)
+ else:
+ return path
+
+
def process_regexlist(name: str, option: str, values: list[str]) -> list[str]:
"""Check the values in a regex list and keep the non-blank ones."""
value_list = []
diff --git a/coverage/control.py b/coverage/control.py
index 7ce36af56..fece86c0f 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -793,7 +793,7 @@ def switch_context(self, new_context: str) -> None:
def clear_exclude(self, which: str = "exclude") -> None:
"""Clear the exclude list."""
self._init()
- setattr(self.config, which + "_list", [])
+ setattr(self.config, f"{which}_list", [])
self._exclude_regex_stale()
def exclude(self, regex: str, which: str = "exclude") -> None:
@@ -812,7 +812,7 @@ def exclude(self, regex: str, which: str = "exclude") -> None:
"""
self._init()
- excl_list = getattr(self.config, which + "_list")
+ excl_list = getattr(self.config, f"{which}_list")
excl_list.append(regex)
self._exclude_regex_stale()
@@ -823,7 +823,7 @@ def _exclude_regex_stale(self) -> None:
def _exclude_regex(self, which: str) -> str:
"""Return a regex string for the given exclusion list."""
if which not in self._exclude_re:
- excl_list = getattr(self.config, which + "_list")
+ excl_list = getattr(self.config, f"{which}_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
@@ -835,7 +835,7 @@ def get_exclude_list(self, which: str = "exclude") -> list[str]:
"""
self._init()
- return cast(list[str], getattr(self.config, which + "_list"))
+ return cast(list[str], getattr(self.config, f"{which}_list"))
def save(self) -> None:
"""Save the collected coverage data to the data file."""
@@ -1064,6 +1064,7 @@ def _get_file_reporters(
if not isinstance(morfs, (list, tuple, set)):
morfs = [morfs] # type: ignore[list-item]
+ morfs = sorted(morfs, key=lambda m: m if isinstance(m, str) else m.__name__)
return [(self._get_file_reporter(morf), morf) for morf in morfs]
def _prepare_data_for_reporting(self) -> None:
@@ -1242,8 +1243,7 @@ def html_report(
precision=precision,
):
reporter = HtmlReporter(self)
- ret = reporter.report(morfs)
- return ret
+ return reporter.report(morfs)
def xml_report(
self,
diff --git a/coverage/debug.py b/coverage/debug.py
index b15f3ea36..21ac1d826 100644
--- a/coverage/debug.py
+++ b/coverage/debug.py
@@ -149,21 +149,20 @@ def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]:
info = list(info)
if not info:
return
- label_len = 30
- assert all(len(l) < label_len for l, _ in info)
+ LABEL_LEN = 30
+ assert all(len(l) < LABEL_LEN for l, _ in info)
for label, data in info:
if data == []:
data = "-none-"
- if isinstance(data, tuple) and len(repr(tuple(data))) < 30:
- # Convert to tuple to scrub namedtuples.
- yield "%*s: %r" % (label_len, label, tuple(data))
+ prefix = f"{label:>{LABEL_LEN}}: "
+ if isinstance(data, tuple) and len(str(data)) < 30:
+ yield f"{prefix}{data}"
elif isinstance(data, (list, set, tuple)):
- prefix = "%*s:" % (label_len, label)
for e in data:
- yield "%*s %s" % (label_len + 1, prefix, e)
- prefix = ""
+ yield f"{prefix}{e}"
+ prefix = " " * (LABEL_LEN + 2)
else:
- yield "%*s: %s" % (label_len, label, data)
+ yield f"{prefix}{data}"
def write_formatted_info(
diff --git a/coverage/env.py b/coverage/env.py
index fcd8f3a36..ccee36e83 100644
--- a/coverage/env.py
+++ b/coverage/env.py
@@ -35,7 +35,7 @@
if PYPY:
# Minimum now is 7.3.16
- PYPYVERSION = sys.pypy_version_info # type: ignore[attr-defined]
+ PYPYVERSION = tuple(sys.pypy_version_info) # type: ignore[attr-defined]
else:
PYPYVERSION = (0,)
diff --git a/coverage/misc.py b/coverage/misc.py
index 399bf1ba0..f310ed0eb 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -109,9 +109,9 @@ def nice_pair(pair: TArc) -> str:
"""
start, end = pair
if start == end:
- return "%d" % start
+ return f"{start}"
else:
- return "%d-%d" % (start, end)
+ return f"{start}-{end}"
def bool_or_none(b: Any) -> bool | None:
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py
index b5ca302d0..8402ba0ae 100644
--- a/coverage/plugin_support.py
+++ b/coverage/plugin_support.py
@@ -210,10 +210,8 @@ def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None:
def _show_frame(self, frame: FrameType) -> str:
"""A short string identifying a frame, for debug messages."""
- return "%s@%d" % (
- os.path.basename(frame.f_code.co_filename),
- frame.f_lineno,
- )
+ filename = os.path.basename(frame.f_code.co_filename)
+ return f"{filename}@{frame.f_lineno}"
def source_filename(self) -> str:
sfilename = self.tracer.source_filename()
@@ -292,10 +290,10 @@ def arcs(self) -> set[TArc]:
def source(self) -> str:
ret = self.reporter.source()
- self.debug.write("source() --> %d chars" % (len(ret),))
+ self.debug.write(f"source() --> {len(ret)} chars")
return ret
def source_token_lines(self) -> TSourceTokenLines:
ret = list(self.reporter.source_token_lines())
- self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
+ self.debug.write(f"source_token_lines() --> {len(ret)} tokens")
return ret
diff --git a/coverage/report.py b/coverage/report.py
index 7322d3c89..7c1f9860e 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -10,7 +10,7 @@
from typing import IO, TYPE_CHECKING, Any
from coverage.exceptions import ConfigError, NoDataError
-from coverage.misc import human_sorted_items
+from coverage.misc import human_sorted_items, plural
from coverage.plugin import FileReporter
from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
@@ -31,7 +31,7 @@ def __init__(self, coverage: Coverage) -> None:
self.output_format = self.config.format or "text"
if self.output_format not in {"text", "markdown", "total"}:
raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
- self.fr_analysis: list[tuple[FileReporter, Analysis]] = []
+ self.fr_analyses: list[tuple[FileReporter, Analysis]] = []
self.skipped_count = 0
self.empty_count = 0
self.total = Numbers(precision=self.config.precision)
@@ -46,7 +46,7 @@ def write_items(self, items: Iterable[str]) -> None:
"""Write a list of strings, joined together."""
self.write("".join(items))
- def _report_text(
+ def report_text(
self,
header: list[str],
lines_values: list[list[Any]],
@@ -82,29 +82,36 @@ def _report_text(
self.write(header_str)
self.write(rule)
- formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}")
+ # Write the data lines
+ formats.update(
+ dict(
+ Cover="{:>{n}}%",
+ Missing=" {:9}",
+ )
+ )
for values in lines_values:
- # build string with line values
- line_items = [
- formats[item].format(str(value), name_len=max_name, n=max_n - 1)
- for item, value in zip(header, values)
- ]
- self.write_items(line_items)
+ self.write_items(
+ (
+ formats[item].format(str(value), name_len=max_name, n=max_n - 1)
+ for item, value in zip(header, values)
+ )
+ )
# Write a TOTAL line
if lines_values:
self.write(rule)
- line_items = [
- formats[item].format(str(value), name_len=max_name, n=max_n - 1)
- for item, value in zip(header, total_line)
- ]
- self.write_items(line_items)
+ self.write_items(
+ (
+ formats[item].format(str(value), name_len=max_name, n=max_n - 1)
+ for item, value in zip(header, total_line)
+ )
+ )
for end_line in end_lines:
self.write(end_line)
- def _report_markdown(
+ def report_markdown(
self,
header: list[str],
lines_values: list[list[Any]],
@@ -143,17 +150,29 @@ def _report_markdown(
self.write(header_str)
self.write(rule_str)
+ # Write the data lines
for values in lines_values:
- # build string with line values
- formats.update(dict(Cover="{:>{n}}% |"))
- line_items = [
- formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n - 1)
- for item, value in zip(header, values)
- ]
- self.write_items(line_items)
+ formats.update(
+ dict(
+ Cover="{:>{n}}% |",
+ )
+ )
+ self.write_items(
+ (
+ formats[item].format(
+ str(value).replace("_", "\\_"), name_len=max_name, n=max_n - 1
+ )
+ for item, value in zip(header, values)
+ )
+ )
# Write the TOTAL line
- formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
+ formats.update(
+ dict(
+ Name="|{:>{name_len}} |",
+ Cover="{:>{n}} |",
+ ),
+ )
total_line_items: list[str] = []
for item, value in zip(header, total_line):
if value == "":
@@ -164,6 +183,7 @@ def _report_markdown(
insert = f" **{value}**"
total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
self.write_items(total_line_items)
+
for end_line in end_lines:
self.write(end_line)
@@ -206,9 +226,8 @@ def tabular_report(self) -> None:
# `lines_values` is list of lists of sortable values.
lines_values = []
- for fr, analysis in self.fr_analysis:
+ for fr, analysis in self.fr_analyses:
nums = analysis.numbers
-
args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
if self.branches:
args += [nums.n_branches, nums.n_partial_branches]
@@ -248,18 +267,18 @@ def tabular_report(self) -> None:
# Create other final lines.
end_lines = []
if self.config.skip_covered and self.skipped_count:
- file_suffix = "s" if self.skipped_count > 1 else ""
+ files = plural(self.skipped_count, "file")
end_lines.append(
- f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.",
+ f"\n{self.skipped_count} {files} skipped due to complete coverage.",
)
if self.config.skip_empty and self.empty_count:
- file_suffix = "s" if self.empty_count > 1 else ""
- end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")
+ files = plural(self.empty_count, "file")
+ end_lines.append(f"\n{self.empty_count} empty {files} skipped.")
if self.output_format == "markdown":
- formatter = self._report_markdown
+ formatter = self.report_markdown
else:
- formatter = self._report_text
+ formatter = self.report_text
formatter(header, lines_values, total_line, end_lines)
def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
@@ -276,4 +295,4 @@ def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
# Don't report on empty files.
self.empty_count += 1
else:
- self.fr_analysis.append((fr, analysis))
+ self.fr_analyses.append((fr, analysis))
diff --git a/coverage/results.py b/coverage/results.py
index 163fc902e..86f6fcc15 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -325,7 +325,7 @@ def display_covered(pc: float, precision: int) -> str:
pc = 100.0 - near0
else:
pc = round(pc, precision)
- return "%.*f" % (precision, pc)
+ return f"{pc:.{precision}f}"
def _line_ranges(
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index 6de07c593..693b6e14c 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -614,7 +614,7 @@ def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None:
"""
if self._debug.should("dataop"):
- self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
+ self._debug.write(f"Adding file tracers: {len(file_tracers)} files")
if not file_tracers:
return
self._start_using()
@@ -793,18 +793,28 @@ def update(
# Handle arcs if present in other_db
if has_arcs:
self._choose_lines_or_arcs(arcs=True)
+
+ # Create context mapping table for faster lookups
+ con.execute_void("""
+ CREATE TEMP TABLE context_mapping AS
+ SELECT
+ other_context.id as other_id,
+ main_context.id as main_id
+ FROM other_db.context AS other_context
+ INNER JOIN main.context AS main_context ON other_context.context = main_context.context
+ """)
+
con.execute_void("""
INSERT OR IGNORE INTO main.arc (file_id, context_id, fromno, tono)
SELECT
main_file.id,
- main_context.id,
+ context_mapping.main_id,
other_arc.fromno,
other_arc.tono
FROM other_db.arc AS other_arc
INNER JOIN other_file_mapped ON other_arc.file_id = other_file_mapped.other_file_id
- INNER JOIN other_db.context AS other_context ON other_arc.context_id = other_context.id
+ INNER JOIN context_mapping ON other_arc.context_id = context_mapping.other_id
INNER JOIN main.file AS main_file ON other_file_mapped.mapped_path = main_file.path
- INNER JOIN main.context AS main_context ON other_context.context = main_context.context
""")
# Handle line_bits if present in other_db
diff --git a/coverage/version.py b/coverage/version.py
index a7cd7a5ff..a9d77c510 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -8,7 +8,7 @@
# version_info: same semantics as sys.version_info.
# _dev: the .devN suffix if any.
-version_info = (7, 10, 5, "final", 0)
+version_info = (7, 10, 6, "final", 0)
_dev = 0
@@ -22,7 +22,7 @@ def _make_version(
) -> str:
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ["alpha", "beta", "candidate", "final"]
- version = "%d.%d.%d" % (major, minor, micro)
+ version = f"{major}.{minor}.{micro}"
if releaselevel != "final":
short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel]
version += f"{short}{serial}"
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index 87055f27c..97db7f4f8 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -36,7 +36,7 @@ def rate(hit: int, num: int) -> str:
if num == 0:
return "1"
else:
- return "%.4g" % (hit / num)
+ return f"{hit / num:.4g}"
@dataclass
@@ -226,7 +226,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
xline.setAttribute("branch", "true")
xline.setAttribute(
"condition-coverage",
- "%d%% (%d/%d)" % (100 * taken // total, taken, total),
+ f"{100 * taken // total}% ({taken}/{total})",
)
if line in missing_branch_arcs:
annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
diff --git a/doc/conf.py b/doc/conf.py
index 75a012e73..b1fa60438 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -67,11 +67,11 @@
# @@@ editable
copyright = "2009–2025, Ned Batchelder" # pylint: disable=redefined-builtin
# The short X.Y.Z version.
-version = "7.10.5"
+version = "7.10.6"
# The full version, including alpha/beta/rc tags.
-release = "7.10.5"
+release = "7.10.6"
# The date of release, in "monthname day, year" format.
-release_date = "August 23, 2025"
+release_date = "August 29, 2025"
# @@@ end
rst_epilog = f"""
@@ -250,4 +250,4 @@ def setup(app):
"""Configure Sphinx"""
app.add_css_file("coverage.css")
app.add_config_value("prerelease", False, "env")
- print("** Prerelease = %r" % prerelease)
+ print(f"** Prerelease = {prerelease!r}")
diff --git a/doc/sample_html/class_index.html b/doc/sample_html/class_index.html
index e03b7540e..277ef81f7 100644
--- a/doc/sample_html/class_index.html
+++ b/doc/sample_html/class_index.html
@@ -56,8 +56,8 @@
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -567,8 +567,8 @@
@@ -97,8 +97,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80___main___py.html b/doc/sample_html/z_7b071bdc2a35fa80___main___py.html
index d6cfe3dd3..bb337f145 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80___main___py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80___main___py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -97,8 +97,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_cogapp_py.html b/doc/sample_html/z_7b071bdc2a35fa80_cogapp_py.html
index bae8bcb4c..4c7596ec4 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_cogapp_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_cogapp_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -946,8 +946,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_hashhandler_py.html b/doc/sample_html/z_7b071bdc2a35fa80_hashhandler_py.html
index 1c64be138..b187aaf79 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_hashhandler_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_hashhandler_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -265,8 +265,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_makefiles_py.html b/doc/sample_html/z_7b071bdc2a35fa80_makefiles_py.html
index 9c92f3641..01eabeb42 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_makefiles_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_makefiles_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -127,8 +127,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_test_cogapp_py.html b/doc/sample_html/z_7b071bdc2a35fa80_test_cogapp_py.html
index b04bd4bf2..975250a78 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_test_cogapp_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_test_cogapp_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -2911,8 +2911,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_test_makefiles_py.html b/doc/sample_html/z_7b071bdc2a35fa80_test_makefiles_py.html
index 6abe9d438..e230813fa 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_test_makefiles_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_test_makefiles_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -205,8 +205,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_test_whiteutils_py.html b/doc/sample_html/z_7b071bdc2a35fa80_test_whiteutils_py.html
index 647cc858a..46a8365ab 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_test_whiteutils_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_test_whiteutils_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -186,8 +186,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_utils_py.html b/doc/sample_html/z_7b071bdc2a35fa80_utils_py.html
index 719a176de..aa87a5746 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_utils_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_utils_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -155,8 +155,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/doc/sample_html/z_7b071bdc2a35fa80_whiteutils_py.html b/doc/sample_html/z_7b071bdc2a35fa80_whiteutils_py.html
index af992509b..5e878c4c4 100644
--- a/doc/sample_html/z_7b071bdc2a35fa80_whiteutils_py.html
+++ b/doc/sample_html/z_7b071bdc2a35fa80_whiteutils_py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
@@ -159,8 +159,8 @@
^ index
» next
- coverage.py v7.10.5,
- created at 2025-08-23 08:07 -0400
+ coverage.py v7.10.6,
+ created at 2025-08-29 10:20 -0400
diff --git a/igor.py b/igor.py
index a2cc82e1c..6509adf12 100644
--- a/igor.py
+++ b/igor.py
@@ -8,7 +8,6 @@
"""
-import contextlib
import datetime
import glob
import inspect
@@ -22,7 +21,6 @@
import sys
import sysconfig
import textwrap
-import time
import types
import zipfile
@@ -47,16 +45,6 @@
# by "python igor.py blah".
-@contextlib.contextmanager
-def time_message(msg: str):
- """Print a message about how long something took."""
- start = time.monotonic()
- try:
- yield
- finally:
- print(f"Time for {msg}: {time.monotonic() - start:.2f}s")
-
-
def do_show_env():
"""Show the environment variables."""
print("Environment:")
@@ -243,8 +231,7 @@ def do_combine_html():
os.environ["COVERAGE_HOME"] = os.getcwd()
cov = coverage.Coverage(config_file="metacov.ini")
cov.load()
- with time_message("combine"):
- cov.combine()
+ cov.combine()
cov.save()
# A new Coverage to turn on messages. Better would be to have tighter
# control over message verbosity...
@@ -253,8 +240,7 @@ def do_combine_html():
show_contexts = bool(
os.getenv("COVERAGE_DYNCTX") or os.getenv("COVERAGE_CONTEXT"),
)
- with time_message("html"):
- total = cov.html_report(show_contexts=show_contexts)
+ total = cov.html_report(show_contexts=show_contexts)
print(f"Total: {total:.3f}%")
diff --git a/lab/warn_executed.py b/lab/warn_executed.py
new file mode 100644
index 000000000..177ddf13c
--- /dev/null
+++ b/lab/warn_executed.py
@@ -0,0 +1,213 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""
+$ python warn_executed.py
+
+Find lines that were excluded by "warn-executed" regex patterns
+but were actually executed according to coverage data.
+
+The config_file is a TOML file with "warn-executed" and "warn-not-partial"
+patterns like:
+
+ warn-executed = [
+ "pragma: no cover",
+ "# debug",
+ "raise NotImplemented",
+ ]
+
+ warn-not-partial = [
+ "if TYPE_CHECKING:",
+ ]
+
+These should be patterns that you excluded as lines or partial branches.
+
+Warning: this program uses internal undocumented private classes from
+coverage.py. This is an unsupported proof-of-concept.
+
+I wrote a blog post about this:
+https://nedbatchelder.com/blog/202508/finding_unneeded_pragmas.html
+
+"""
+
+import linecache
+import os
+import sys
+import tomllib
+
+from coverage.parser import PythonParser
+from coverage.sqldata import CoverageData
+from coverage.results import Analysis
+
+
+def read_warn_patterns(config_file: str) -> tuple[list[str], list[str]]:
+ """Read "warn-executed" and "warn-not-partial" patterns from a TOML config file."""
+ with open(config_file, "rb") as f:
+ config = tomllib.load(f)
+
+ warn_executed = []
+ warn_not_partial = []
+
+ if "warn-executed" in config:
+ warn_executed.extend(config["warn-executed"])
+ if "warn-not-partial" in config:
+ warn_not_partial.extend(config["warn-not-partial"])
+
+ return warn_executed, warn_not_partial
+
+
+def find_executed_excluded_lines(
+ source_file: str,
+ coverage_data: CoverageData,
+ warn_patterns: list[str],
+) -> set[int]:
+ """
+ Find lines that match warn-executed patterns but were actually executed.
+
+ Args:
+ source_file: Path to the Python source file to analyze
+ coverage_data: The coverage data object
+ warn_patterns: List of regex patterns that should warn if executed
+
+ Returns:
+ Set of executed line numbers that matched any pattern
+ """
+ executed_lines = coverage_data.lines(source_file)
+ if executed_lines is None:
+ return set()
+
+ executed_lines = set(executed_lines)
+
+ try:
+ with open(source_file, "r", encoding="utf-8") as f:
+ source_text = f.read()
+ except Exception:
+ return set()
+
+ parser = PythonParser(text=source_text, filename=source_file)
+ parser.parse_source()
+
+ all_executed_excluded = set()
+ for pattern in warn_patterns:
+ matched_lines = parser.lines_matching(pattern)
+ all_executed_excluded.update(matched_lines & executed_lines)
+
+ return all_executed_excluded
+
+
+def find_not_partial_lines(
+ source_file: str,
+ coverage_data: CoverageData,
+ warn_patterns: list[str],
+) -> set[int]:
+ """
+ Find lines that match warn-not-partial patterns but had both code paths executed.
+
+ Args:
+ source_file: Path to the Python source file to analyze
+ coverage_data: The coverage data object
+ warn_patterns: List of regex patterns for lines expected to be partial
+
+ Returns:
+ Set of line numbers that matched patterns but weren't partial
+ """
+ if not coverage_data.has_arcs():
+ return set()
+
+ all_arcs = coverage_data.arcs(source_file)
+ if all_arcs is None:
+ return set()
+
+ try:
+ with open(source_file, "r", encoding="utf-8") as f:
+ source_text = f.read()
+ except Exception:
+ return set()
+
+ parser = PythonParser(text=source_text, filename=source_file)
+ parser.parse_source()
+
+ all_possible_arcs = set(parser.arcs())
+ executed_arcs = set(all_arcs)
+
+ # Lines with some missing arcs are partial branches
+ partial_lines = set()
+ for start_line in {arc[0] for arc in all_possible_arcs if arc[0] > 0}:
+ possible_from_line = {arc for arc in all_possible_arcs if arc[0] == start_line}
+ executed_from_line = {arc for arc in executed_arcs if arc[0] == start_line}
+ if executed_from_line and possible_from_line != executed_from_line:
+ partial_lines.add(start_line)
+
+ all_not_partial = set()
+ for pattern in warn_patterns:
+ matched_lines = parser.lines_matching(pattern)
+ not_partial = matched_lines - partial_lines
+ all_not_partial.update(not_partial)
+
+ return all_not_partial
+
+
+def analyze_warnings(coverage_file: str, config_file: str) -> dict[str, set[int]]:
+ """
+ Find lines that match warn-executed or warn-not-partial patterns.
+
+ Args:
+ coverage_file: Path to the coverage data file (.coverage)
+ config_file: Path to TOML config file with warning patterns
+
+ Returns:
+ Dictionary mapping filenames to sets of problematic line numbers
+ """
+ warn_executed_patterns, warn_not_partial_patterns = read_warn_patterns(config_file)
+
+ if not warn_executed_patterns and not warn_not_partial_patterns:
+ return {}
+
+ coverage_data = CoverageData(coverage_file)
+ coverage_data.read()
+
+ measured_files = sorted(coverage_data.measured_files())
+
+ all_results = {}
+ for source_file in measured_files:
+ problem_lines = set()
+
+ if warn_executed_patterns:
+ executed_excluded = find_executed_excluded_lines(
+ source_file,
+ coverage_data,
+ warn_executed_patterns,
+ )
+ problem_lines.update(executed_excluded)
+
+ if warn_not_partial_patterns:
+ not_partial = find_not_partial_lines(
+ source_file,
+ coverage_data,
+ warn_not_partial_patterns,
+ )
+ problem_lines.update(not_partial)
+
+ if problem_lines:
+ all_results[source_file] = problem_lines
+
+ return all_results
+
+
+def main():
+ if len(sys.argv) != 3:
+ print(__doc__.rstrip())
+ return 1
+
+ coverage_file, config_file = sys.argv[1:]
+ results = analyze_warnings(coverage_file, config_file)
+
+ for source_file in sorted(results.keys()):
+ problem_lines = results[source_file]
+ for line_num in sorted(problem_lines):
+ line_text = linecache.getline(source_file, line_num).rstrip()
+ print(f"{source_file}:{line_num}: {line_text}")
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/lab/warn_executed.toml b/lab/warn_executed.toml
new file mode 100644
index 000000000..abc80f217
--- /dev/null
+++ b/lab/warn_executed.toml
@@ -0,0 +1,15 @@
+warn-executed = [
+ "pragma: not covered",
+ "pragma: not testing",
+ "raise AssertionError",
+ "pragma: only failure",
+ "pragma: cant happen",
+ "pragma: never called",
+ "pytest.mark.skipif\\(env.METACOV",
+ ]
+
+warn-not-partial = [
+ "pragma: partial metacov",
+ "if env.METACOV:",
+ "pragma: part covered",
+ ]
diff --git a/metacov.ini b/metacov.ini
index 1aa05926b..2039b733e 100644
--- a/metacov.ini
+++ b/metacov.ini
@@ -54,7 +54,6 @@ exclude_lines =
pragma: not testing
# Lines that we can't run during metacov.
- pragma: no metacov
pytest.mark.skipif\(env.METACOV
if not env.METACOV:
@@ -85,6 +84,7 @@ partial_branches =
# If we're asserting that any() is true, it didn't finish.
assert any\(
if env.TESTING:
+ pragma: partial metacov
if env.METACOV:
precision = 3
diff --git a/tests/test_debug.py b/tests/test_debug.py
index 74ac199ed..629533c6e 100644
--- a/tests/test_debug.py
+++ b/tests/test_debug.py
@@ -50,6 +50,7 @@ def test_info_formatter(self) -> None:
[
("x", "hello there"),
("very long label", ["one element"]),
+ ("fits on 1", (17, 23, 42, 76, 99)),
("regular", ["abc", "def", "ghi", "jkl"]),
("nothing", []),
]
@@ -58,6 +59,7 @@ def test_info_formatter(self) -> None:
expected = [
" x: hello there",
" very long label: one element",
+ " fits on 1: (17, 23, 42, 76, 99)",
" regular: abc",
" def",
" ghi",
diff --git a/tests/test_oddball.py b/tests/test_oddball.py
index b91c47cc1..f997a37a2 100644
--- a/tests/test_oddball.py
+++ b/tests/test_oddball.py
@@ -145,7 +145,7 @@ def recur(n):
assert cov._collector is not None
pytrace = (cov._collector.tracer_name() == "PyTracer") # fmt: skip
expected_missing = [4]
- if pytrace: # pragma: no metacov
+ if pytrace: # pragma: partial metacov
expected_missing += [10, 11, 12]
_, statements, missing, _ = cov.analysis("recur.py")
@@ -153,7 +153,7 @@ def recur(n):
assert expected_missing == missing
# Get a warning about the stackoverflow effect on the tracing function.
- if pytrace and not env.METACOV: # pragma: no metacov
+ if pytrace and not env.METACOV: # pragma: partial metacov
assert len(cov._warnings) == 1
assert re.fullmatch(
r"Trace function changed, data is likely wrong: None != "
diff --git a/tests/test_process.py b/tests/test_process.py
index 353784574..74bc2f50e 100644
--- a/tests/test_process.py
+++ b/tests/test_process.py
@@ -30,7 +30,7 @@
from tests import testenv
from tests.coveragetest import CoverageTest, TESTS_DIR
-from tests.helpers import re_line, re_lines, re_lines_text
+from tests.helpers import change_dir, re_line, re_lines, re_lines_text
class ProcessTest(CoverageTest):
@@ -1627,6 +1627,45 @@ def f2():
data.read()
assert line_counts(data)["subfunctions.py"] == 11
+ def test_subprocess_dir_with_source(self) -> None:
+ # https://github.com/nedbat/coveragepy/issues/1499
+ self.make_file("main/d/README", "A sub-directory")
+ self.make_file(
+ "main/main.py",
+ """\
+ import os, subprocess, sys
+ orig = os.getcwd()
+ os.chdir("./d")
+ subprocess.run([sys.executable, f"{orig}/sub.py"])
+ os.chdir(orig)
+ """,
+ )
+ self.make_file("lib/other.py", "print('Other', flush=True)")
+ self.make_file(
+ "main/sub.py",
+ """
+ import other
+ print("Hello, world!", flush=True)
+ """,
+ )
+ self.make_file(
+ "main/pyproject.toml",
+ """\
+ [tool.coverage.run]
+ patch = ["subprocess"]
+ source = [".", "other"]
+ disable_warnings = ["module-not-imported"]
+ """,
+ )
+ self.set_environ("PYTHONPATH", os.path.abspath("lib"))
+ with change_dir("main"):
+ out = self.run_command("coverage run main.py")
+ assert out == "Other\nHello, world!\n"
+ self.run_command("coverage combine")
+ data = coverage.CoverageData()
+ data.read()
+ assert line_counts(data) == {"main.py": 5, "sub.py": 2, "other.py": 1}
+
@pytest.fixture
def _clean_pth_files() -> Iterable[None]: