diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 67393a8ca..71b32f2fa 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -16,7 +16,7 @@ How can we reproduce the problem? Please *be specific*. Don't link to a failing
1. What version of coverage.py shows the problem? The output of `coverage debug sys` is helpful.
1. What versions of what packages do you have installed? The output of `pip freeze` is helpful.
1. What code shows the problem? Give us a specific commit of a specific repo that we can check out. If you've already worked around the problem, please provide a commit before that fix.
-1. What commands did you run?
+1. What commands should we run to reproduce the problem? *Be specific*. Include everything, even `git clone`, `pip install`, and so on. Explain like we're five!
**Expected behavior**
A clear and concise description of what you expected to happen.
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index ab0c1142a..ab94a83e3 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -48,6 +48,7 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
+ - "3.12"
- "pypy-3.7"
- "pypy-3.8"
- "pypy-3.9"
@@ -77,6 +78,7 @@ jobs:
uses: "actions/setup-python@v4"
with:
python-version: "${{ matrix.python-version }}"
+ allow-prereleases: true
cache: pip
cache-dependency-path: 'requirements/*.pip'
diff --git a/.github/workflows/kit.yml b/.github/workflows/kit.yml
index 179f7a649..53e081455 100644
--- a/.github/workflows/kit.yml
+++ b/.github/workflows/kit.yml
@@ -77,9 +77,8 @@ jobs:
# }
# # PYVERSIONS. Available versions:
# # https://github.com/actions/python-versions/blob/main/versions-manifest.json
- # # Include prereleases if they are at rc stage.
# # PyPy versions are handled further below in the "pypy" step.
- # pys = ["cp37", "cp38", "cp39", "cp310", "cp311"]
+ # pys = ["cp37", "cp38", "cp39", "cp310", "cp311", "cp312"]
#
# # Some OS/arch combinations need overrides for the Python versions:
# os_arch_pys = {
@@ -104,16 +103,19 @@ jobs:
- {"os": "ubuntu", "py": "cp39", "arch": "x86_64"}
- {"os": "ubuntu", "py": "cp310", "arch": "x86_64"}
- {"os": "ubuntu", "py": "cp311", "arch": "x86_64"}
+ - {"os": "ubuntu", "py": "cp312", "arch": "x86_64"}
- {"os": "ubuntu", "py": "cp37", "arch": "i686"}
- {"os": "ubuntu", "py": "cp38", "arch": "i686"}
- {"os": "ubuntu", "py": "cp39", "arch": "i686"}
- {"os": "ubuntu", "py": "cp310", "arch": "i686"}
- {"os": "ubuntu", "py": "cp311", "arch": "i686"}
+ - {"os": "ubuntu", "py": "cp312", "arch": "i686"}
- {"os": "ubuntu", "py": "cp37", "arch": "aarch64"}
- {"os": "ubuntu", "py": "cp38", "arch": "aarch64"}
- {"os": "ubuntu", "py": "cp39", "arch": "aarch64"}
- {"os": "ubuntu", "py": "cp310", "arch": "aarch64"}
- {"os": "ubuntu", "py": "cp311", "arch": "aarch64"}
+ - {"os": "ubuntu", "py": "cp312", "arch": "aarch64"}
- {"os": "macos", "py": "cp38", "arch": "arm64"}
- {"os": "macos", "py": "cp39", "arch": "arm64"}
- {"os": "macos", "py": "cp310", "arch": "arm64"}
@@ -123,17 +125,20 @@ jobs:
- {"os": "macos", "py": "cp39", "arch": "x86_64"}
- {"os": "macos", "py": "cp310", "arch": "x86_64"}
- {"os": "macos", "py": "cp311", "arch": "x86_64"}
+ - {"os": "macos", "py": "cp312", "arch": "x86_64"}
- {"os": "windows", "py": "cp37", "arch": "x86"}
- {"os": "windows", "py": "cp38", "arch": "x86"}
- {"os": "windows", "py": "cp39", "arch": "x86"}
- {"os": "windows", "py": "cp310", "arch": "x86"}
- {"os": "windows", "py": "cp311", "arch": "x86"}
+ - {"os": "windows", "py": "cp312", "arch": "x86"}
- {"os": "windows", "py": "cp37", "arch": "AMD64"}
- {"os": "windows", "py": "cp38", "arch": "AMD64"}
- {"os": "windows", "py": "cp39", "arch": "AMD64"}
- {"os": "windows", "py": "cp310", "arch": "AMD64"}
- {"os": "windows", "py": "cp311", "arch": "AMD64"}
- # [[[end]]] (checksum: ded8a9f214bf59776562d91ae6828863)
+ - {"os": "windows", "py": "cp312", "arch": "AMD64"}
+ # [[[end]]] (checksum: 5e62f362263935c1e3a21299f8a1b649)
fail-fast: false
steps:
@@ -149,6 +154,7 @@ jobs:
- name: "Install Python 3.8"
uses: actions/setup-python@v4
with:
+ # PYVERSIONS
python-version: "3.8"
cache: pip
cache-dependency-path: 'requirements/*.pip'
@@ -162,6 +168,7 @@ jobs:
CIBW_BUILD: ${{ matrix.py }}-*
CIBW_ARCHS: ${{ matrix.arch }}
CIBW_ENVIRONMENT: PIP_DISABLE_PIP_VERSION_CHECK=1
+ CIBW_PRERELEASE_PYTHONS: True
CIBW_TEST_COMMAND: python -c "from coverage.tracer import CTracer; print('CTracer OK!')"
run: |
python -m cibuildwheel --output-dir wheelhouse
@@ -175,6 +182,7 @@ jobs:
with:
name: dist
path: wheelhouse/*.whl
+ retention-days: 7
sdist:
name: "Source distribution"
@@ -186,6 +194,7 @@ jobs:
- name: "Install Python 3.8"
uses: actions/setup-python@v4
with:
+ # PYVERSIONS
python-version: "3.8"
cache: pip
cache-dependency-path: 'requirements/*.pip'
@@ -207,6 +216,7 @@ jobs:
with:
name: dist
path: dist/*.tar.gz
+ retention-days: 7
pypy:
name: "PyPy wheel"
@@ -241,3 +251,40 @@ jobs:
with:
name: dist
path: dist/*.whl
+ retention-days: 7
+
+ sign:
+ # This signs our artifacts, but we don't use the signatures for anything
+ # yet. Someday maybe PyPI will have a way to upload and verify them.
+ name: "Sign artifacts"
+ needs:
+ - wheels
+ - sdist
+ - pypy
+ runs-on: ubuntu-latest
+ permissions:
+ id-token: write
+ steps:
+ - name: "Download artifacts"
+ uses: actions/download-artifact@v3
+ with:
+ name: dist
+
+ - name: "Sign artifacts"
+ uses: sigstore/gh-action-sigstore-python@v1.2.3
+ with:
+ inputs: coverage-*.*
+
+ - name: "List files"
+ run: |
+ ls -alR
+
+ - name: "Upload signatures"
+ uses: actions/upload-artifact@v3
+ with:
+ name: signatures
+ path: |
+ *.crt
+ *.sig
+ *.sigstore
+ retention-days: 7
diff --git a/.github/workflows/python-nightly.yml b/.github/workflows/python-nightly.yml
index 94a30ecc2..319064c94 100644
--- a/.github/workflows/python-nightly.yml
+++ b/.github/workflows/python-nightly.yml
@@ -53,6 +53,7 @@ jobs:
- "pypy-3.7-nightly"
- "pypy-3.8-nightly"
- "pypy-3.9-nightly"
+ - "pypy-3.10-nightly"
fail-fast: false
steps:
diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml
index b0f0ee6ca..8ab3608bc 100644
--- a/.github/workflows/testsuite.yml
+++ b/.github/workflows/testsuite.yml
@@ -49,6 +49,7 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
+ - "3.12"
- "pypy-3.7"
- "pypy-3.9"
exclude:
@@ -65,6 +66,7 @@ jobs:
uses: "actions/setup-python@v4"
with:
python-version: "${{ matrix.python-version }}"
+ allow-prereleases: true
cache: pip
cache-dependency-path: 'requirements/*.pip'
diff --git a/.treerc b/.treerc
index ddea2e92c..0916e24a9 100644
--- a/.treerc
+++ b/.treerc
@@ -14,5 +14,5 @@ ignore =
*.gz *.zip
_build _spell
*.egg *.egg-info
- .mypy_cache
+ .*_cache
tmp
diff --git a/CHANGES.rst b/CHANGES.rst
index b4da5e1b6..4b567d6dc 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -19,6 +19,44 @@ development at the same time, such as 4.5.x and 5.0.
.. scriv-start-here
+.. _changes_7-2-7:
+
+Version 7.2.7 — 2023-05-29
+--------------------------
+
+- Fix: reverted a `change from 6.4.3 `_ that helped Cython, but
+ also increased the size of data files when using dynamic contexts, as
+ described in the now-fixed `issue 1586`_. The problem is now avoided due to a
+  recent change (`issue 1538`_). Thanks to `Anders Kaseorg <pull 1629_>`_
+ and David Szotten for persisting with problem reports and detailed diagnoses.
+
+- Wheels are now provided for CPython 3.12.
+
+.. _issue 1586: https://github.com/nedbat/coveragepy/issues/1586
+.. _pull 1629: https://github.com/nedbat/coveragepy/pull/1629
+
+
+.. _changes_7-2-6:
+
+Version 7.2.6 — 2023-05-23
+--------------------------
+
+- Fix: the ``lcov`` command could raise an IndexError exception if a file is
+ translated to Python but then executed under its own name. Jinja2 does this
+ when rendering templates. Fixes `issue 1553`_.
+
+- Python 3.12 beta 1 now inlines comprehensions. Previously they were compiled
+ as invisible functions and coverage.py would warn you if they weren't
+ completely executed. This no longer happens under Python 3.12.
+
+- Fix: the ``coverage debug sys`` command includes some environment variables
+ in its output. This could have included sensitive data. Those values are
+ now hidden with asterisks, closing `issue 1628`_.
+
+.. _issue 1553: https://github.com/nedbat/coveragepy/issues/1553
+.. _issue 1628: https://github.com/nedbat/coveragepy/issues/1628
+
+
.. _changes_7-2-5:
Version 7.2.5 — 2023-04-30
@@ -182,6 +220,7 @@ Version 7.1.0 — 2023-01-24
.. _issue 1319: https://github.com/nedbat/coveragepy/issues/1319
.. _issue 1538: https://github.com/nedbat/coveragepy/issues/1538
+
.. _changes_7-0-5:
Version 7.0.5 — 2023-01-10
@@ -395,7 +434,6 @@ update your settings.
.. _pull 1479: https://github.com/nedbat/coveragepy/pull/1479
-
.. _changes_6-6-0b1:
Version 6.6.0b1 — 2022-10-31
@@ -1105,6 +1143,7 @@ Version 5.3.1 — 2020-12-19
.. _issue 1010: https://github.com/nedbat/coveragepy/issues/1010
.. _pull request 1066: https://github.com/nedbat/coveragepy/pull/1066
+
.. _changes_53:
Version 5.3 — 2020-09-13
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index f03ceacbc..c3dfef428 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -16,6 +16,7 @@ Alexander Todorov
Alexander Walters
Alpha Chen
Ammar Askar
+Anders Kaseorg
Andrew Hoos
Anthony Sottile
Arcadiy Ivanov
@@ -42,6 +43,7 @@ Calen Pennington
Carl Friedrich Bolz-Tereick
Carl Gieringer
Catherine Proulx
+Charles Chan
Chris Adams
Chris Jerdonek
Chris Rose
diff --git a/Makefile b/Makefile
index 7f529e0c1..b5276a944 100644
--- a/Makefile
+++ b/Makefile
@@ -32,7 +32,7 @@ clean: clean_platform ## Remove artifacts of test execution, installation, etc
@rm -f tests/covmain.zip tests/zipmods.zip tests/zip1.zip
@rm -rf doc/_build doc/_spell doc/sample_html_beta
@rm -rf tmp
- @rm -rf .cache .hypothesis .mypy_cache .pytest_cache
+ @rm -rf .cache .hypothesis .*_cache
@rm -rf tests/actual
@-make -C tests/gold/html clean
@@ -89,7 +89,6 @@ metasmoke:
PIP_COMPILE = pip-compile --upgrade --allow-unsafe --resolver=backtracking
upgrade: export CUSTOM_COMPILE_COMMAND=make upgrade
upgrade: ## Update the *.pip files with the latest packages satisfying *.in files.
- git switch -c nedbat/upgrade-$$(date +%Y%m%d)
pip install -q -r requirements/pip-tools.pip
$(PIP_COMPILE) -o requirements/pip-tools.pip requirements/pip-tools.in
$(PIP_COMPILE) -o requirements/pip.pip requirements/pip.in
@@ -101,7 +100,6 @@ upgrade: ## Update the *.pip files with the latest packages satisfying *.in
$(PIP_COMPILE) -o doc/requirements.pip doc/requirements.in
$(PIP_COMPILE) -o requirements/lint.pip doc/requirements.in requirements/dev.in
$(PIP_COMPILE) -o requirements/mypy.pip requirements/mypy.in
- git commit -am "chore: make upgrade"
diff_upgrade: ## Summarize the last `make upgrade`
@# The sort flags sort by the package name first, then by the -/+, and
diff --git a/README.rst b/README.rst
index 897f8801d..5e4024d87 100644
--- a/README.rst
+++ b/README.rst
@@ -28,7 +28,7 @@ Coverage.py runs on these versions of Python:
.. PYVERSIONS
-* CPython 3.7 through 3.12.0a7
+* CPython 3.7 through 3.12.0b1
* PyPy3 7.3.11.
Documentation is on `Read the Docs`_. Code repository and issue tracker are on
@@ -39,6 +39,7 @@ Documentation is on `Read the Docs`_. Code repository and issue tracker are on
**New in 7.x:**
improved data combining;
+``[report] exclude_also`` setting;
``report --format=``;
type annotations.
diff --git a/coverage/__init__.py b/coverage/__init__.py
index 054e37dff..e3ed23223 100644
--- a/coverage/__init__.py
+++ b/coverage/__init__.py
@@ -14,8 +14,6 @@
# so disable its warning.
# pylint: disable=useless-import-alias
-import sys
-
from coverage.version import (
__version__ as __version__,
version_info as version_info,
diff --git a/coverage/annotate.py b/coverage/annotate.py
index b4a02cb47..2ef89c967 100644
--- a/coverage/annotate.py
+++ b/coverage/annotate.py
@@ -13,7 +13,7 @@
from coverage.files import flat_rootname
from coverage.misc import ensure_dir, isolate_module
from coverage.plugin import FileReporter
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis
from coverage.types import TMorf
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 4498eeec3..55f6c793e 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -952,13 +952,12 @@ def unglob_args(args: List[str]) -> List[str]:
Use "{program_name} help " for detailed help on any command.
""",
- "minimum_help": """\
- Code coverage for Python, version {__version__} {extension_modifier}. Use '{program_name} help' for help.
- """,
+ "minimum_help": (
+ "Code coverage for Python, version {__version__} {extension_modifier}. " +
+ "Use '{program_name} help' for help."
+ ),
- "version": """\
- Coverage.py, version {__version__} {extension_modifier}
- """,
+ "version": "Coverage.py, version {__version__} {extension_modifier}",
}
diff --git a/coverage/collector.py b/coverage/collector.py
index 2f8c17520..ca7f5d94b 100644
--- a/coverage/collector.py
+++ b/coverage/collector.py
@@ -456,7 +456,7 @@ def mapped_file_dict(self, d: Mapping[str, T]) -> Dict[str, T]:
assert isinstance(runtime_err, Exception)
raise runtime_err
- return {self.cached_mapped_file(k): v for k, v in items}
+ return {self.cached_mapped_file(k): v for k, v in items if v}
def plugin_was_disabled(self, plugin: CoveragePlugin) -> None:
"""Record that `plugin` was disabled during the run."""
diff --git a/coverage/control.py b/coverage/control.py
index e405a5bf4..723c4d876 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -29,7 +29,9 @@
from coverage.config import CoverageConfig, read_coverage_config
from coverage.context import should_start_context_test_function, combine_context_switchers
from coverage.data import CoverageData, combine_parallel_data
-from coverage.debug import DebugControl, NoDebugging, short_stack, write_formatted_info
+from coverage.debug import (
+ DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display
+)
from coverage.disposition import disposition_debug_msg
from coverage.exceptions import ConfigError, CoverageException, CoverageWarning, PluginError
from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory
@@ -37,15 +39,15 @@
from coverage.inorout import InOrOut
from coverage.jsonreport import JsonReporter
from coverage.lcovreport import LcovReporter
-from coverage.misc import bool_or_none, join_regex, human_sorted
+from coverage.misc import bool_or_none, join_regex
from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module
from coverage.multiproc import patch_multiprocessing
from coverage.plugin import FileReporter
from coverage.plugin_support import Plugins
from coverage.python import PythonFileReporter
-from coverage.report import render_report
+from coverage.report import SummaryReporter
+from coverage.report_core import render_report
from coverage.results import Analysis
-from coverage.summary import SummaryReporter
from coverage.types import (
FilePath, TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigValueOut,
TFileDisposition, TLineNo, TMorf,
@@ -1298,14 +1300,7 @@ def plugin_info(plugins: List[Any]) -> List[str]:
("pid", os.getpid()),
("cwd", os.getcwd()),
("path", sys.path),
- ("environment", human_sorted(
- f"{k} = {v}"
- for k, v in os.environ.items()
- if (
- any(slug in k for slug in ("COV", "PY")) or
- (k in ("HOME", "TEMP", "TMP"))
- )
- )),
+ ("environment", [f"{k} = {v}" for k, v in relevant_environment_display(os.environ)]),
("command_line", " ".join(getattr(sys, "argv", ["-none-"]))),
]
diff --git a/coverage/debug.py b/coverage/debug.py
index 3ef6dae8a..3484792e2 100644
--- a/coverage/debug.py
+++ b/coverage/debug.py
@@ -12,16 +12,18 @@
import itertools
import os
import pprint
+import re
import reprlib
import sys
import types
import _thread
from typing import (
- Any, Callable, IO, Iterable, Iterator, Optional, List, Tuple, cast,
+ cast,
+ Any, Callable, IO, Iterable, Iterator, Mapping, Optional, List, Tuple,
)
-from coverage.misc import isolate_module
+from coverage.misc import human_sorted_items, isolate_module
from coverage.types import TWritable
os = isolate_module(os)
@@ -489,3 +491,34 @@ def _clean_stack_line(s: str) -> str: # pragma: debugging
s = s.replace(os.path.dirname(os.__file__) + "/", "")
s = s.replace(sys.prefix + "/", "")
return s
+
+
+def relevant_environment_display(env: Mapping[str, str]) -> List[Tuple[str, str]]:
+ """Filter environment variables for a debug display.
+
+ Select variables to display (with COV or PY in the name, or HOME, TEMP, or
+ TMP), and also cloak sensitive values with asterisks.
+
+ Arguments:
+ env: a dict of environment variable names and values.
+
+ Returns:
+ A list of pairs (name, value) to show.
+
+ """
+ slugs = {"COV", "PY"}
+ include = {"HOME", "TEMP", "TMP"}
+ cloak = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"}
+
+ to_show = []
+ for name, val in env.items():
+ keep = False
+ if name in include:
+ keep = True
+ elif any(slug in name for slug in slugs):
+ keep = True
+ if keep:
+ if any(slug in name for slug in cloak):
+ val = re.sub(r"\w", "*", val)
+ to_show.append((name, val))
+ return human_sorted_items(to_show)
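
The new `relevant_environment_display` helper above both filters and cloaks environment variables. A minimal sketch of how it behaves, assuming the code in this patch; the variable names in the sample dict are made up for illustration:

```python
from coverage.debug import relevant_environment_display

env = {
    "HOME": "/home/ned",
    "PYTHONPATH": "/src",
    "COVERAGE_API_TOKEN": "hunter2",  # hypothetical name: "COV" keeps it, "API"/"TOKEN" cloak it
    "UNRELATED": "ignored",           # no COV/PY and not in the include set, so dropped
}
for name, value in relevant_environment_display(env):
    print(f"{name} = {value}")
# COVERAGE_API_TOKEN = *******
# HOME = /home/ned
# PYTHONPATH = /src
```
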
diff --git a/coverage/env.py b/coverage/env.py
index bdc2c7854..3370970e3 100644
--- a/coverage/env.py
+++ b/coverage/env.py
@@ -40,13 +40,10 @@ class PYBEHAVIOR:
# Does Python conform to PEP626, Precise line numbers for debugging and other tools.
# https://www.python.org/dev/peps/pep-0626
- pep626 = CPYTHON and (PYVERSION > (3, 10, 0, "alpha", 4))
+ pep626 = (PYVERSION > (3, 10, 0, "alpha", 4))
# Is "if __debug__" optimized away?
- if PYPY:
- optimize_if_debug = True
- else:
- optimize_if_debug = not pep626
+ optimize_if_debug = not pep626
# Is "if not __debug__" optimized away? The exact details have changed
# across versions.
@@ -137,6 +134,10 @@ class PYBEHAVIOR:
# only a 0-number line, which is ignored, giving a truly empty module.
empty_is_empty = (PYVERSION >= (3, 11, 0, "beta", 4))
+ # Are comprehensions inlined (new) or compiled as called functions (old)?
+ # Changed in https://github.com/python/cpython/pull/101441
+ comprehensions_are_functions = (PYVERSION <= (3, 12, 0, "alpha", 7, 0))
+
# Coverage.py specifics.
# Are we using the C-implemented trace function?
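
The new `comprehensions_are_functions` flag records whether comprehensions compile to separate code objects (3.12.0a7 and earlier) or are inlined (3.12.0b1 and later). A small sketch of reading the flag at runtime, mirroring its use in the parser.py change later in this diff:

```python
from coverage import env

if env.PYBEHAVIOR.comprehensions_are_functions:
    # Python <= 3.12.0a7: each comprehension is its own (invisible) code object.
    print("comprehensions compile to separate code objects")
else:
    # Python 3.12.0b1+: comprehensions are inlined into the enclosing code object.
    print("comprehensions are inlined")
```
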
diff --git a/coverage/html.py b/coverage/html.py
index f11d85e1a..532eb66c2 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -23,7 +23,7 @@
from coverage.files import flat_rootname
from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime
from coverage.misc import human_sorted, plural, stdout_link
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
from coverage.templite import Templite
from coverage.types import TLineNo, TMorf
diff --git a/coverage/inorout.py b/coverage/inorout.py
index ff46bac0d..d2dbdcdf7 100644
--- a/coverage/inorout.py
+++ b/coverage/inorout.py
@@ -528,7 +528,7 @@ def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]:
Yields pairs: file path, and responsible plug-in name.
"""
for pkg in self.source_pkgs:
- if (not pkg in sys.modules or
+ if (pkg not in sys.modules or
not module_has_file(sys.modules[pkg])):
continue
pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__))
diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py
index 24e33585c..9780e261a 100644
--- a/coverage/jsonreport.py
+++ b/coverage/jsonreport.py
@@ -12,7 +12,7 @@
from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
from coverage import __version__
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
from coverage.types import TMorf, TLineNo
diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py
index 7d72e8135..3da164d5d 100644
--- a/coverage/lcovreport.py
+++ b/coverage/lcovreport.py
@@ -5,20 +5,25 @@
from __future__ import annotations
-import sys
import base64
-from hashlib import md5
+import hashlib
+import sys
from typing import IO, Iterable, Optional, TYPE_CHECKING
from coverage.plugin import FileReporter
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
from coverage.types import TMorf
if TYPE_CHECKING:
from coverage import Coverage
- from coverage.data import CoverageData
+
+
+def line_hash(line: str) -> str:
+ """Produce a hash of a source line for use in the LCOV file."""
+ hashed = hashlib.md5(line.encode("utf-8")).digest()
+ return base64.b64encode(hashed).decode("ascii").rstrip("=")
class LcovReporter:
@@ -69,17 +74,17 @@ def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> No
# characters of the encoding ("==") are removed from the hash to
# allow genhtml to run on the resulting lcov file.
if source_lines:
- line = source_lines[covered-1].encode("utf-8")
+ if covered-1 >= len(source_lines):
+ break
+ line = source_lines[covered-1]
else:
- line = b""
- hashed = base64.b64encode(md5(line).digest()).decode().rstrip("=")
- outfile.write(f"DA:{covered},1,{hashed}\n")
+ line = ""
+ outfile.write(f"DA:{covered},1,{line_hash(line)}\n")
for missed in sorted(analysis.missing):
assert source_lines
- line = source_lines[missed-1].encode("utf-8")
- hashed = base64.b64encode(md5(line).digest()).decode().rstrip("=")
- outfile.write(f"DA:{missed},0,{hashed}\n")
+ line = source_lines[missed-1]
+ outfile.write(f"DA:{missed},0,{line_hash(line)}\n")
outfile.write(f"LF:{analysis.numbers.n_statements}\n")
outfile.write(f"LH:{analysis.numbers.n_executed}\n")
diff --git a/coverage/parser.py b/coverage/parser.py
index e653a9ccd..51a5a52da 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -251,7 +251,7 @@ def parse_source(self) -> None:
"""
try:
self._raw_parse()
- except (tokenize.TokenError, IndentationError) as err:
+ except (tokenize.TokenError, IndentationError, SyntaxError) as err:
if hasattr(err, "lineno"):
lineno = err.lineno # IndentationError
else:
@@ -1343,9 +1343,10 @@ def _code_object__ClassDef(self, node: ast.ClassDef) -> None:
_code_object__Lambda = _make_expression_code_method("lambda")
_code_object__GeneratorExp = _make_expression_code_method("generator expression")
- _code_object__DictComp = _make_expression_code_method("dictionary comprehension")
- _code_object__SetComp = _make_expression_code_method("set comprehension")
- _code_object__ListComp = _make_expression_code_method("list comprehension")
+ if env.PYBEHAVIOR.comprehensions_are_functions:
+ _code_object__DictComp = _make_expression_code_method("dictionary comprehension")
+ _code_object__SetComp = _make_expression_code_method("set comprehension")
+ _code_object__ListComp = _make_expression_code_method("list comprehension")
# Code only used when dumping the AST for debugging.
diff --git a/coverage/report.py b/coverage/report.py
index 09eed0a82..e1c7a071d 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -1,117 +1,281 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-"""Reporter foundation for coverage.py."""
+"""Summary reporting"""
from __future__ import annotations
import sys
-from typing import Callable, Iterable, Iterator, IO, Optional, Tuple, TYPE_CHECKING
+from typing import Any, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
-from coverage.exceptions import NoDataError, NotPython
-from coverage.files import prep_patterns, GlobMatcher
-from coverage.misc import ensure_dir_for_file, file_be_gone
+from coverage.exceptions import ConfigError, NoDataError
+from coverage.misc import human_sorted_items
from coverage.plugin import FileReporter
-from coverage.results import Analysis
-from coverage.types import Protocol, TMorf
+from coverage.report_core import get_analysis_to_report
+from coverage.results import Analysis, Numbers
+from coverage.types import TMorf
if TYPE_CHECKING:
from coverage import Coverage
-class Reporter(Protocol):
- """What we expect of reporters."""
-
- report_type: str
-
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
- """Generate a report of `morfs`, written to `outfile`."""
-
-
-def render_report(
- output_path: str,
- reporter: Reporter,
- morfs: Optional[Iterable[TMorf]],
- msgfn: Callable[[str], None],
-) -> float:
- """Run a one-file report generator, managing the output file.
-
- This function ensures the output file is ready to be written to. Then writes
- the report to it. Then closes the file and cleans up.
-
- """
- file_to_close = None
- delete_file = False
-
- if output_path == "-":
- outfile = sys.stdout
- else:
- # Ensure that the output directory is created; done here because this
- # report pre-opens the output file. HtmlReporter does this on its own
- # because its task is more complex, being multiple files.
- ensure_dir_for_file(output_path)
- outfile = open(output_path, "w", encoding="utf-8")
- file_to_close = outfile
- delete_file = True
-
- try:
- ret = reporter.report(morfs, outfile=outfile)
- if file_to_close is not None:
- msgfn(f"Wrote {reporter.report_type} to {output_path}")
- delete_file = False
- return ret
- finally:
- if file_to_close is not None:
- file_to_close.close()
- if delete_file:
- file_be_gone(output_path) # pragma: part covered (doesn't return)
-
-
-def get_analysis_to_report(
- coverage: Coverage,
- morfs: Optional[Iterable[TMorf]],
-) -> Iterator[Tuple[FileReporter, Analysis]]:
- """Get the files to report on.
-
- For each morf in `morfs`, if it should be reported on (based on the omit
- and include configuration options), yield a pair, the `FileReporter` and
- `Analysis` for the morf.
-
- """
- file_reporters = coverage._get_file_reporters(morfs)
- config = coverage.config
-
- if config.report_include:
- matcher = GlobMatcher(prep_patterns(config.report_include), "report_include")
- file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
-
- if config.report_omit:
- matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit")
- file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
-
- if not file_reporters:
- raise NoDataError("No data to report.")
-
- for fr in sorted(file_reporters):
- try:
- analysis = coverage._analyze(fr)
- except NotPython:
- # Only report errors for .py files, and only if we didn't
- # explicitly suppress those errors.
- # NotPython is only raised by PythonFileReporter, which has a
- # should_be_python() method.
- if fr.should_be_python(): # type: ignore[attr-defined]
- if config.ignore_errors:
- msg = f"Couldn't parse Python file '{fr.filename}'"
- coverage._warn(msg, slug="couldnt-parse")
- else:
- raise
- except Exception as exc:
- if config.ignore_errors:
- msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
- coverage._warn(msg, slug="couldnt-parse")
+class SummaryReporter:
+ """A reporter for writing the summary report."""
+
+ def __init__(self, coverage: Coverage) -> None:
+ self.coverage = coverage
+ self.config = self.coverage.config
+ self.branches = coverage.get_data().has_arcs()
+ self.outfile: Optional[IO[str]] = None
+ self.output_format = self.config.format or "text"
+ if self.output_format not in {"text", "markdown", "total"}:
+ raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
+ self.fr_analysis: List[Tuple[FileReporter, Analysis]] = []
+ self.skipped_count = 0
+ self.empty_count = 0
+ self.total = Numbers(precision=self.config.precision)
+
+ def write(self, line: str) -> None:
+ """Write a line to the output, adding a newline."""
+ assert self.outfile is not None
+ self.outfile.write(line.rstrip())
+ self.outfile.write("\n")
+
+ def write_items(self, items: Iterable[str]) -> None:
+ """Write a list of strings, joined together."""
+ self.write("".join(items))
+
+ def _report_text(
+ self,
+ header: List[str],
+ lines_values: List[List[Any]],
+ total_line: List[Any],
+ end_lines: List[str],
+ ) -> None:
+ """Internal method that prints report data in text format.
+
+ `header` is a list with captions.
+ `lines_values` is list of lists of sortable values.
+ `total_line` is a list with values of the total line.
+ `end_lines` is a list of ending lines with information about skipped files.
+
+ """
+ # Prepare the formatting strings, header, and column sorting.
+ max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
+ max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
+ max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
+ formats = dict(
+ Name="{:{name_len}}",
+ Stmts="{:>7}",
+ Miss="{:>7}",
+ Branch="{:>7}",
+ BrPart="{:>7}",
+ Cover="{:>{n}}",
+ Missing="{:>10}",
+ )
+ header_items = [
+ formats[item].format(item, name_len=max_name, n=max_n)
+ for item in header
+ ]
+ header_str = "".join(header_items)
+ rule = "-" * len(header_str)
+
+ # Write the header
+ self.write(header_str)
+ self.write(rule)
+
+ formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}")
+ for values in lines_values:
+ # build string with line values
+ line_items = [
+ formats[item].format(str(value),
+ name_len=max_name, n=max_n-1) for item, value in zip(header, values)
+ ]
+ self.write_items(line_items)
+
+ # Write a TOTAL line
+ if lines_values:
+ self.write(rule)
+
+ line_items = [
+ formats[item].format(str(value),
+ name_len=max_name, n=max_n-1) for item, value in zip(header, total_line)
+ ]
+ self.write_items(line_items)
+
+ for end_line in end_lines:
+ self.write(end_line)
+
+ def _report_markdown(
+ self,
+ header: List[str],
+ lines_values: List[List[Any]],
+ total_line: List[Any],
+ end_lines: List[str],
+ ) -> None:
+ """Internal method that prints report data in markdown format.
+
+ `header` is a list with captions.
+ `lines_values` is a sorted list of lists containing coverage information.
+ `total_line` is a list with values of the total line.
+ `end_lines` is a list of ending lines with information about skipped files.
+
+ """
+ # Prepare the formatting strings, header, and column sorting.
+ max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
+ max_name = max(max_name, len("**TOTAL**")) + 1
+ formats = dict(
+ Name="| {:{name_len}}|",
+ Stmts="{:>9} |",
+ Miss="{:>9} |",
+ Branch="{:>9} |",
+ BrPart="{:>9} |",
+ Cover="{:>{n}} |",
+ Missing="{:>10} |",
+ )
+ max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
+ header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
+ header_str = "".join(header_items)
+ rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] +
+ ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]]
+ )
+
+ # Write the header
+ self.write(header_str)
+ self.write(rule_str)
+
+ for values in lines_values:
+ # build string with line values
+ formats.update(dict(Cover="{:>{n}}% |"))
+ line_items = [
+ formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1)
+ for item, value in zip(header, values)
+ ]
+ self.write_items(line_items)
+
+ # Write the TOTAL line
+ formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
+ total_line_items: List[str] = []
+ for item, value in zip(header, total_line):
+ if value == "":
+ insert = value
+ elif item == "Cover":
+ insert = f" **{value}%**"
else:
- raise
+ insert = f" **{value}**"
+ total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
+ self.write_items(total_line_items)
+ for end_line in end_lines:
+ self.write(end_line)
+
+ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
+ """Writes a report summarizing coverage statistics per module.
+
+ `outfile` is a text-mode file object to write the summary to.
+
+ """
+ self.outfile = outfile or sys.stdout
+
+ self.coverage.get_data().set_query_contexts(self.config.report_contexts)
+ for fr, analysis in get_analysis_to_report(self.coverage, morfs):
+ self.report_one_file(fr, analysis)
+
+ if not self.total.n_files and not self.skipped_count:
+ raise NoDataError("No data to report.")
+
+ if self.output_format == "total":
+ self.write(self.total.pc_covered_str)
+ else:
+ self.tabular_report()
+
+ return self.total.pc_covered
+
+ def tabular_report(self) -> None:
+ """Writes tabular report formats."""
+ # Prepare the header line and column sorting.
+ header = ["Name", "Stmts", "Miss"]
+ if self.branches:
+ header += ["Branch", "BrPart"]
+ header += ["Cover"]
+ if self.config.show_missing:
+ header += ["Missing"]
+
+ column_order = dict(name=0, stmts=1, miss=2, cover=-1)
+ if self.branches:
+ column_order.update(dict(branch=3, brpart=4))
+
+ # `lines_values` is list of lists of sortable values.
+ lines_values = []
+
+ for (fr, analysis) in self.fr_analysis:
+ nums = analysis.numbers
+
+ args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
+ if self.branches:
+ args += [nums.n_branches, nums.n_partial_branches]
+ args += [nums.pc_covered_str]
+ if self.config.show_missing:
+ args += [analysis.missing_formatted(branches=True)]
+ args += [nums.pc_covered]
+ lines_values.append(args)
+
+ # Line sorting.
+ sort_option = (self.config.sort or "name").lower()
+ reverse = False
+ if sort_option[0] == "-":
+ reverse = True
+ sort_option = sort_option[1:]
+ elif sort_option[0] == "+":
+ sort_option = sort_option[1:]
+ sort_idx = column_order.get(sort_option)
+ if sort_idx is None:
+ raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
+ if sort_option == "name":
+ lines_values = human_sorted_items(lines_values, reverse=reverse)
+ else:
+ lines_values.sort(
+ key=lambda line: (line[sort_idx], line[0]), # type: ignore[index]
+ reverse=reverse,
+ )
+
+ # Calculate total if we had at least one file.
+ total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
+ if self.branches:
+ total_line += [self.total.n_branches, self.total.n_partial_branches]
+ total_line += [self.total.pc_covered_str]
+ if self.config.show_missing:
+ total_line += [""]
+
+ # Create other final lines.
+ end_lines = []
+ if self.config.skip_covered and self.skipped_count:
+ file_suffix = "s" if self.skipped_count>1 else ""
+ end_lines.append(
+ f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage."
+ )
+ if self.config.skip_empty and self.empty_count:
+ file_suffix = "s" if self.empty_count > 1 else ""
+ end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")
+
+ if self.output_format == "markdown":
+ formatter = self._report_markdown
+ else:
+ formatter = self._report_text
+ formatter(header, lines_values, total_line, end_lines)
+
+ def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
+ """Report on just one file, the callback from report()."""
+ nums = analysis.numbers
+ self.total += nums
+
+ no_missing_lines = (nums.n_missing == 0)
+ no_missing_branches = (nums.n_partial_branches == 0)
+ if self.config.skip_covered and no_missing_lines and no_missing_branches:
+ # Don't report on 100% files.
+ self.skipped_count += 1
+ elif self.config.skip_empty and nums.n_statements == 0:
+ # Don't report on empty files.
+ self.empty_count += 1
else:
- yield (fr, analysis)
+ self.fr_analysis.append((fr, analysis))
diff --git a/coverage/report_core.py b/coverage/report_core.py
new file mode 100644
index 000000000..09eed0a82
--- /dev/null
+++ b/coverage/report_core.py
@@ -0,0 +1,117 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Reporter foundation for coverage.py."""
+
+from __future__ import annotations
+
+import sys
+
+from typing import Callable, Iterable, Iterator, IO, Optional, Tuple, TYPE_CHECKING
+
+from coverage.exceptions import NoDataError, NotPython
+from coverage.files import prep_patterns, GlobMatcher
+from coverage.misc import ensure_dir_for_file, file_be_gone
+from coverage.plugin import FileReporter
+from coverage.results import Analysis
+from coverage.types import Protocol, TMorf
+
+if TYPE_CHECKING:
+ from coverage import Coverage
+
+
+class Reporter(Protocol):
+ """What we expect of reporters."""
+
+ report_type: str
+
+ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ """Generate a report of `morfs`, written to `outfile`."""
+
+
+def render_report(
+ output_path: str,
+ reporter: Reporter,
+ morfs: Optional[Iterable[TMorf]],
+ msgfn: Callable[[str], None],
+) -> float:
+ """Run a one-file report generator, managing the output file.
+
+ This function ensures the output file is ready to be written to. Then writes
+ the report to it. Then closes the file and cleans up.
+
+ """
+ file_to_close = None
+ delete_file = False
+
+ if output_path == "-":
+ outfile = sys.stdout
+ else:
+ # Ensure that the output directory is created; done here because this
+ # report pre-opens the output file. HtmlReporter does this on its own
+ # because its task is more complex, being multiple files.
+ ensure_dir_for_file(output_path)
+ outfile = open(output_path, "w", encoding="utf-8")
+ file_to_close = outfile
+ delete_file = True
+
+ try:
+ ret = reporter.report(morfs, outfile=outfile)
+ if file_to_close is not None:
+ msgfn(f"Wrote {reporter.report_type} to {output_path}")
+ delete_file = False
+ return ret
+ finally:
+ if file_to_close is not None:
+ file_to_close.close()
+ if delete_file:
+ file_be_gone(output_path) # pragma: part covered (doesn't return)
+
+
+def get_analysis_to_report(
+ coverage: Coverage,
+ morfs: Optional[Iterable[TMorf]],
+) -> Iterator[Tuple[FileReporter, Analysis]]:
+ """Get the files to report on.
+
+ For each morf in `morfs`, if it should be reported on (based on the omit
+ and include configuration options), yield a pair, the `FileReporter` and
+ `Analysis` for the morf.
+
+ """
+ file_reporters = coverage._get_file_reporters(morfs)
+ config = coverage.config
+
+ if config.report_include:
+ matcher = GlobMatcher(prep_patterns(config.report_include), "report_include")
+ file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
+
+ if config.report_omit:
+ matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit")
+ file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
+
+ if not file_reporters:
+ raise NoDataError("No data to report.")
+
+ for fr in sorted(file_reporters):
+ try:
+ analysis = coverage._analyze(fr)
+ except NotPython:
+ # Only report errors for .py files, and only if we didn't
+ # explicitly suppress those errors.
+ # NotPython is only raised by PythonFileReporter, which has a
+ # should_be_python() method.
+ if fr.should_be_python(): # type: ignore[attr-defined]
+ if config.ignore_errors:
+ msg = f"Couldn't parse Python file '{fr.filename}'"
+ coverage._warn(msg, slug="couldnt-parse")
+ else:
+ raise
+ except Exception as exc:
+ if config.ignore_errors:
+ msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
+ coverage._warn(msg, slug="couldnt-parse")
+ else:
+ raise
+ else:
+ yield (fr, analysis)
diff --git a/coverage/summary.py b/coverage/summary.py
deleted file mode 100644
index 5d373ec52..000000000
--- a/coverage/summary.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Summary reporting"""
-
-from __future__ import annotations
-
-import sys
-
-from typing import Any, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
-
-from coverage.exceptions import ConfigError, NoDataError
-from coverage.misc import human_sorted_items
-from coverage.plugin import FileReporter
-from coverage.report import get_analysis_to_report
-from coverage.results import Analysis, Numbers
-from coverage.types import TMorf
-
-if TYPE_CHECKING:
- from coverage import Coverage
-
-
-class SummaryReporter:
- """A reporter for writing the summary report."""
-
- def __init__(self, coverage: Coverage) -> None:
- self.coverage = coverage
- self.config = self.coverage.config
- self.branches = coverage.get_data().has_arcs()
- self.outfile: Optional[IO[str]] = None
- self.output_format = self.config.format or "text"
- if self.output_format not in {"text", "markdown", "total"}:
- raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
- self.fr_analysis: List[Tuple[FileReporter, Analysis]] = []
- self.skipped_count = 0
- self.empty_count = 0
- self.total = Numbers(precision=self.config.precision)
-
- def write(self, line: str) -> None:
- """Write a line to the output, adding a newline."""
- assert self.outfile is not None
- self.outfile.write(line.rstrip())
- self.outfile.write("\n")
-
- def write_items(self, items: Iterable[str]) -> None:
- """Write a list of strings, joined together."""
- self.write("".join(items))
-
- def _report_text(
- self,
- header: List[str],
- lines_values: List[List[Any]],
- total_line: List[Any],
- end_lines: List[str],
- ) -> None:
- """Internal method that prints report data in text format.
-
- `header` is a list with captions.
- `lines_values` is list of lists of sortable values.
- `total_line` is a list with values of the total line.
- `end_lines` is a list of ending lines with information about skipped files.
-
- """
- # Prepare the formatting strings, header, and column sorting.
- max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
- max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
- max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
- formats = dict(
- Name="{:{name_len}}",
- Stmts="{:>7}",
- Miss="{:>7}",
- Branch="{:>7}",
- BrPart="{:>7}",
- Cover="{:>{n}}",
- Missing="{:>10}",
- )
- header_items = [
- formats[item].format(item, name_len=max_name, n=max_n)
- for item in header
- ]
- header_str = "".join(header_items)
- rule = "-" * len(header_str)
-
- # Write the header
- self.write(header_str)
- self.write(rule)
-
- formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}")
- for values in lines_values:
- # build string with line values
- line_items = [
- formats[item].format(str(value),
- name_len=max_name, n=max_n-1) for item, value in zip(header, values)
- ]
- self.write_items(line_items)
-
- # Write a TOTAL line
- if lines_values:
- self.write(rule)
-
- line_items = [
- formats[item].format(str(value),
- name_len=max_name, n=max_n-1) for item, value in zip(header, total_line)
- ]
- self.write_items(line_items)
-
- for end_line in end_lines:
- self.write(end_line)
-
- def _report_markdown(
- self,
- header: List[str],
- lines_values: List[List[Any]],
- total_line: List[Any],
- end_lines: List[str],
- ) -> None:
- """Internal method that prints report data in markdown format.
-
- `header` is a list with captions.
- `lines_values` is a sorted list of lists containing coverage information.
- `total_line` is a list with values of the total line.
- `end_lines` is a list of ending lines with information about skipped files.
-
- """
- # Prepare the formatting strings, header, and column sorting.
- max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
- max_name = max(max_name, len("**TOTAL**")) + 1
- formats = dict(
- Name="| {:{name_len}}|",
- Stmts="{:>9} |",
- Miss="{:>9} |",
- Branch="{:>9} |",
- BrPart="{:>9} |",
- Cover="{:>{n}} |",
- Missing="{:>10} |",
- )
- max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
- header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
- header_str = "".join(header_items)
- rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] +
- ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]]
- )
-
- # Write the header
- self.write(header_str)
- self.write(rule_str)
-
- for values in lines_values:
- # build string with line values
- formats.update(dict(Cover="{:>{n}}% |"))
- line_items = [
- formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1)
- for item, value in zip(header, values)
- ]
- self.write_items(line_items)
-
- # Write the TOTAL line
- formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
- total_line_items: List[str] = []
- for item, value in zip(header, total_line):
- if value == "":
- insert = value
- elif item == "Cover":
- insert = f" **{value}%**"
- else:
- insert = f" **{value}**"
- total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
- self.write_items(total_line_items)
- for end_line in end_lines:
- self.write(end_line)
-
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
- """Writes a report summarizing coverage statistics per module.
-
- `outfile` is a text-mode file object to write the summary to.
-
- """
- self.outfile = outfile or sys.stdout
-
- self.coverage.get_data().set_query_contexts(self.config.report_contexts)
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.report_one_file(fr, analysis)
-
- if not self.total.n_files and not self.skipped_count:
- raise NoDataError("No data to report.")
-
- if self.output_format == "total":
- self.write(self.total.pc_covered_str)
- else:
- self.tabular_report()
-
- return self.total.pc_covered
-
- def tabular_report(self) -> None:
- """Writes tabular report formats."""
- # Prepare the header line and column sorting.
- header = ["Name", "Stmts", "Miss"]
- if self.branches:
- header += ["Branch", "BrPart"]
- header += ["Cover"]
- if self.config.show_missing:
- header += ["Missing"]
-
- column_order = dict(name=0, stmts=1, miss=2, cover=-1)
- if self.branches:
- column_order.update(dict(branch=3, brpart=4))
-
- # `lines_values` is list of lists of sortable values.
- lines_values = []
-
- for (fr, analysis) in self.fr_analysis:
- nums = analysis.numbers
-
- args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
- if self.branches:
- args += [nums.n_branches, nums.n_partial_branches]
- args += [nums.pc_covered_str]
- if self.config.show_missing:
- args += [analysis.missing_formatted(branches=True)]
- args += [nums.pc_covered]
- lines_values.append(args)
-
- # Line sorting.
- sort_option = (self.config.sort or "name").lower()
- reverse = False
- if sort_option[0] == "-":
- reverse = True
- sort_option = sort_option[1:]
- elif sort_option[0] == "+":
- sort_option = sort_option[1:]
- sort_idx = column_order.get(sort_option)
- if sort_idx is None:
- raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
- if sort_option == "name":
- lines_values = human_sorted_items(lines_values, reverse=reverse)
- else:
- lines_values.sort(
- key=lambda line: (line[sort_idx], line[0]), # type: ignore[index]
- reverse=reverse,
- )
-
- # Calculate total if we had at least one file.
- total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
- if self.branches:
- total_line += [self.total.n_branches, self.total.n_partial_branches]
- total_line += [self.total.pc_covered_str]
- if self.config.show_missing:
- total_line += [""]
-
- # Create other final lines.
- end_lines = []
- if self.config.skip_covered and self.skipped_count:
- file_suffix = "s" if self.skipped_count>1 else ""
- end_lines.append(
- f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage."
- )
- if self.config.skip_empty and self.empty_count:
- file_suffix = "s" if self.empty_count > 1 else ""
- end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")
-
- if self.output_format == "markdown":
- formatter = self._report_markdown
- else:
- formatter = self._report_text
- formatter(header, lines_values, total_line, end_lines)
-
- def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
- """Report on just one file, the callback from report()."""
- nums = analysis.numbers
- self.total += nums
-
- no_missing_lines = (nums.n_missing == 0)
- no_missing_branches = (nums.n_partial_branches == 0)
- if self.config.skip_covered and no_missing_lines and no_missing_branches:
- # Don't report on 100% files.
- self.skipped_count += 1
- elif self.config.skip_empty and nums.n_statements == 0:
- # Don't report on empty files.
- self.empty_count += 1
- else:
- self.fr_analysis.append((fr, analysis))
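
Taken together, the report.py rewrite, the new report_core.py, and the deletion of summary.py amount to a module rename. A sketch of the resulting import paths (internal APIs, so subject to change), matching the import updates elsewhere in this diff:

```python
from coverage.report import SummaryReporter              # previously coverage.summary
from coverage.report_core import render_report           # previously coverage.report
from coverage.report_core import get_analysis_to_report  # previously coverage.report
```
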
diff --git a/coverage/version.py b/coverage/version.py
index 516d00c15..c48974967 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -8,7 +8,7 @@
# version_info: same semantics as sys.version_info.
# _dev: the .devN suffix if any.
-version_info = (7, 2, 5, "final", 0)
+version_info = (7, 2, 7, "final", 0)
_dev = 0
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index 82e60fc1f..819b4c6bc 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -12,12 +12,12 @@
import xml.dom.minidom
from dataclasses import dataclass
-from typing import Any, Dict, IO, Iterable, Optional, TYPE_CHECKING, cast
+from typing import Any, Dict, IO, Iterable, Optional, TYPE_CHECKING
from coverage import __version__, files
from coverage.misc import isolate_module, human_sorted, human_sorted_items
from coverage.plugin import FileReporter
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis
from coverage.types import TMorf
from coverage.version import __url__
@@ -257,4 +257,4 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
def serialize_xml(dom: xml.dom.minidom.Document) -> str:
"""Serialize a minidom node to XML."""
- return cast(str, dom.toprettyxml())
+ return dom.toprettyxml()
diff --git a/doc/cmd.rst b/doc/cmd.rst
index 0704e940a..7db6746a8 100644
--- a/doc/cmd.rst
+++ b/doc/cmd.rst
@@ -624,9 +624,10 @@ Here's a `sample report`__.
__ https://nedbatchelder.com/files/sample_coverage_html/index.html
-Lines are highlighted green for executed, red for missing, and gray for
-excluded. The counts at the top of the file are buttons to turn on and off
-the highlighting.
+Lines are highlighted: green for executed, red for missing, and gray for
+excluded. If you've used branch coverage, partial branches are yellow. The
+colored counts at the top of the file are buttons to turn on and off the
+highlighting.
A number of keyboard shortcuts are available for navigating the report.
Click the keyboard icon in the upper right to see the complete list.
diff --git a/doc/conf.py b/doc/conf.py
index fbd7e3f3a..bee8c14b2 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -66,11 +66,11 @@
# @@@ editable
copyright = "2009–2023, Ned Batchelder" # pylint: disable=redefined-builtin
# The short X.Y.Z version.
-version = "7.2.5"
+version = "7.2.7"
# The full version, including alpha/beta/rc tags.
-release = "7.2.5"
+release = "7.2.7"
# The date of release, in "monthname day, year" format.
-release_date = "April 30, 2023"
+release_date = "May 29, 2023"
# @@@ end
rst_epilog = """
diff --git a/doc/config.rst b/doc/config.rst
index 152b3af48..0100d89e1 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -79,10 +79,7 @@ Here's a sample configuration file::
[report]
# Regexes for lines to exclude from consideration
- exclude_lines =
- # Have to re-enable the standard pragma
- pragma: no cover
-
+ exclude_also =
# Don't complain about missing debug-only code:
def __repr__
if self\.debug
@@ -375,6 +372,19 @@ See :ref:`cmd_combine_remapping` and :ref:`source_glob` for more information.
Settings common to many kinds of reporting.
+.. _config_report_exclude_also:
+
+[report] exclude_also
+.....................
+
+(multi-string) A list of regular expressions. This setting is similar to
+:ref:`config_report_exclude_lines`: it specifies patterns for lines to exclude
+from reporting. This setting is preferred, because it will preserve the
+default exclude patterns instead of overwriting them.
+
+.. versionadded:: 7.2.0
+
+
.. _config_report_exclude_lines:
[report] exclude_lines
@@ -384,7 +394,9 @@ Settings common to many kinds of reporting.
containing a match for one of these regexes is excluded from being reported as
missing. More details are in :ref:`excluding`. If you use this option, you
are replacing all the exclude regexes, so you'll need to also supply the
-"pragma: no cover" regex if you still want to use it.
+"pragma: no cover" regex if you still want to use it. The
+:ref:`config_report_exclude_also` setting can be used to specify patterns
+without overwriting the default set.
You can exclude lines introducing blocks, and the entire block is excluded. If
you exclude a ``def`` line or decorator line, the entire function is excluded.
@@ -395,19 +407,6 @@ you'll exclude any line with three or more of any character. If you write
``pass``, you'll also exclude the line ``my_pass="foo"``, and so on.
-.. _config_report_exclude_also:
-
-[report] exclude_also
-.....................
-
-(multi-string) A list of regular expressions. This setting is the same as
-:ref:`config_report_exclude_lines`: it adds patterns for lines to exclude from
-reporting. This setting will preserve the default exclude patterns instead of
-overwriting them.
-
-.. versionadded:: 7.2.0
-
-
.. _config_report_fail_under:
[report] fail_under
diff --git a/doc/excluding.rst b/doc/excluding.rst
index 4651e6bba..e9d28f156 100644
--- a/doc/excluding.rst
+++ b/doc/excluding.rst
@@ -80,14 +80,13 @@ debugging code, and are uninteresting to test themselves. You could exclude
all of them by adding a regex to the exclusion list::
[report]
- exclude_lines =
+ exclude_also =
def __repr__
For example, here's a list of exclusions I've used::
[report]
- exclude_lines =
- pragma: no cover
+ exclude_also =
def __repr__
if self.debug:
if settings.DEBUG
@@ -99,11 +98,10 @@ For example, here's a list of exclusions I've used::
class .*\bProtocol\):
@(abc\.)?abstractmethod
-Note that when using the ``exclude_lines`` option in a configuration file, you
-are taking control of the entire list of regexes, so you need to re-specify the
-default "pragma: no cover" match if you still want it to apply. The
-``exclude_also`` option can be used instead to preserve the default
-exclusions while adding new ones.
+The :ref:`config_report_exclude_also` option adds regexes to the built-in
+default list so that you can add your own exclusions. The older
+:ref:`config_report_exclude_lines` option completely overwrites the list of
+regexes.
The regexes only have to match part of a line. Be careful not to over-match. A
value of ``...`` will match any line with more than three characters in it.
diff --git a/doc/faq.rst b/doc/faq.rst
index b25dce0fd..d4f5a565e 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -121,7 +121,7 @@ Make sure you are using the C trace function. Coverage.py provides two
implementations of the trace function. The C implementation runs much faster.
To see what you are running, use ``coverage debug sys``. The output contains
details of the environment, including a line that says either
-``CTrace: available`` or ``CTracer: unavailable``. If it says unavailable,
+``CTracer: available`` or ``CTracer: unavailable``. If it says unavailable,
then you are using the slow Python implementation.
Try re-installing coverage.py to see what happened and if you get the CTracer
diff --git a/doc/index.rst b/doc/index.rst
index 2420aa6f8..2475eb402 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -18,7 +18,7 @@ supported on:
.. PYVERSIONS
-* Python versions 3.7 through 3.12.0a7.
+* Python versions 3.7 through 3.12.0b1.
* PyPy3 7.3.11.
.. ifconfig:: prerelease
diff --git a/doc/migrating.rst b/doc/migrating.rst
index 7a5e65df8..443afac63 100644
--- a/doc/migrating.rst
+++ b/doc/migrating.rst
@@ -7,23 +7,48 @@
Migrating between versions
==========================
-New major versions of coverage.py might require you to adjust your settings,
-options, or other aspects of your use. This page details those changes.
+New versions of coverage.py or Python might require you to adjust your
+settings, options, or other aspects of how you use coverage.py. This page details
+those changes.
-.. _migrating_6x_7x:
+.. _migrating_cov7:
-Migrating from 6.x to 7.x
--------------------------
+Migrating to coverage.py 7.x
+----------------------------
-- The way that wildcards when specifying file paths work in certain cases has changed in 7.x:
+Consider these changes when migrating to coverage.py 7.x:
+
+- The way that wildcards work when specifying file paths has changed in
+  certain cases in 7.x:
- Previously, ``*`` would incorrectly match directory separators, making
precise matching difficult. Patterns such as ``*tests/*``
will need to be changed to ``*/tests/*``.
- - ``**`` now matches any number of nested directories. If you wish to retain the behavior of
- ``**/tests/*`` in previous versions then ``*/**/tests/*`` can be used instead.
+ - ``**`` now matches any number of nested directories. If you wish to retain
+ the behavior of ``**/tests/*`` in previous versions then ``*/**/tests/*``
+ can be used instead.
- When remapping file paths with ``[paths]``, a path will be remapped only if
- the resulting path exists. Ensure that remapped ``[paths]`` exist when upgrading
- as this is now being enforced.
+ the resulting path exists. Ensure that remapped ``[paths]`` exist when
+ upgrading as this is now being enforced.
+
+- The :ref:`config_report_exclude_also` setting is new in 7.2.0. It adds
+ exclusion regexes while keeping the default built-in set. It's better than
+ the older :ref:`config_report_exclude_lines` setting, which overwrote the
+ entire list. Newer versions of coverage.py will be adding to the default set
+ of exclusions. Using ``exclude_also`` will let you benefit from those
+ updates.
+
+
+.. _migrating_py312:
+
+Migrating to Python 3.12
+------------------------
+
+Keep these things in mind when running under Python 3.12:
+
+- Python 3.12 now inlines list, dict, and set comprehensions. Previously, they
+ were compiled as functions that were called internally. Coverage.py would
+  warn you if comprehensions weren't completely executed, but this no longer
+ happens with Python 3.12.
diff --git a/doc/requirements.pip b/doc/requirements.pip
index 16213c7ff..a1894b64f 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -10,7 +10,7 @@ attrs==23.1.0
# via scriv
babel==2.12.1
# via sphinx
-certifi==2022.12.7
+certifi==2023.5.7
# via requests
charset-normalizer==3.1.0
# via requests
@@ -56,7 +56,7 @@ pygments==2.15.1
# via sphinx
pytz==2023.3
# via babel
-requests==2.29.0
+requests==2.31.0
# via
# scriv
# sphinx
@@ -76,7 +76,7 @@ sphinx==5.3.0
# sphinxcontrib-spelling
sphinx-autobuild==2021.3.14
# via -r doc/requirements.in
-sphinx-rtd-theme==1.2.0
+sphinx-rtd-theme==1.2.1
# via -r doc/requirements.in
sphinxcontrib-applehelp==1.0.2
# via sphinx
@@ -98,9 +98,9 @@ sphinxcontrib-spelling==8.0.0
# via -r doc/requirements.in
tornado==6.2
# via livereload
-typing-extensions==4.5.0
+typing-extensions==4.6.2
# via importlib-metadata
-urllib3==1.26.15
+urllib3==2.0.2
# via requests
zipp==3.15.0
# via importlib-metadata
diff --git a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
index cf43ec9ee..c5ac367ec 100644
--- a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
+++ b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
@@ -66,8 +66,8 @@