diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index daa5f19d0..7e1f430d3 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -5,3 +5,10 @@
# 2023-01-06 style(perf): blacken lab/benchmark.py
bf6c12f5da54db7c5c0cc47cbf22c70f686e8236
+
+# 2023-03-22 style: use double-quotes
+16abd82b6e87753184e8308c4b2606ff3979f8d3
+b7be64538aa480fce641349d3053e9a84862d571
+
+# 2023-04-01 style: use double-quotes in JavaScript
+b03ab92bae24c54f1d5a98baa3af6b9a18de4d36
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 6404a7c21..60e8d0a29 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -85,7 +85,7 @@ jobs:
set -xe
python -VV
python -m site
- python -m pip install --require-hashes -r requirements/tox.pip
+ python -m pip install -r requirements/tox.pip
- name: "Run tox coverage for ${{ matrix.python-version }}"
env:
diff --git a/.github/workflows/kit.yml b/.github/workflows/kit.yml
index fd1b3a307..179f7a649 100644
--- a/.github/workflows/kit.yml
+++ b/.github/workflows/kit.yml
@@ -47,7 +47,7 @@ concurrency:
jobs:
wheels:
- name: "Build ${{ matrix.os }} ${{ matrix.py }} ${{ matrix.arch }} wheels"
+ name: "${{ matrix.py }} ${{ matrix.os }} ${{ matrix.arch }} wheels"
runs-on: ${{ matrix.os }}-latest
strategy:
matrix:
@@ -155,7 +155,7 @@ jobs:
- name: "Install tools"
run: |
- python -m pip install --require-hashes -r requirements/kit.pip
+ python -m pip install -r requirements/kit.pip
- name: "Build wheels"
env:
@@ -177,7 +177,7 @@ jobs:
path: wheelhouse/*.whl
sdist:
- name: "Build source distribution"
+ name: "Source distribution"
runs-on: ubuntu-latest
steps:
- name: "Check out the repo"
@@ -192,7 +192,7 @@ jobs:
- name: "Install tools"
run: |
- python -m pip install --require-hashes -r requirements/kit.pip
+ python -m pip install -r requirements/kit.pip
- name: "Build sdist"
run: |
@@ -209,7 +209,7 @@ jobs:
path: dist/*.tar.gz
pypy:
- name: "Build PyPy wheel"
+ name: "PyPy wheel"
runs-on: ubuntu-latest
steps:
- name: "Check out the repo"
diff --git a/.github/workflows/python-nightly.yml b/.github/workflows/python-nightly.yml
index c2ba98e60..94a30ecc2 100644
--- a/.github/workflows/python-nightly.yml
+++ b/.github/workflows/python-nightly.yml
@@ -81,7 +81,7 @@ jobs:
- name: "Install dependencies"
run: |
- python -m pip install --require-hashes -r requirements/tox.pip
+ python -m pip install -r requirements/tox.pip
- name: "Run tox"
run: |
diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml
index 0901d5caa..9ee690df9 100644
--- a/.github/workflows/quality.yml
+++ b/.github/workflows/quality.yml
@@ -46,7 +46,7 @@ jobs:
- name: "Install dependencies"
run: |
- python -m pip install --require-hashes -r requirements/tox.pip
+ python -m pip install -r requirements/tox.pip
- name: "Tox lint"
run: |
@@ -97,7 +97,7 @@ jobs:
set -xe
python -VV
python -m site
- python -m pip install --require-hashes -r requirements/tox.pip
+ python -m pip install -r requirements/tox.pip
- name: "Tox doc"
run: |
diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml
index e560325c8..b0f0ee6ca 100644
--- a/.github/workflows/testsuite.yml
+++ b/.github/workflows/testsuite.yml
@@ -73,7 +73,7 @@ jobs:
set -xe
python -VV
python -m site
- python -m pip install --require-hashes -r requirements/tox.pip
+ python -m pip install -r requirements/tox.pip
# For extreme debugging:
# python -c "import urllib.request as r; exec(r.urlopen('https://bit.ly/pydoctor').read())"
diff --git a/.gitignore b/.gitignore
index 2373d5dc7..a49767e77 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@ coverage.json
# Stuff in the root.
build
*.egg-info
+cheats.txt
dist
htmlcov
MANIFEST
diff --git a/CHANGES.rst b/CHANGES.rst
index 209eb6ad8..937835ccc 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -19,6 +19,29 @@ development at the same time, such as 4.5.x and 5.0.
.. scriv-start-here
+.. _changes_7-2-3:
+
+Version 7.2.3 — 2023-04-06
+--------------------------
+
+- Fix: the :ref:`config_run_sigterm` setting was meant to capture data if a
+ process was terminated with a SIGTERM signal, but it didn't always. This was
+ fixed thanks to `Lewis Gaul <pull 1600_>`_, closing `issue 1599`_.
+
+- Performance: HTML reports with context information are now much more compact.
+ File sizes are typically as small as one-third the previous size, but can be
+ dramatically smaller. This closes `issue 1584`_ thanks to `Oleh Krehel
+ <pull 1587_>`_.
+
+- Development dependencies no longer use hashed pins, closing `issue 1592`_.
+
+.. _issue 1584: https://github.com/nedbat/coveragepy/issues/1584
+.. _pull 1587: https://github.com/nedbat/coveragepy/pull/1587
+.. _issue 1592: https://github.com/nedbat/coveragepy/issues/1592
+.. _issue 1599: https://github.com/nedbat/coveragepy/issues/1599
+.. _pull 1600: https://github.com/nedbat/coveragepy/pull/1600
+
+
.. _changes_7-2-2:
Version 7.2.2 — 2023-03-16
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index a50138f85..0ba35f628 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -99,11 +99,13 @@ Judson Neer
Julian Berman
Julien Voisin
Justas Sadzevičius
+Kassandra Keeton
Kjell Braden
Krystian Kichewko
Kyle Altendorf
Lars Hupfeldt Nielsen
Leonardo Pistone
+Lewis Gaul
Lex Berezhny
Loïc Dachary
Lorenzo Micò
@@ -115,6 +117,7 @@ Marcus Cobden
Marius Gedminas
Mark van der Wal
Martin Fuzzey
+Mathieu Kniewallner
Matt Bachmann
Matthew Boehm
Matthew Desmarais
@@ -125,19 +128,22 @@ Michał Bultrowicz
Michał Górny
Mickie Betz
Mike Fiedler
-Naveen Yadav
Nathan Land
+Naveen Yadav
+Neil Pilgrim
Nikita Bloshchanevich
Nils Kattenbeck
Noel O'Boyle
+Oleh Krehel
Olivier Grisel
Ori Avtalion
-Pankaj Pandey
Pablo Carballo
+Pankaj Pandey
Patrick Mezard
Peter Baughman
Peter Ebden
Peter Portante
+Phebe Polk
Reya B
Rodrigue Cloutier
Roger Hu
@@ -168,8 +174,8 @@ Thijs Triemstra
Thomas Grainger
Titus Brown
Valentin Lab
-Vince Salvino
Ville Skyttä
+Vince Salvino
Xie Yanbo
Yilei "Dolee" Yang
Yury Selivanov
diff --git a/Makefile b/Makefile
index 7f6959208..f82f2ee27 100644
--- a/Makefile
+++ b/Makefile
@@ -11,17 +11,18 @@
clean_platform:
@rm -f *.so */*.so
+ @rm -f *.pyd */*.pyd
@rm -rf __pycache__ */__pycache__ */*/__pycache__ */*/*/__pycache__ */*/*/*/__pycache__ */*/*/*/*/__pycache__
@rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc */*/*/*/*.pyc */*/*/*/*/*.pyc
@rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo */*/*/*/*.pyo */*/*/*/*/*.pyo
+ @rm -f *$$py.class */*$$py.class */*/*$$py.class */*/*/*$$py.class */*/*/*/*$$py.class */*/*/*/*/*$$py.class
clean: clean_platform ## Remove artifacts of test execution, installation, etc.
@echo "Cleaning..."
@-pip uninstall -yq coverage
- @rm -f *.pyd */*.pyd
+ @chmod -R 777 build
@rm -rf build coverage.egg-info dist htmlcov
@rm -f *.bak */*.bak */*/*.bak */*/*/*.bak */*/*/*/*.bak */*/*/*/*/*.bak
- @rm -f *$$py.class */*$$py.class */*/*$$py.class */*/*/*$$py.class */*/*/*/*$$py.class */*/*/*/*/*$$py.class
@rm -f coverage/*,cover
@rm -f MANIFEST
@rm -f .coverage .coverage.* coverage.xml coverage.json .metacov*
@@ -36,6 +37,7 @@ clean: clean_platform ## Remove artifacts of test execution, installation, etc
sterile: clean ## Remove all non-controlled content, even if expensive.
rm -rf .tox
+ rm -f cheats.txt
help: ## Show this help.
@# Adapted from https://www.thapaliya.com/en/writings/well-documented-makefiles/
@@ -83,7 +85,7 @@ metasmoke:
.PHONY: upgrade
-PIP_COMPILE = pip-compile --upgrade --allow-unsafe --generate-hashes --resolver=backtracking
+PIP_COMPILE = pip-compile --upgrade --allow-unsafe --resolver=backtracking
upgrade: export CUSTOM_COMPILE_COMMAND=make upgrade
upgrade: ## Update the *.pip files with the latest packages satisfying *.in files.
pip install -q -r requirements/pip-tools.pip
@@ -158,6 +160,18 @@ REPO_OWNER = nedbat/coveragepy
edit_for_release: ## Edit sources to insert release facts.
python igor.py edit_for_release
+cheats: ## Create some useful snippets for releasing.
+ python igor.py cheats | tee cheats.txt
+
+relbranch: ## Create the branch for releasing.
+ git switch -c nedbat/release-$$(date +%Y%m%d)
+
+relcommit1: ## Commit the first release changes.
+ git commit -am "docs: prep for $$(python setup.py --version)"
+
+relcommit2: ## Commit the latest sample HTML report.
+ git commit -am "docs: sample HTML for $$(python setup.py --version)"
+
kit: ## Make the source distribution.
python -m build
diff --git a/README.rst b/README.rst
index 25239393e..897f8801d 100644
--- a/README.rst
+++ b/README.rst
@@ -28,7 +28,7 @@ Coverage.py runs on these versions of Python:
.. PYVERSIONS
-* CPython 3.7 through 3.12.0a6
+* CPython 3.7 through 3.12.0a7
* PyPy3 7.3.11.
Documentation is on `Read the Docs`_. Code repository and issue tracker are on
@@ -70,7 +70,8 @@ For Enterprise
Getting Started
---------------
-See the `Quick Start section`_ of the docs.
+Looking to run ``coverage`` on your test suite? See the `Quick Start section`_
+of the docs.
.. _Quick Start section: https://coverage.readthedocs.io/#quick-start
@@ -96,7 +97,8 @@ Community Code of Conduct`_.
Contributing
------------
-See the `Contributing section`_ of the docs.
+Found a bug? Want to help improve the code or documentation? See the
+`Contributing section`_ of the docs.
.. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html
diff --git a/coverage/annotate.py b/coverage/annotate.py
index 13dbe9b6e..b4a02cb47 100644
--- a/coverage/annotate.py
+++ b/coverage/annotate.py
@@ -40,8 +40,8 @@ class AnnotateReporter:
> h(2)
- Executed lines use '>', lines not executed use '!', lines excluded from
- consideration use '-'.
+ Executed lines use ">", lines not executed use "!", lines excluded from
+ consideration use "-".
"""
@@ -83,7 +83,7 @@ def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None:
else:
dest_file = fr.filename + ",cover"
- with open(dest_file, 'w', encoding='utf-8') as dest:
+ with open(dest_file, "w", encoding="utf-8") as dest:
i = j = 0
covered = True
source = fr.source()
@@ -95,20 +95,20 @@ def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None:
if i < len(statements) and statements[i] == lineno:
covered = j >= len(missing) or missing[j] > lineno
if self.blank_re.match(line):
- dest.write(' ')
+ dest.write(" ")
elif self.else_re.match(line):
- # Special logic for lines containing only 'else:'.
+ # Special logic for lines containing only "else:".
if j >= len(missing):
- dest.write('> ')
+ dest.write("> ")
elif statements[i] == missing[j]:
- dest.write('! ')
+ dest.write("! ")
else:
- dest.write('> ')
+ dest.write("> ")
elif lineno in excluded:
- dest.write('- ')
+ dest.write("- ")
elif covered:
- dest.write('> ')
+ dest.write("> ")
else:
- dest.write('! ')
+ dest.write("! ")
dest.write(line)
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index ef760a503..4498eeec3 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -39,130 +39,130 @@ class Opts:
# appears on the command line.
append = optparse.make_option(
- '-a', '--append', action='store_true',
+ "-a", "--append", action="store_true",
help="Append coverage data to .coverage, otherwise it starts clean each time.",
)
keep = optparse.make_option(
- '', '--keep', action='store_true',
+ "", "--keep", action="store_true",
help="Keep original coverage files, otherwise they are deleted.",
)
branch = optparse.make_option(
- '', '--branch', action='store_true',
+ "", "--branch", action="store_true",
help="Measure branch coverage in addition to statement coverage.",
)
concurrency = optparse.make_option(
- '', '--concurrency', action='store', metavar="LIBS",
+ "", "--concurrency", action="store", metavar="LIBS",
help=(
"Properly measure code using a concurrency library. " +
"Valid values are: {}, or a comma-list of them."
).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))),
)
context = optparse.make_option(
- '', '--context', action='store', metavar="LABEL",
+ "", "--context", action="store", metavar="LABEL",
help="The context label to record for this coverage run.",
)
contexts = optparse.make_option(
- '', '--contexts', action='store', metavar="REGEX1,REGEX2,...",
+ "", "--contexts", action="store", metavar="REGEX1,REGEX2,...",
help=(
"Only display data from lines covered in the given contexts. " +
"Accepts Python regexes, which must be quoted."
),
)
combine_datafile = optparse.make_option(
- '', '--data-file', action='store', metavar="DATAFILE",
+ "", "--data-file", action="store", metavar="DATAFILE",
help=(
"Base name of the data files to operate on. " +
"Defaults to '.coverage'. [env: COVERAGE_FILE]"
),
)
input_datafile = optparse.make_option(
- '', '--data-file', action='store', metavar="INFILE",
+ "", "--data-file", action="store", metavar="INFILE",
help=(
"Read coverage data for report generation from this file. " +
"Defaults to '.coverage'. [env: COVERAGE_FILE]"
),
)
output_datafile = optparse.make_option(
- '', '--data-file', action='store', metavar="OUTFILE",
+ "", "--data-file", action="store", metavar="OUTFILE",
help=(
"Write the recorded coverage data to this file. " +
"Defaults to '.coverage'. [env: COVERAGE_FILE]"
),
)
debug = optparse.make_option(
- '', '--debug', action='store', metavar="OPTS",
+ "", "--debug", action="store", metavar="OPTS",
help="Debug options, separated by commas. [env: COVERAGE_DEBUG]",
)
directory = optparse.make_option(
- '-d', '--directory', action='store', metavar="DIR",
+ "-d", "--directory", action="store", metavar="DIR",
help="Write the output files to DIR.",
)
fail_under = optparse.make_option(
- '', '--fail-under', action='store', metavar="MIN", type="float",
+ "", "--fail-under", action="store", metavar="MIN", type="float",
help="Exit with a status of 2 if the total coverage is less than MIN.",
)
format = optparse.make_option(
- '', '--format', action='store', metavar="FORMAT",
+ "", "--format", action="store", metavar="FORMAT",
help="Output format, either text (default), markdown, or total.",
)
help = optparse.make_option(
- '-h', '--help', action='store_true',
+ "-h", "--help", action="store_true",
help="Get help on this command.",
)
ignore_errors = optparse.make_option(
- '-i', '--ignore-errors', action='store_true',
+ "-i", "--ignore-errors", action="store_true",
help="Ignore errors while reading source files.",
)
include = optparse.make_option(
- '', '--include', action='store', metavar="PAT1,PAT2,...",
+ "", "--include", action="store", metavar="PAT1,PAT2,...",
help=(
"Include only files whose paths match one of these patterns. " +
"Accepts shell-style wildcards, which must be quoted."
),
)
pylib = optparse.make_option(
- '-L', '--pylib', action='store_true',
+ "-L", "--pylib", action="store_true",
help=(
"Measure coverage even inside the Python installed library, " +
"which isn't done by default."
),
)
show_missing = optparse.make_option(
- '-m', '--show-missing', action='store_true',
+ "-m", "--show-missing", action="store_true",
help="Show line numbers of statements in each module that weren't executed.",
)
module = optparse.make_option(
- '-m', '--module', action='store_true',
+ "-m", "--module", action="store_true",
help=(
" is an importable Python module, not a script path, " +
"to be run as 'python -m' would run it."
),
)
omit = optparse.make_option(
- '', '--omit', action='store', metavar="PAT1,PAT2,...",
+ "", "--omit", action="store", metavar="PAT1,PAT2,...",
help=(
"Omit files whose paths match one of these patterns. " +
"Accepts shell-style wildcards, which must be quoted."
),
)
output_xml = optparse.make_option(
- '-o', '', action='store', dest="outfile", metavar="OUTFILE",
+ "-o", "", action="store", dest="outfile", metavar="OUTFILE",
help="Write the XML report to this file. Defaults to 'coverage.xml'",
)
output_json = optparse.make_option(
- '-o', '', action='store', dest="outfile", metavar="OUTFILE",
+ "-o", "", action="store", dest="outfile", metavar="OUTFILE",
help="Write the JSON report to this file. Defaults to 'coverage.json'",
)
output_lcov = optparse.make_option(
- '-o', '', action='store', dest='outfile', metavar="OUTFILE",
+ "-o", "", action="store", dest="outfile", metavar="OUTFILE",
help="Write the LCOV report to this file. Defaults to 'coverage.lcov'",
)
json_pretty_print = optparse.make_option(
- '', '--pretty-print', action='store_true',
+ "", "--pretty-print", action="store_true",
help="Format the JSON for human readers.",
)
parallel_mode = optparse.make_option(
- '-p', '--parallel-mode', action='store_true',
+ "-p", "--parallel-mode", action="store_true",
help=(
"Append the machine name, process id and random number to the " +
"data file name to simplify collecting data from " +
@@ -170,18 +170,18 @@ class Opts:
),
)
precision = optparse.make_option(
- '', '--precision', action='store', metavar='N', type=int,
+ "", "--precision", action="store", metavar="N", type=int,
help=(
"Number of digits after the decimal point to display for " +
"reported coverage percentages."
),
)
quiet = optparse.make_option(
- '-q', '--quiet', action='store_true',
+ "-q", "--quiet", action="store_true",
help="Don't print messages about what is happening.",
)
rcfile = optparse.make_option(
- '', '--rcfile', action='store',
+ "", "--rcfile", action="store",
help=(
"Specify configuration file. " +
"By default '.coveragerc', 'setup.cfg', 'tox.ini', and " +
@@ -189,45 +189,45 @@ class Opts:
),
)
show_contexts = optparse.make_option(
- '--show-contexts', action='store_true',
+ "--show-contexts", action="store_true",
help="Show contexts for covered lines.",
)
skip_covered = optparse.make_option(
- '--skip-covered', action='store_true',
+ "--skip-covered", action="store_true",
help="Skip files with 100% coverage.",
)
no_skip_covered = optparse.make_option(
- '--no-skip-covered', action='store_false', dest='skip_covered',
+ "--no-skip-covered", action="store_false", dest="skip_covered",
help="Disable --skip-covered.",
)
skip_empty = optparse.make_option(
- '--skip-empty', action='store_true',
+ "--skip-empty", action="store_true",
help="Skip files with no code.",
)
sort = optparse.make_option(
- '--sort', action='store', metavar='COLUMN',
+ "--sort", action="store", metavar="COLUMN",
help=(
"Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " +
"Default is name."
),
)
source = optparse.make_option(
- '', '--source', action='store', metavar="SRC1,SRC2,...",
+ "", "--source", action="store", metavar="SRC1,SRC2,...",
help="A list of directories or importable names of code to measure.",
)
timid = optparse.make_option(
- '', '--timid', action='store_true',
+ "", "--timid", action="store_true",
help=(
"Use a simpler but slower trace method. Try this if you get " +
"seemingly impossible results!"
),
)
title = optparse.make_option(
- '', '--title', action='store', metavar="TITLE",
+ "", "--title", action="store", metavar="TITLE",
help="A text string to use as the title on the HTML.",
)
version = optparse.make_option(
- '', '--version', action='store_true',
+ "", "--version", action="store_true",
help="Display version information and exit.",
)
@@ -369,7 +369,7 @@ def get_prog_name(self) -> str:
]
COMMANDS = {
- 'annotate': CmdOptionParser(
+ "annotate": CmdOptionParser(
"annotate",
[
Opts.directory,
@@ -385,7 +385,7 @@ def get_prog_name(self) -> str:
),
),
- 'combine': CmdOptionParser(
+ "combine": CmdOptionParser(
"combine",
[
Opts.append,
@@ -404,7 +404,7 @@ def get_prog_name(self) -> str:
),
),
- 'debug': CmdOptionParser(
+ "debug": CmdOptionParser(
"debug", GLOBAL_ARGS,
usage="",
description=(
@@ -419,7 +419,7 @@ def get_prog_name(self) -> str:
),
),
- 'erase': CmdOptionParser(
+ "erase": CmdOptionParser(
"erase",
[
Opts.combine_datafile
@@ -427,13 +427,13 @@ def get_prog_name(self) -> str:
description="Erase previously collected coverage data.",
),
- 'help': CmdOptionParser(
+ "help": CmdOptionParser(
"help", GLOBAL_ARGS,
usage="[command]",
description="Describe how to use coverage.py",
),
- 'html': CmdOptionParser(
+ "html": CmdOptionParser(
"html",
[
Opts.contexts,
@@ -459,7 +459,7 @@ def get_prog_name(self) -> str:
),
),
- 'json': CmdOptionParser(
+ "json": CmdOptionParser(
"json",
[
Opts.contexts,
@@ -477,7 +477,7 @@ def get_prog_name(self) -> str:
description="Generate a JSON report of coverage results.",
),
- 'lcov': CmdOptionParser(
+ "lcov": CmdOptionParser(
"lcov",
[
Opts.input_datafile,
@@ -492,7 +492,7 @@ def get_prog_name(self) -> str:
description="Generate an LCOV report of coverage results.",
),
- 'report': CmdOptionParser(
+ "report": CmdOptionParser(
"report",
[
Opts.contexts,
@@ -513,7 +513,7 @@ def get_prog_name(self) -> str:
description="Report coverage statistics on modules.",
),
- 'run': CmdOptionParser(
+ "run": CmdOptionParser(
"run",
[
Opts.append,
@@ -533,7 +533,7 @@ def get_prog_name(self) -> str:
description="Run a Python program, measuring code execution.",
),
- 'xml': CmdOptionParser(
+ "xml": CmdOptionParser(
"xml",
[
Opts.input_datafile,
@@ -560,12 +560,12 @@ def show_help(
assert error or topic or parser
program_path = sys.argv[0]
- if program_path.endswith(os.path.sep + '__main__.py'):
+ if program_path.endswith(os.path.sep + "__main__.py"):
# The path is the main module of a package; get that path instead.
program_path = os.path.dirname(program_path)
program_name = os.path.basename(program_path)
if env.WINDOWS:
- # entry_points={'console_scripts':...} on Windows makes files
+ # entry_points={"console_scripts":...} on Windows makes files
# called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
# invoke coverage-script.py, coverage3-script.py, and
# coverage-3.5-script.py. argv[0] is the .py file, but we want to
@@ -576,11 +576,11 @@ def show_help(
help_params = dict(coverage.__dict__)
help_params["__url__"] = __url__
- help_params['program_name'] = program_name
+ help_params["program_name"] = program_name
if HAS_CTRACER:
- help_params['extension_modifier'] = 'with C extension'
+ help_params["extension_modifier"] = "with C extension"
else:
- help_params['extension_modifier'] = 'without C extension'
+ help_params["extension_modifier"] = "without C extension"
if error:
print(error, file=sys.stderr)
@@ -590,7 +590,7 @@ def show_help(
print()
else:
assert topic is not None
- help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
+ help_msg = textwrap.dedent(HELP_TOPICS.get(topic, "")).strip()
if help_msg:
print(help_msg.format(**help_params))
else:
@@ -618,13 +618,13 @@ def command_line(self, argv: List[str]) -> int:
"""
# Collect the command-line options.
if not argv:
- show_help(topic='minimum_help')
+ show_help(topic="minimum_help")
return OK
# The command syntax we parse depends on the first argument. Global
# switch syntax always starts with an option.
parser: Optional[optparse.OptionParser]
- self.global_option = argv[0].startswith('-')
+ self.global_option = argv[0].startswith("-")
if self.global_option:
parser = GlobalOptionParser()
else:
@@ -702,7 +702,7 @@ def command_line(self, argv: List[str]) -> int:
# We need to be able to import from the current directory, because
# plugins may try to, for example, to read Django settings.
- sys.path.insert(0, '')
+ sys.path.insert(0, "")
self.coverage.load()
@@ -786,7 +786,7 @@ def do_help(
# Handle help.
if options.help:
if self.global_option:
- show_help(topic='help')
+ show_help(topic="help")
else:
show_help(parser=parser)
return True
@@ -800,12 +800,12 @@ def do_help(
else:
show_help(topic=a)
else:
- show_help(topic='help')
+ show_help(topic="help")
return True
# Handle version.
if options.version:
- show_help(topic='version')
+ show_help(topic="version")
return True
return False
@@ -835,7 +835,7 @@ def do_run(self, options: optparse.Values, args: List[str]) -> int:
if options.concurrency == "multiprocessing":
# Can't set other run-affecting command line options with
# multiprocessing.
- for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
+ for opt_name in ["branch", "include", "omit", "pylib", "source", "timid"]:
# As it happens, all of these options have no default, meaning
# they will be None if they have not been specified.
if getattr(options, opt_name) is not None:
@@ -913,7 +913,7 @@ def unshell_list(s: str) -> Optional[List[str]]:
# line, but (not) helpfully, the single quotes are included in the
# argument, so we have to strip them off here.
s = s.strip("'")
- return s.split(',')
+ return s.split(",")
def unglob_args(args: List[str]) -> List[str]:
@@ -921,7 +921,7 @@ def unglob_args(args: List[str]) -> List[str]:
if env.WINDOWS:
globbed = []
for arg in args:
- if '?' in arg or '*' in arg:
+ if "?" in arg or "*" in arg:
globbed.extend(glob.glob(arg))
else:
globbed.append(arg)
@@ -930,7 +930,7 @@ def unglob_args(args: List[str]) -> List[str]:
HELP_TOPICS = {
- 'help': """\
+ "help": """\
Coverage.py, version {__version__} {extension_modifier}
Measure, collect, and report on code coverage in Python programs.
@@ -952,11 +952,11 @@ def unglob_args(args: List[str]) -> List[str]:
Use "{program_name} help " for detailed help on any command.
""",
- 'minimum_help': """\
+ "minimum_help": """\
Code coverage for Python, version {__version__} {extension_modifier}. Use '{program_name} help' for help.
""",
- 'version': """\
+ "version": """\
Coverage.py, version {__version__} {extension_modifier}
""",
}
@@ -1008,6 +1008,6 @@ def main( # pylint: disable=functi
try:
return original_main(argv)
finally:
- data, _ = profiler.query(re_filter='coverage', max_records=100)
- print(profiler.show(query=data, limit=100, sep='', col=''))
+ data, _ = profiler.query(re_filter="coverage", max_records=100)
+ print(profiler.show(query=data, limit=100, sep="", col=""))
profiler.cancel()
diff --git a/coverage/config.py b/coverage/config.py
index 9518e5356..1edbe0de4 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -114,8 +114,8 @@ def getlist(self, section: str, option: str) -> List[str]:
"""
value_list = self.get(section, option)
values = []
- for value_line in value_list.split('\n'):
- for value in value_line.split(','):
+ for value_line in value_list.split("\n"):
+ for value in value_line.split(","):
value = value.strip()
if value:
values.append(value)
@@ -150,20 +150,20 @@ def getregexlist(self, section: str, option: str) -> List[str]:
# The default line exclusion regexes.
DEFAULT_EXCLUDE = [
- r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)',
+ r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)",
]
# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
- r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)',
+ r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)",
]
# The default partial branch regexes, based on Python semantics.
# These are any Python branching constructs that can't actually execute all
# their branches.
DEFAULT_PARTIAL_ALWAYS = [
- 'while (True|1|False|0):',
- 'if (True|1|False|0):',
+ "while (True|1|False|0):",
+ "if (True|1|False|0):",
]
@@ -286,7 +286,7 @@ def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool)
"""
_, ext = os.path.splitext(filename)
cp: TConfigParser
- if ext == '.toml':
+ if ext == ".toml":
cp = TomlConfigParser(our_file)
else:
cp = HandyConfigParser(our_file)
@@ -328,9 +328,9 @@ def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool)
)
# [paths] is special
- if cp.has_section('paths'):
- for option in cp.options('paths'):
- self.paths[option] = cp.getlist('paths', option)
+ if cp.has_section("paths"):
+ for option in cp.options("paths"):
+ self.paths[option] = cp.getlist("paths", option)
any_set = True
# plugins can have options
@@ -370,64 +370,64 @@ def copy(self) -> CoverageConfig:
# configuration value from the file.
# [run]
- ('branch', 'run:branch', 'boolean'),
- ('command_line', 'run:command_line'),
- ('concurrency', 'run:concurrency', 'list'),
- ('context', 'run:context'),
- ('cover_pylib', 'run:cover_pylib', 'boolean'),
- ('data_file', 'run:data_file'),
- ('debug', 'run:debug', 'list'),
- ('debug_file', 'run:debug_file'),
- ('disable_warnings', 'run:disable_warnings', 'list'),
- ('dynamic_context', 'run:dynamic_context'),
- ('parallel', 'run:parallel', 'boolean'),
- ('plugins', 'run:plugins', 'list'),
- ('relative_files', 'run:relative_files', 'boolean'),
- ('run_include', 'run:include', 'list'),
- ('run_omit', 'run:omit', 'list'),
- ('sigterm', 'run:sigterm', 'boolean'),
- ('source', 'run:source', 'list'),
- ('source_pkgs', 'run:source_pkgs', 'list'),
- ('timid', 'run:timid', 'boolean'),
- ('_crash', 'run:_crash'),
+ ("branch", "run:branch", "boolean"),
+ ("command_line", "run:command_line"),
+ ("concurrency", "run:concurrency", "list"),
+ ("context", "run:context"),
+ ("cover_pylib", "run:cover_pylib", "boolean"),
+ ("data_file", "run:data_file"),
+ ("debug", "run:debug", "list"),
+ ("debug_file", "run:debug_file"),
+ ("disable_warnings", "run:disable_warnings", "list"),
+ ("dynamic_context", "run:dynamic_context"),
+ ("parallel", "run:parallel", "boolean"),
+ ("plugins", "run:plugins", "list"),
+ ("relative_files", "run:relative_files", "boolean"),
+ ("run_include", "run:include", "list"),
+ ("run_omit", "run:omit", "list"),
+ ("sigterm", "run:sigterm", "boolean"),
+ ("source", "run:source", "list"),
+ ("source_pkgs", "run:source_pkgs", "list"),
+ ("timid", "run:timid", "boolean"),
+ ("_crash", "run:_crash"),
# [report]
- ('exclude_list', 'report:exclude_lines', 'regexlist'),
- ('exclude_also', 'report:exclude_also', 'regexlist'),
- ('fail_under', 'report:fail_under', 'float'),
- ('format', 'report:format', 'boolean'),
- ('ignore_errors', 'report:ignore_errors', 'boolean'),
- ('include_namespace_packages', 'report:include_namespace_packages', 'boolean'),
- ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
- ('partial_list', 'report:partial_branches', 'regexlist'),
- ('precision', 'report:precision', 'int'),
- ('report_contexts', 'report:contexts', 'list'),
- ('report_include', 'report:include', 'list'),
- ('report_omit', 'report:omit', 'list'),
- ('show_missing', 'report:show_missing', 'boolean'),
- ('skip_covered', 'report:skip_covered', 'boolean'),
- ('skip_empty', 'report:skip_empty', 'boolean'),
- ('sort', 'report:sort'),
+ ("exclude_list", "report:exclude_lines", "regexlist"),
+ ("exclude_also", "report:exclude_also", "regexlist"),
+ ("fail_under", "report:fail_under", "float"),
+ ("format", "report:format", "boolean"),
+ ("ignore_errors", "report:ignore_errors", "boolean"),
+ ("include_namespace_packages", "report:include_namespace_packages", "boolean"),
+ ("partial_always_list", "report:partial_branches_always", "regexlist"),
+ ("partial_list", "report:partial_branches", "regexlist"),
+ ("precision", "report:precision", "int"),
+ ("report_contexts", "report:contexts", "list"),
+ ("report_include", "report:include", "list"),
+ ("report_omit", "report:omit", "list"),
+ ("show_missing", "report:show_missing", "boolean"),
+ ("skip_covered", "report:skip_covered", "boolean"),
+ ("skip_empty", "report:skip_empty", "boolean"),
+ ("sort", "report:sort"),
# [html]
- ('extra_css', 'html:extra_css'),
- ('html_dir', 'html:directory'),
- ('html_skip_covered', 'html:skip_covered', 'boolean'),
- ('html_skip_empty', 'html:skip_empty', 'boolean'),
- ('html_title', 'html:title'),
- ('show_contexts', 'html:show_contexts', 'boolean'),
+ ("extra_css", "html:extra_css"),
+ ("html_dir", "html:directory"),
+ ("html_skip_covered", "html:skip_covered", "boolean"),
+ ("html_skip_empty", "html:skip_empty", "boolean"),
+ ("html_title", "html:title"),
+ ("show_contexts", "html:show_contexts", "boolean"),
# [xml]
- ('xml_output', 'xml:output'),
- ('xml_package_depth', 'xml:package_depth', 'int'),
+ ("xml_output", "xml:output"),
+ ("xml_package_depth", "xml:package_depth", "int"),
# [json]
- ('json_output', 'json:output'),
- ('json_pretty_print', 'json:pretty_print', 'boolean'),
- ('json_show_contexts', 'json:show_contexts', 'boolean'),
+ ("json_output", "json:output"),
+ ("json_pretty_print", "json:pretty_print", "boolean"),
+ ("json_show_contexts", "json:show_contexts", "boolean"),
# [lcov]
- ('lcov_output', 'lcov:output'),
+ ("lcov_output", "lcov:output"),
]
def _set_attr_from_config_option(
@@ -435,7 +435,7 @@ def _set_attr_from_config_option(
cp: TConfigParser,
attr: str,
where: str,
- type_: str = '',
+ type_: str = "",
) -> bool:
"""Set an attribute on self if it exists in the ConfigParser.
@@ -444,7 +444,7 @@ def _set_attr_from_config_option(
"""
section, option = where.split(":")
if cp.has_option(section, option):
- method = getattr(cp, 'get' + type_)
+ method = getattr(cp, "get" + type_)
setattr(self, attr, method(section, option))
return True
return False
@@ -548,7 +548,7 @@ def config_files_to_try(config_file: Union[bool, str]) -> List[Tuple[str, bool,
specified_file = (config_file is not True)
if not specified_file:
# No file was specified. Check COVERAGE_RCFILE.
- rcfile = os.environ.get('COVERAGE_RCFILE')
+ rcfile = os.environ.get("COVERAGE_RCFILE")
if rcfile:
config_file = rcfile
specified_file = True
@@ -602,10 +602,10 @@ def read_coverage_config(
# $set_env.py: COVERAGE_DEBUG - Options for --debug.
# 3) from environment variables:
- env_data_file = os.environ.get('COVERAGE_FILE')
+ env_data_file = os.environ.get("COVERAGE_FILE")
if env_data_file:
config.data_file = env_data_file
- debugs = os.environ.get('COVERAGE_DEBUG')
+ debugs = os.environ.get("COVERAGE_DEBUG")
if debugs:
config.debug.extend(d.strip() for d in debugs.split(","))
diff --git a/coverage/control.py b/coverage/control.py
index 290da655c..e405a5bf4 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -84,7 +84,7 @@ class Coverage(TConfigurable):
cov.start()
#.. call your code ..
cov.stop()
- cov.html_report(directory='covhtml')
+ cov.html_report(directory="covhtml")
Note: in keeping with Python custom, names starting with underscore are
not part of the public API. They might stop working at any point. Please
@@ -343,7 +343,7 @@ def _post_init(self) -> None:
self._should_write_debug = False
self._write_startup_debug()
- # '[run] _crash' will raise an exception if the value is close by in
+ # "[run] _crash" will raise an exception if the value is close by in
# the call stack, for testing error handling.
if self.config._crash and self.config._crash in short_stack(limit=4):
raise RuntimeError(f"Crashing because called by {self.config._crash}")
@@ -380,7 +380,7 @@ def _should_trace(self, filename: str, frame: FrameType) -> TFileDisposition:
"""
assert self._inorout is not None
disp = self._inorout.should_trace(filename, frame)
- if self._debug.should('trace'):
+ if self._debug.should("trace"):
self._debug.write(disposition_debug_msg(disp))
return disp
@@ -392,7 +392,7 @@ def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool:
"""
assert self._inorout is not None
reason = self._inorout.check_include_omit_etc(filename, frame)
- if self._debug.should('trace'):
+ if self._debug.should("trace"):
if not reason:
msg = f"Including {filename!r}"
else:
@@ -420,7 +420,7 @@ def _warn(self, msg: str, slug: Optional[str] = None, once: bool = False) -> Non
self._warnings.append(msg)
if slug:
msg = f"{msg} ({slug})"
- if self._debug.should('pid'):
+ if self._debug.should("pid"):
msg = f"[{os.getpid()}] {msg}"
warnings.warn(msg, category=CoverageWarning, stacklevel=2)
@@ -566,7 +566,7 @@ def _init_for_start(self) -> None:
self._inorout = InOrOut(
config=self.config,
warn=self._warn,
- debug=(self._debug if self._debug.should('trace') else None),
+ debug=(self._debug if self._debug.should("trace") else None),
include_namespace_packages=self.config.include_namespace_packages,
)
self._inorout.plugins = self._plugins
@@ -653,7 +653,7 @@ def _atexit(self, event: str = "atexit") -> None:
self._debug.write(f"{event}: pid: {os.getpid()}, instance: {self!r}")
if self._started:
self.stop()
- if self._auto_save:
+ if self._auto_save or event == "sigterm":
self.save()
def _on_sigterm(self, signum_unused: int, frame_unused: Optional[FrameType]) -> None:
@@ -703,13 +703,13 @@ def switch_context(self, new_context: str) -> None:
self._collector.switch_context(new_context)
- def clear_exclude(self, which: str = 'exclude') -> None:
+ def clear_exclude(self, which: str = "exclude") -> None:
"""Clear the exclude list."""
self._init()
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
- def exclude(self, regex: str, which: str = 'exclude') -> None:
+ def exclude(self, regex: str, which: str = "exclude") -> None:
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
@@ -740,7 +740,7 @@ def _exclude_regex(self, which: str) -> str:
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
- def get_exclude_list(self, which: str = 'exclude') -> List[str]:
+ def get_exclude_list(self, which: str = "exclude") -> List[str]:
"""Return a list of excluded regex strings.
`which` indicates which list is desired. See :meth:`exclude` for the
@@ -969,7 +969,7 @@ def _get_file_reporters(self, morfs: Optional[Iterable[TMorf]] = None) -> List[F
return file_reporters
def _prepare_data_for_reporting(self) -> None:
- """Re-map data before reporting, to get implicit 'combine' behavior."""
+ """Re-map data before reporting, to get implicit "combine" behavior."""
if self.config.paths:
mapped_data = CoverageData(warn=self._warn, debug=self._debug, no_disk=True)
if self._data is not None:
@@ -1238,10 +1238,10 @@ def lcov_report(
) -> float:
"""Generate an LCOV report of coverage results.
- Each module in 'morfs' is included in the report. 'outfile' is the
+ Each module in `morfs` is included in the report. `outfile` is the
path to write the file to, "-" will write to stdout.
- See :meth 'report' for other arguments.
+ See :meth:`report` for other arguments.
.. versionadded:: 6.3
"""
@@ -1275,30 +1275,30 @@ def plugin_info(plugins: List[Any]) -> List[str]:
return entries
info = [
- ('coverage_version', covmod.__version__),
- ('coverage_module', covmod.__file__),
- ('tracer', self._collector.tracer_name() if self._collector is not None else "-none-"),
- ('CTracer', 'available' if HAS_CTRACER else "unavailable"),
- ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)),
- ('plugins.configurers', plugin_info(self._plugins.configurers)),
- ('plugins.context_switchers', plugin_info(self._plugins.context_switchers)),
- ('configs_attempted', self.config.attempted_config_files),
- ('configs_read', self.config.config_files_read),
- ('config_file', self.config.config_file),
- ('config_contents',
- repr(self.config._config_contents) if self.config._config_contents else '-none-'
+ ("coverage_version", covmod.__version__),
+ ("coverage_module", covmod.__file__),
+ ("tracer", self._collector.tracer_name() if self._collector is not None else "-none-"),
+ ("CTracer", "available" if HAS_CTRACER else "unavailable"),
+ ("plugins.file_tracers", plugin_info(self._plugins.file_tracers)),
+ ("plugins.configurers", plugin_info(self._plugins.configurers)),
+ ("plugins.context_switchers", plugin_info(self._plugins.context_switchers)),
+ ("configs_attempted", self.config.attempted_config_files),
+ ("configs_read", self.config.config_files_read),
+ ("config_file", self.config.config_file),
+ ("config_contents",
+ repr(self.config._config_contents) if self.config._config_contents else "-none-"
),
- ('data_file', self._data.data_filename() if self._data is not None else "-none-"),
- ('python', sys.version.replace('\n', '')),
- ('platform', platform.platform()),
- ('implementation', platform.python_implementation()),
- ('executable', sys.executable),
- ('def_encoding', sys.getdefaultencoding()),
- ('fs_encoding', sys.getfilesystemencoding()),
- ('pid', os.getpid()),
- ('cwd', os.getcwd()),
- ('path', sys.path),
- ('environment', human_sorted(
+ ("data_file", self._data.data_filename() if self._data is not None else "-none-"),
+ ("python", sys.version.replace("\n", "")),
+ ("platform", platform.platform()),
+ ("implementation", platform.python_implementation()),
+ ("executable", sys.executable),
+ ("def_encoding", sys.getdefaultencoding()),
+ ("fs_encoding", sys.getfilesystemencoding()),
+ ("pid", os.getpid()),
+ ("cwd", os.getcwd()),
+ ("path", sys.path),
+ ("environment", human_sorted(
f"{k} = {v}"
for k, v in os.environ.items()
if (
@@ -1306,7 +1306,7 @@ def plugin_info(plugins: List[Any]) -> List[str]:
(k in ("HOME", "TEMP", "TMP"))
)
)),
- ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))),
+ ("command_line", " ".join(getattr(sys, "argv", ["-none-"]))),
]
if self._inorout is not None:
@@ -1324,7 +1324,7 @@ def plugin_info(plugins: List[Any]) -> List[str]:
Coverage = decorate_methods( # type: ignore[misc]
show_calls(show_args=True),
- butnot=['get_data']
+ butnot=["get_data"]
)(Coverage)
diff --git a/coverage/data.py b/coverage/data.py
index c737d5939..c196ac7ab 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -133,7 +133,7 @@ def combine_parallel_data(
if f == data.data_filename():
# Sometimes we are combining into a file which is one of the
# parallel files. Skip that file.
- if data._debug.should('dataio'):
+ if data._debug.should("dataio"):
data._debug.write(f"Skipping combining ourself: {f!r}")
continue
@@ -153,7 +153,7 @@ def combine_parallel_data(
delete_this_one = not keep
if combine_this_one:
- if data._debug.should('dataio'):
+ if data._debug.should("dataio"):
data._debug.write(f"Combining data file {f!r}")
file_hashes.add(sha)
try:
@@ -177,7 +177,7 @@ def combine_parallel_data(
message(f"Skipping duplicate data {rel_file_name}")
if delete_this_one:
- if data._debug.should('dataio'):
+ if data._debug.should("dataio"):
data._debug.write(f"Deleting data file {f!r}")
file_be_gone(f)
diff --git a/coverage/debug.py b/coverage/debug.py
index d56a66bb8..3ef6dae8a 100644
--- a/coverage/debug.py
+++ b/coverage/debug.py
@@ -50,12 +50,12 @@ def __init__(
self.suppress_callers = False
filters = []
- if self.should('pid'):
+ if self.should("pid"):
filters.append(add_pid_and_tid)
self.output = DebugOutputFile.get_one(
output,
file_name=file_name,
- show_process=self.should('process'),
+ show_process=self.should("process"),
filters=filters,
)
self.raw_output = self.output.outfile
@@ -86,11 +86,11 @@ def write(self, msg: str) -> None:
"""
self.output.write(msg+"\n")
- if self.should('self'):
- caller_self = inspect.stack()[1][0].f_locals.get('self')
+ if self.should("self"):
+ caller_self = inspect.stack()[1][0].f_locals.get("self")
if caller_self is not None:
self.output.write(f"self: {caller_self!r}\n")
- if self.should('callers'):
+ if self.should("callers"):
dump_stack_frames(out=self.output, skip=1)
self.output.flush()
@@ -228,7 +228,7 @@ def add_pid_and_tid(text: str) -> str:
class AutoReprMixin:
"""A mixin implementing an automatic __repr__ for debugging."""
- auto_repr_ignore = ['auto_repr_ignore', '$coverage.object_id']
+ auto_repr_ignore = ["auto_repr_ignore", "$coverage.object_id"]
def __repr__(self) -> str:
show_attrs = (
@@ -251,7 +251,7 @@ def simplify(v: Any) -> Any: # pragma: debugging
elif isinstance(v, (list, tuple)):
return type(v)(simplify(vv) for vv in v)
elif hasattr(v, "__dict__"):
- return simplify({'.'+k: v for k, v in v.__dict__.items()})
+ return simplify({"."+k: v for k, v in v.__dict__.items()})
else:
return v
@@ -312,8 +312,8 @@ def __init__(
if self.show_process:
self.filters.insert(0, CwdTracker().filter)
self.write(f"New process: executable: {sys.executable!r}\n")
- self.write("New process: cmd: {!r}\n".format(getattr(sys, 'argv', None)))
- if hasattr(os, 'getppid'):
+ self.write("New process: cmd: {!r}\n".format(getattr(sys, "argv", None)))
+ if hasattr(os, "getppid"):
self.write(f"New process: pid: {os.getpid()!r}, parent pid: {os.getppid()!r}\n")
@classmethod
@@ -367,8 +367,8 @@ def get_one(
# a process-wide singleton. So stash it in sys.modules instead of
# on a class attribute. Yes, this is aggressively gross.
- SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one'
- SINGLETON_ATTR = 'the_one_and_is_interim'
+ SYS_MOD_NAME = "$coverage.debug.DebugOutputFile.the_one"
+ SINGLETON_ATTR = "the_one_and_is_interim"
@classmethod
def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None:
@@ -485,7 +485,7 @@ def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
def _clean_stack_line(s: str) -> str: # pragma: debugging
"""Simplify some paths in a stack trace, for compactness."""
s = s.strip()
- s = s.replace(os.path.dirname(__file__) + '/', '')
- s = s.replace(os.path.dirname(os.__file__) + '/', '')
- s = s.replace(sys.prefix + '/', '')
+ s = s.replace(os.path.dirname(__file__) + "/", "")
+ s = s.replace(os.path.dirname(os.__file__) + "/", "")
+ s = s.replace(sys.prefix + "/", "")
return s
diff --git a/coverage/env.py b/coverage/env.py
index b22292818..bdc2c7854 100644
--- a/coverage/env.py
+++ b/coverage/env.py
@@ -40,7 +40,7 @@ class PYBEHAVIOR:
# Does Python conform to PEP626, Precise line numbers for debugging and other tools.
# https://www.python.org/dev/peps/pep-0626
- pep626 = CPYTHON and (PYVERSION > (3, 10, 0, 'alpha', 4))
+ pep626 = CPYTHON and (PYVERSION > (3, 10, 0, "alpha", 4))
# Is "if __debug__" optimized away?
if PYPY:
@@ -60,7 +60,7 @@ class PYBEHAVIOR:
else:
optimize_if_not_debug = 1
else:
- if PYVERSION >= (3, 8, 0, 'beta', 1):
+ if PYVERSION >= (3, 8, 0, "beta", 1):
optimize_if_not_debug = 2
else:
optimize_if_not_debug = 1
@@ -69,7 +69,7 @@ class PYBEHAVIOR:
negative_lnotab = not (PYPY and PYPYVERSION < (7, 2))
# 3.7 changed how functions with only docstrings are numbered.
- docstring_only_function = (not PYPY) and ((3, 7, 0, 'beta', 5) <= PYVERSION <= (3, 10))
+ docstring_only_function = (not PYPY) and ((3, 7, 0, "beta", 5) <= PYVERSION <= (3, 10))
# When a break/continue/return statement in a try block jumps to a finally
# block, does the finally block do the break/continue/return (pre-3.8), or
@@ -93,7 +93,7 @@ class PYBEHAVIOR:
# CPython 3.11 now jumps to the decorator line again while executing
# the decorator.
- trace_decorator_line_again = (CPYTHON and PYVERSION > (3, 11, 0, 'alpha', 3, 0))
+ trace_decorator_line_again = (CPYTHON and PYVERSION > (3, 11, 0, "alpha", 3, 0))
# Are while-true loops optimized into absolute jumps with no loop setup?
nix_while_true = (PYVERSION >= (3, 8))
@@ -125,7 +125,7 @@ class PYBEHAVIOR:
keep_constant_test = pep626
# When leaving a with-block, do we visit the with-line again for the exit?
- exit_through_with = (PYVERSION >= (3, 10, 0, 'beta'))
+ exit_through_with = (PYVERSION >= (3, 10, 0, "beta"))
# Match-case construct.
match_case = (PYVERSION >= (3, 10))
@@ -135,20 +135,20 @@ class PYBEHAVIOR:
# Modules start with a line numbered zero. This means empty modules have
# only a 0-number line, which is ignored, giving a truly empty module.
- empty_is_empty = (PYVERSION >= (3, 11, 0, 'beta', 4))
+ empty_is_empty = (PYVERSION >= (3, 11, 0, "beta", 4))
# Coverage.py specifics.
# Are we using the C-implemented trace function?
-C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
+C_TRACER = os.getenv("COVERAGE_TEST_TRACER", "c") == "c"
# Are we coverage-measuring ourselves?
-METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
+METACOV = os.getenv("COVERAGE_COVERAGE", "") != ""
# Are we running our test suite?
# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
# test-specific behavior like AST checking.
-TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
+TESTING = os.getenv("COVERAGE_TESTING", "") == "True"
def debug_info() -> Iterable[Tuple[str, Any]]:
diff --git a/coverage/execfile.py b/coverage/execfile.py
index ef0277d61..aac4d30bb 100644
--- a/coverage/execfile.py
+++ b/coverage/execfile.py
@@ -172,7 +172,7 @@ def run(self) -> None:
self._prepare2()
# Create a module to serve as __main__
- main_mod = ModuleType('__main__')
+ main_mod = ModuleType("__main__")
from_pyc = self.arg0.endswith((".pyc", ".pyo"))
main_mod.__file__ = self.arg0
@@ -184,9 +184,9 @@ def run(self) -> None:
if self.spec is not None:
main_mod.__spec__ = self.spec
- main_mod.__builtins__ = sys.modules['builtins'] # type: ignore[attr-defined]
+ main_mod.__builtins__ = sys.modules["builtins"] # type: ignore[attr-defined]
- sys.modules['__main__'] = main_mod
+ sys.modules["__main__"] = main_mod
# Set sys.argv properly.
sys.argv = self.args
@@ -228,7 +228,7 @@ def run(self) -> None:
# is non-None when the exception is reported at the upper layer,
# and a nested exception is shown to the user. This getattr fixes
# it somehow? https://bitbucket.org/pypy/pypy/issue/1903
- getattr(err, '__context__', None)
+ getattr(err, "__context__", None)
# Call the excepthook.
try:
@@ -311,7 +311,7 @@ def make_code_from_pyc(filename: str) -> CodeType:
if magic != PYC_MAGIC_NUMBER:
raise NoCode(f"Bad magic number in .pyc file: {magic!r} != {PYC_MAGIC_NUMBER!r}")
- flags = struct.unpack('<L', pyc_file.read(4))[0]
+ flags = struct.unpack("<L", pyc_file.read(4))[0]
diff --git a/coverage/files.py b/coverage/files.py
@@ def zip_location(filename: str) -> Optional[Tuple[str, str]]:
name is in the zipfile.
"""
- for ext in ['.zip', '.whl', '.egg', '.pex']:
+ for ext in [".zip", ".whl", ".egg", ".pex"]:
zipbase, extension, inner = filename.partition(ext + sep(filename))
if extension:
zipfile = zipbase + ext
@@ -273,7 +273,7 @@ def match(self, module_name: str) -> bool:
if module_name.startswith(m):
if module_name == m:
return True
- if module_name[len(m)] == '.':
+ if module_name[len(m)] == ".":
# This is a module in the package
return True
@@ -433,7 +433,7 @@ def add(self, pattern: str, result: str) -> None:
# The pattern is meant to match a file path. Let's make it absolute
# unless it already is, or is meant to match any prefix.
if not self.relative:
- if not pattern.startswith('*') and not isabs_anywhere(pattern + pattern_sep):
+ if not pattern.startswith("*") and not isabs_anywhere(pattern + pattern_sep):
pattern = abs_file(pattern)
if not pattern.endswith(pattern_sep):
pattern += pattern_sep
diff --git a/coverage/html.py b/coverage/html.py
index ae09bc37d..570760604 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -5,11 +5,14 @@
from __future__ import annotations
+import collections
import datetime
+import functools
import json
import os
import re
import shutil
+import string # pylint: disable=deprecated-module
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING, cast
@@ -68,7 +71,7 @@ def write_html(fname: str, html: str) -> None:
"""Write `html` to `fname`, properly encoded."""
html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
with open(fname, "wb") as fout:
- fout.write(html.encode('ascii', 'xmlcharrefreplace'))
+ fout.write(html.encode("ascii", "xmlcharrefreplace"))
@dataclass
@@ -84,6 +87,7 @@ class LineData:
short_annotations: List[str]
long_annotations: List[str]
html: str = ""
+ context_str: Optional[str] = None
annotate: Optional[str] = None
annotate_long: Optional[str] = None
css_class: str = ""
@@ -130,11 +134,11 @@ def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData:
long_annotations = []
if lineno in analysis.excluded:
- category = 'exc'
+ category = "exc"
elif lineno in analysis.missing:
- category = 'mis'
+ category = "mis"
elif self.has_arcs and lineno in missing_branch_arcs:
- category = 'par'
+ category = "par"
for b in missing_branch_arcs[lineno]:
if b < 0:
short_annotations.append("exit")
@@ -142,7 +146,7 @@ def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData:
short_annotations.append(str(b))
long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
elif lineno in analysis.statements:
- category = 'run'
+ category = "run"
contexts = []
contexts_label = ""
@@ -185,6 +189,21 @@ def __init__(self, fr: FileReporter, analysis: Analysis) -> None:
self.html_filename = self.rootname + ".html"
+HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~"
+
+@functools.lru_cache(maxsize=None)
+def encode_int(n: int) -> str:
+ """Create a short HTML-safe string from an integer, using HTML_SAFE."""
+ if n == 0:
+ return HTML_SAFE[0]
+
+ r = []
+ while n:
+ n, t = divmod(n, len(HTML_SAFE))
+ r.append(HTML_SAFE[t])
+ return "".join(r)
+
+
class HtmlReporter:
"""HTML reporting."""
@@ -234,26 +253,26 @@ def __init__(self, cov: Coverage) -> None:
self.template_globals = {
# Functions available in the templates.
- 'escape': escape,
- 'pair': pair,
- 'len': len,
+ "escape": escape,
+ "pair": pair,
+ "len": len,
# Constants for this report.
- '__url__': __url__,
- '__version__': coverage.__version__,
- 'title': title,
- 'time_stamp': format_local_datetime(datetime.datetime.now()),
- 'extra_css': self.extra_css,
- 'has_arcs': self.has_arcs,
- 'show_contexts': self.config.show_contexts,
+ "__url__": __url__,
+ "__version__": coverage.__version__,
+ "title": title,
+ "time_stamp": format_local_datetime(datetime.datetime.now()),
+ "extra_css": self.extra_css,
+ "has_arcs": self.has_arcs,
+ "show_contexts": self.config.show_contexts,
# Constants for all reports.
# These css classes determine which lines are highlighted by default.
- 'category': {
- 'exc': 'exc show_exc',
- 'mis': 'mis show_mis',
- 'par': 'par run show_par',
- 'run': 'run',
+ "category": {
+ "exc": "exc show_exc",
+ "mis": "mis show_mis",
+ "par": "par run show_par",
+ "run": "run",
},
}
self.pyfile_html_source = read_data("pyfile.html")
@@ -367,6 +386,17 @@ def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) ->
# Write the HTML page for this file.
file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis)
+
+ contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts)
+ context_codes = {y: i for (i, y) in enumerate(x[0] for x in contexts.most_common())}
+ if context_codes:
+ contexts_json = json.dumps(
+ {encode_int(v): k for (k, v) in context_codes.items()},
+ indent=2,
+ )
+ else:
+ contexts_json = None
+
for ldata in file_data.lines:
# Build the HTML for the line.
html_parts = []
@@ -374,11 +404,20 @@ def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) ->
if tok_type == "ws":
html_parts.append(escape(tok_text))
else:
- tok_html = escape(tok_text) or '&nbsp;'
- html_parts.append(
- f'<span class="{tok_type}">{tok_html}</span>'
- )
- ldata.html = ''.join(html_parts)
+ tok_html = escape(tok_text) or "&nbsp;"
+ html_parts.append(f'<span class="{tok_type}">{tok_html}</span>')
+ ldata.html = "".join(html_parts)
+ if ldata.context_list:
+ encoded_contexts = [
+ encode_int(context_codes[c_context]) for c_context in ldata.context_list
+ ]
+ code_width = max(len(ec) for ec in encoded_contexts)
+ ldata.context_str = (
+ str(code_width)
+ + "".join(ec.ljust(code_width) for ec in encoded_contexts)
+ )
+ else:
+ ldata.context_str = ""
if ldata.short_annotations:
# 202F is NARROW NO-BREAK SPACE.
@@ -408,23 +447,24 @@ def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) ->
css_classes = []
if ldata.category:
css_classes.append(
- self.template_globals['category'][ldata.category] # type: ignore[index]
+ self.template_globals["category"][ldata.category] # type: ignore[index]
)
- ldata.css_class = ' '.join(css_classes) or "pln"
+ ldata.css_class = " ".join(css_classes) or "pln"
html_path = os.path.join(self.directory, ftr.html_filename)
html = self.source_tmpl.render({
**file_data.__dict__,
- 'prev_html': prev_html,
- 'next_html': next_html,
+ "contexts_json": contexts_json,
+ "prev_html": prev_html,
+ "next_html": next_html,
})
write_html(html_path, html)
# Save this file's information for the index file.
index_info: IndexInfoDict = {
- 'nums': ftr.analysis.numbers,
- 'html_filename': ftr.html_filename,
- 'relative_filename': ftr.fr.relative_filename(),
+ "nums": ftr.analysis.numbers,
+ "html_filename": ftr.html_filename,
+ "relative_filename": ftr.fr.relative_filename(),
}
self.file_summaries.append(index_info)
self.incr.set_index_info(ftr.rootname, index_info)
@@ -443,12 +483,12 @@ def index_file(self, first_html: str, final_html: str) -> None:
skipped_empty_msg = f"{n} empty file{plural(n)} skipped."
html = index_tmpl.render({
- 'files': self.file_summaries,
- 'totals': self.totals,
- 'skipped_covered_msg': skipped_covered_msg,
- 'skipped_empty_msg': skipped_empty_msg,
- 'first_html': first_html,
- 'final_html': final_html,
+ "files": self.file_summaries,
+ "totals": self.totals,
+ "skipped_covered_msg": skipped_covered_msg,
+ "skipped_empty_msg": skipped_empty_msg,
+ "first_html": first_html,
+ "final_html": final_html,
})
index_file = os.path.join(self.directory, "index.html")
@@ -498,7 +538,7 @@ def __init__(self, directory: str) -> None:
def reset(self) -> None:
"""Initialize to empty. Causes all files to be reported."""
- self.globals = ''
+ self.globals = ""
self.files: Dict[str, FileInfoDict] = {}
def read(self) -> None:
@@ -512,17 +552,17 @@ def read(self) -> None:
usable = False
else:
usable = True
- if status['format'] != self.STATUS_FORMAT:
+ if status["format"] != self.STATUS_FORMAT:
usable = False
- elif status['version'] != coverage.__version__:
+ elif status["version"] != coverage.__version__:
usable = False
if usable:
self.files = {}
- for filename, fileinfo in status['files'].items():
- fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
+ for filename, fileinfo in status["files"].items():
+ fileinfo["index"]["nums"] = Numbers(*fileinfo["index"]["nums"])
self.files[filename] = fileinfo
- self.globals = status['globals']
+ self.globals = status["globals"]
else:
self.reset()
@@ -531,18 +571,18 @@ def write(self) -> None:
status_file = os.path.join(self.directory, self.STATUS_FILE)
files = {}
for filename, fileinfo in self.files.items():
- index = fileinfo['index']
- index['nums'] = index['nums'].init_args() # type: ignore[typeddict-item]
+ index = fileinfo["index"]
+ index["nums"] = index["nums"].init_args() # type: ignore[typeddict-item]
files[filename] = fileinfo
status = {
- 'format': self.STATUS_FORMAT,
- 'version': coverage.__version__,
- 'globals': self.globals,
- 'files': files,
+ "format": self.STATUS_FORMAT,
+ "version": coverage.__version__,
+ "globals": self.globals,
+ "files": files,
}
with open(status_file, "w") as fout:
- json.dump(status, fout, separators=(',', ':'))
+ json.dump(status, fout, separators=(",", ":"))
def check_global_data(self, *data: Any) -> None:
"""Check the global data that can affect incremental reporting."""
@@ -561,7 +601,7 @@ def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) ->
`rootname` is the name being used for the file.
"""
m = Hasher()
- m.update(fr.source().encode('utf-8'))
+ m.update(fr.source().encode("utf-8"))
add_data_to_hash(data, fr.filename, m)
this_hash = m.hexdigest()
@@ -576,19 +616,19 @@ def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) ->
def file_hash(self, fname: str) -> str:
"""Get the hash of `fname`'s contents."""
- return self.files.get(fname, {}).get('hash', '') # type: ignore[call-overload]
+ return self.files.get(fname, {}).get("hash", "") # type: ignore[call-overload]
def set_file_hash(self, fname: str, val: str) -> None:
"""Set the hash of `fname`'s contents."""
- self.files.setdefault(fname, {})['hash'] = val # type: ignore[typeddict-item]
+ self.files.setdefault(fname, {})["hash"] = val # type: ignore[typeddict-item]
def index_info(self, fname: str) -> IndexInfoDict:
"""Get the information for index.html for `fname`."""
- return self.files.get(fname, {}).get('index', {}) # type: ignore
+ return self.files.get(fname, {}).get("index", {}) # type: ignore
def set_index_info(self, fname: str, info: IndexInfoDict) -> None:
"""Set the information for index.html for `fname`."""
- self.files.setdefault(fname, {})['index'] = info # type: ignore[typeddict-item]
+ self.files.setdefault(fname, {})["index"] = info # type: ignore[typeddict-item]
# Helpers for templates and generating HTML
diff --git a/coverage/htmlfiles/coverage_html.js b/coverage/htmlfiles/coverage_html.js
index 1c4eb9881..4c321182c 100644
--- a/coverage/htmlfiles/coverage_html.js
+++ b/coverage/htmlfiles/coverage_html.js
@@ -214,7 +214,7 @@ coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";
coverage.pyfile_ready = function () {
// If we're directed to a particular line number, highlight the line.
var frag = location.hash;
- if (frag.length > 2 && frag[1] === 't') {
+ if (frag.length > 2 && frag[1] === "t") {
document.querySelector(frag).closest(".n").classList.add("highlight");
coverage.set_sel(parseInt(frag.substr(2), 10));
} else {
@@ -257,6 +257,10 @@ coverage.pyfile_ready = function () {
coverage.init_scroll_markers();
coverage.wire_up_sticky_header();
+ document.querySelectorAll("[id^=ctxs]").forEach(
+ cbox => cbox.addEventListener("click", coverage.expand_contexts)
+ );
+
// Rebuild scroll markers when the window height changes.
window.addEventListener("resize", coverage.build_scroll_markers);
};
@@ -528,14 +532,14 @@ coverage.scroll_window = function (to_pos) {
coverage.init_scroll_markers = function () {
// Init some variables
- coverage.lines_len = document.querySelectorAll('#source > p').length;
+ coverage.lines_len = document.querySelectorAll("#source > p").length;
// Build html
coverage.build_scroll_markers();
};
coverage.build_scroll_markers = function () {
- const temp_scroll_marker = document.getElementById('scroll_marker')
+ const temp_scroll_marker = document.getElementById("scroll_marker")
if (temp_scroll_marker) temp_scroll_marker.remove();
// Don't build markers if the window has no scroll bar.
if (document.body.scrollHeight <= window.innerHeight) {
@@ -549,8 +553,8 @@ coverage.build_scroll_markers = function () {
const scroll_marker = document.createElement("div");
scroll_marker.id = "scroll_marker";
- document.getElementById('source').querySelectorAll(
- 'p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par'
+ document.getElementById("source").querySelectorAll(
+ "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par"
).forEach(element => {
const line_top = Math.floor(element.offsetTop * marker_scale);
const line_number = parseInt(element.querySelector(".n a").id.substr(1));
@@ -577,24 +581,40 @@ coverage.build_scroll_markers = function () {
};
coverage.wire_up_sticky_header = function () {
- const header = document.querySelector('header');
+ const header = document.querySelector("header");
const header_bottom = (
- header.querySelector('.content h2').getBoundingClientRect().top -
+ header.querySelector(".content h2").getBoundingClientRect().top -
header.getBoundingClientRect().top
);
function updateHeader() {
if (window.scrollY > header_bottom) {
- header.classList.add('sticky');
+ header.classList.add("sticky");
} else {
- header.classList.remove('sticky');
+ header.classList.remove("sticky");
}
}
- window.addEventListener('scroll', updateHeader);
+ window.addEventListener("scroll", updateHeader);
updateHeader();
};
+coverage.expand_contexts = function (e) {
+ var ctxs = e.target.parentNode.querySelector(".ctxs");
+
+ if (!ctxs.classList.contains("expanded")) {
+ var ctxs_text = ctxs.textContent;
+ var width = Number(ctxs_text[0]);
+ ctxs.textContent = "";
+ for (var i = 1; i < ctxs_text.length; i += width) {
+ key = ctxs_text.substring(i, i + width).trim();
+ ctxs.appendChild(document.createTextNode(contexts[key]));
+ ctxs.appendChild(document.createElement("br"));
+ }
+ ctxs.classList.add("expanded");
+ }
+};
+
document.addEventListener("DOMContentLoaded", () => {
if (document.body.classList.contains("indexfile")) {
coverage.index_ready();
diff --git a/coverage/htmlfiles/pyfile.html b/coverage/htmlfiles/pyfile.html
index 8fcfc660a..bc8fa697d 100644
--- a/coverage/htmlfiles/pyfile.html
+++ b/coverage/htmlfiles/pyfile.html
@@ -11,6 +11,13 @@
{% if extra_css %}
{% endif %}
+
+ {% if contexts_json %}
+ <script type="application/json" id="contexts_json">{{ contexts_json }}</script>
+ {% endif %}
+
@@ -117,12 +124,8 @@
{% endif %}
{# Things that should appear below the line. #}
- {% if line.context_list %}
- <span class="ctxs">
- {% for context in line.context_list %}
- <span>{{context}}</span>
- {% endfor %}
- </span>
+ {% if line.context_str %}
+ <span class="ctxs">{{ line.context_str }}</span>
{% endif %}
{% endjoined %}
diff --git a/coverage/htmlfiles/style.css b/coverage/htmlfiles/style.css
index d6768a35e..11b24c4e7 100644
--- a/coverage/htmlfiles/style.css
+++ b/coverage/htmlfiles/style.css
@@ -258,12 +258,10 @@ kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em
@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } }
-#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; }
+#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; }
@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } }
-#source p .ctxs span { display: block; text-align: right; }
-
#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; }
#index table.index { margin-left: -.5em; }
diff --git a/coverage/htmlfiles/style.scss b/coverage/htmlfiles/style.scss
index 1e9103fd1..b1465154e 100644
--- a/coverage/htmlfiles/style.scss
+++ b/coverage/htmlfiles/style.scss
@@ -622,10 +622,7 @@ $border-indicator-width: .2em;
@include background-dark($dark-context-bg-color);
border-radius: .25em;
margin-right: 1.75em;
- span {
- display: block;
- text-align: right;
- }
+ text-align: right;
}
}
}
diff --git a/coverage/inorout.py b/coverage/inorout.py
index d0d0ef913..ff46bac0d 100644
--- a/coverage/inorout.py
+++ b/coverage/inorout.py
@@ -83,20 +83,20 @@ def name_for_module(filename: str, frame: Optional[FrameType]) -> str:
"""
module_globals = frame.f_globals if frame is not None else {}
- dunder_name: str = module_globals.get('__name__', None)
+ dunder_name: str = module_globals.get("__name__", None)
- if isinstance(dunder_name, str) and dunder_name != '__main__':
+ if isinstance(dunder_name, str) and dunder_name != "__main__":
# This is the usual case: an imported module.
return dunder_name
- loader = module_globals.get('__loader__', None)
- for attrname in ('fullname', 'name'): # attribute renamed in py3.2
+ loader = module_globals.get("__loader__", None)
+ for attrname in ("fullname", "name"): # attribute renamed in py3.2
if hasattr(loader, attrname):
fullname = getattr(loader, attrname)
else:
continue
- if isinstance(fullname, str) and fullname != '__main__':
+ if isinstance(fullname, str) and fullname != "__main__":
# Module loaded via: runpy -m
return fullname
@@ -110,12 +110,12 @@ def name_for_module(filename: str, frame: Optional[FrameType]) -> str:
def module_is_namespace(mod: ModuleType) -> bool:
"""Is the module object `mod` a PEP420 namespace module?"""
- return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
+ return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None
def module_has_file(mod: ModuleType) -> bool:
"""Does the module object `mod` have an existing __file__ ?"""
- mod__file__ = getattr(mod, '__file__', None)
+ mod__file__ = getattr(mod, "__file__", None)
if mod__file__ is None:
return False
return os.path.exists(mod__file__)
@@ -313,7 +313,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
disp.reason = reason
return disp
- if original_filename.startswith('<'):
+ if original_filename.startswith("<"):
return nope(disp, "original file name is not real")
if frame is not None:
@@ -323,10 +323,10 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
- dunder_file = frame.f_globals and frame.f_globals.get('__file__')
+ dunder_file = frame.f_globals and frame.f_globals.get("__file__")
if dunder_file:
filename = source_for_file(dunder_file)
- if original_filename and not original_filename.startswith('<'):
+ if original_filename and not original_filename.startswith("<"):
orig = os.path.basename(original_filename)
if orig != os.path.basename(filename):
# Files shouldn't be renamed when moved. This happens when
@@ -338,10 +338,10 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
# Empty string is pretty useless.
return nope(disp, "empty string isn't a file name")
- if filename.startswith('memory:'):
+ if filename.startswith("memory:"):
return nope(disp, "memory isn't traceable")
- if filename.startswith('<'):
+ if filename.startswith("<"):
# Lots of non-file execution is represented with artificial
# file names like "", "", or
# "". Don't ever trace these executions, since we
@@ -484,7 +484,7 @@ def warn_already_imported_files(self) -> None:
                    msg = f"Already imported a file that will be measured: {filename!r}"
self.warn(msg, slug="already-imported")
warned.add(filename)
- elif self.debug and self.debug.should('trace'):
+ elif self.debug and self.debug.should("trace"):
self.debug.write(
"Didn't trace already imported file {!r}: {}".format(
disp.original_filename, disp.reason
@@ -580,9 +580,9 @@ def sys_info(self) -> Iterable[Tuple[str, Any]]:
]
matcher_names = [
- 'source_match', 'source_pkgs_match',
- 'include_match', 'omit_match',
- 'cover_match', 'pylib_match', 'third_match', 'source_in_third_match',
+ "source_match", "source_pkgs_match",
+ "include_match", "omit_match",
+ "cover_match", "pylib_match", "third_match", "source_in_third_match",
]
for matcher_name in matcher_names:
@@ -590,7 +590,7 @@ def sys_info(self) -> Iterable[Tuple[str, Any]]:
if matcher:
matcher_info = matcher.info()
else:
- matcher_info = '-none-'
+ matcher_info = "-none-"
info.append((matcher_name, matcher_info))
return info
diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py
index 7ee1fb99f..24e33585c 100644
--- a/coverage/jsonreport.py
+++ b/coverage/jsonreport.py
@@ -60,20 +60,20 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
self.report_data["files"] = measured_files
self.report_data["totals"] = {
- 'covered_lines': self.total.n_executed,
- 'num_statements': self.total.n_statements,
- 'percent_covered': self.total.pc_covered,
- 'percent_covered_display': self.total.pc_covered_str,
- 'missing_lines': self.total.n_missing,
- 'excluded_lines': self.total.n_excluded,
+ "covered_lines": self.total.n_executed,
+ "num_statements": self.total.n_statements,
+ "percent_covered": self.total.pc_covered,
+ "percent_covered_display": self.total.pc_covered_str,
+ "missing_lines": self.total.n_missing,
+ "excluded_lines": self.total.n_excluded,
}
if coverage_data.has_arcs():
self.report_data["totals"].update({
- 'num_branches': self.total.n_branches,
- 'num_partial_branches': self.total.n_partial_branches,
- 'covered_branches': self.total.n_executed_branches,
- 'missing_branches': self.total.n_missing_branches,
+ "num_branches": self.total.n_branches,
+ "num_partial_branches": self.total.n_partial_branches,
+ "covered_branches": self.total.n_executed_branches,
+ "missing_branches": self.total.n_missing_branches,
})
json.dump(
@@ -89,32 +89,32 @@ def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Di
nums = analysis.numbers
self.total += nums
summary = {
- 'covered_lines': nums.n_executed,
- 'num_statements': nums.n_statements,
- 'percent_covered': nums.pc_covered,
- 'percent_covered_display': nums.pc_covered_str,
- 'missing_lines': nums.n_missing,
- 'excluded_lines': nums.n_excluded,
+ "covered_lines": nums.n_executed,
+ "num_statements": nums.n_statements,
+ "percent_covered": nums.pc_covered,
+ "percent_covered_display": nums.pc_covered_str,
+ "missing_lines": nums.n_missing,
+ "excluded_lines": nums.n_excluded,
}
reported_file = {
- 'executed_lines': sorted(analysis.executed),
- 'summary': summary,
- 'missing_lines': sorted(analysis.missing),
- 'excluded_lines': sorted(analysis.excluded),
+ "executed_lines": sorted(analysis.executed),
+ "summary": summary,
+ "missing_lines": sorted(analysis.missing),
+ "excluded_lines": sorted(analysis.excluded),
}
if self.config.json_show_contexts:
- reported_file['contexts'] = analysis.data.contexts_by_lineno(analysis.filename)
+ reported_file["contexts"] = analysis.data.contexts_by_lineno(analysis.filename)
if coverage_data.has_arcs():
summary.update({
- 'num_branches': nums.n_branches,
- 'num_partial_branches': nums.n_partial_branches,
- 'covered_branches': nums.n_executed_branches,
- 'missing_branches': nums.n_missing_branches,
+ "num_branches": nums.n_branches,
+ "num_partial_branches": nums.n_partial_branches,
+ "covered_branches": nums.n_executed_branches,
+ "missing_branches": nums.n_missing_branches,
})
- reported_file['executed_branches'] = list(
+ reported_file["executed_branches"] = list(
_convert_branch_arcs(analysis.executed_branch_arcs())
)
- reported_file['missing_branches'] = list(
+ reported_file["missing_branches"] = list(
_convert_branch_arcs(analysis.missing_branch_arcs())
)
return reported_file
diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py
index 1789c17e6..7d72e8135 100644
--- a/coverage/lcovreport.py
+++ b/coverage/lcovreport.py
@@ -33,7 +33,7 @@ def __init__(self, coverage: Coverage) -> None:
def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
"""Renders the full lcov report.
- 'morfs' is a list of modules or filenames
+ `morfs` is a list of modules or filenames
outfile is the file object to write the file into.
"""
diff --git a/coverage/misc.py b/coverage/misc.py
index e0658eb18..8cefa12e0 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -218,14 +218,14 @@ def update(self, v: Any) -> None:
self.update(v[k])
else:
for k in dir(v):
- if k.startswith('__'):
+ if k.startswith("__"):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
- self.hash.update(b'.')
+ self.hash.update(b".")
def hexdigest(self) -> str:
"""Retrieve the hex digest of the hash."""
@@ -292,7 +292,7 @@ def substitute_variables(text: str, variables: Mapping[str, str]) -> str:
)
"""
- dollar_groups = ('dollar', 'word1', 'word2')
+ dollar_groups = ("dollar", "word1", "word2")
def dollar_replace(match: re.Match[str]) -> str:
"""Called for each $replacement."""
@@ -302,11 +302,11 @@ def dollar_replace(match: re.Match[str]) -> str:
return "$"
elif word in variables:
return variables[word]
- elif match['strict']:
+ elif match["strict"]:
msg = f"Variable {word} is undefined: {text!r}"
raise CoverageException(msg)
else:
- return match['defval']
+ return match["defval"]
text = re.sub(dollar_pattern, dollar_replace, text)
return text
@@ -315,7 +315,7 @@ def dollar_replace(match: re.Match[str]) -> str:
def format_local_datetime(dt: datetime.datetime) -> str:
"""Return a string with local timezone representing the date.
"""
- return dt.astimezone().strftime('%Y-%m-%d %H:%M %z')
+ return dt.astimezone().strftime("%Y-%m-%d %H:%M %z")
def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType:
@@ -327,7 +327,7 @@ def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType
"""
if modfile is None:
- modfile = modname + '.py'
+ modfile = modname + ".py"
spec = importlib.util.spec_from_file_location(modname, modfile)
assert spec is not None
mod = importlib.util.module_from_spec(spec)
diff --git a/coverage/multiproc.py b/coverage/multiproc.py
index e11ca7b70..2fd8ad5dc 100644
--- a/coverage/multiproc.py
+++ b/coverage/multiproc.py
@@ -56,10 +56,10 @@ def __init__(self, rcfile: str) -> None:
self.rcfile = rcfile
def __getstate__(self) -> Dict[str, str]:
- return {'rcfile': self.rcfile}
+ return {"rcfile": self.rcfile}
def __setstate__(self, state: Dict[str, str]) -> None:
- patch_multiprocessing(state['rcfile'])
+ patch_multiprocessing(state["rcfile"])
def patch_multiprocessing(rcfile: str) -> None:
@@ -96,7 +96,7 @@ def patch_multiprocessing(rcfile: str) -> None:
def get_preparation_data_with_stowaway(name: str) -> Dict[str, Any]:
"""Get the original preparation data, and also insert our stowaway."""
d = original_get_preparation_data(name)
- d['stowaway'] = Stowaway(rcfile)
+ d["stowaway"] = Stowaway(rcfile)
return d
spawn.get_preparation_data = get_preparation_data_with_stowaway
diff --git a/coverage/numbits.py b/coverage/numbits.py
index 26e5c2725..71b974de5 100644
--- a/coverage/numbits.py
+++ b/coverage/numbits.py
@@ -36,7 +36,7 @@ def nums_to_numbits(nums: Iterable[int]) -> bytes:
nbytes = max(nums) // 8 + 1
except ValueError:
# nums was empty.
- return b''
+ return b""
b = bytearray(nbytes)
for num in nums:
b[num//8] |= 1 << num % 8
@@ -82,7 +82,7 @@ def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes:
"""
byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs)
- return intersection_bytes.rstrip(b'\0')
+ return intersection_bytes.rstrip(b"\0")
def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool:
@@ -130,7 +130,7 @@ def register_sqlite_functions(connection: sqlite3.Connection) -> None:
import sqlite3
from coverage.numbits import register_sqlite_functions
- conn = sqlite3.connect('example.db')
+ conn = sqlite3.connect("example.db")
register_sqlite_functions(conn)
c = conn.cursor()
# Kind of a nonsense query:
diff --git a/coverage/parser.py b/coverage/parser.py
index ae70b4f0f..e653a9ccd 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -60,7 +60,7 @@ def __init__(
self.exclude = exclude
# The text lines of the parsed code.
- self.lines: List[str] = self.text.split('\n')
+ self.lines: List[str] = self.text.split("\n")
# The normalized line numbers of the statements in the code. Exclusions
# are taken into account, and statements are adjusted to their first
@@ -149,13 +149,13 @@ def _raw_parse(self) -> None:
elif toktype == token.DEDENT:
indent -= 1
elif toktype == token.NAME:
- if ttext == 'class':
+ if ttext == "class":
# Class definitions look like branches in the bytecode, so
# we need to exclude them. The simplest way is to note the
- # lines with the 'class' keyword.
+ # lines with the "class" keyword.
self.raw_classdefs.add(slineno)
elif toktype == token.OP:
- if ttext == ':' and nesting == 0:
+ if ttext == ":" and nesting == 0:
should_exclude = (elineno in self.raw_excluded) or excluding_decorators
if not excluding and should_exclude:
# Start excluding a suite. We trigger off of the colon
@@ -165,7 +165,7 @@ def _raw_parse(self) -> None:
exclude_indent = indent
excluding = True
excluding_decorators = False
- elif ttext == '@' and first_on_line:
+ elif ttext == "@" and first_on_line:
# A decorator.
if elineno in self.raw_excluded:
excluding_decorators = True
@@ -763,7 +763,7 @@ def _line__Dict(self, node: ast.Dict) -> TLineNo:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
- # Unpacked dict literals `{**{'a':1}}` have None as the key,
+ # Unpacked dict literals `{**{"a":1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 4d1ee46e6..d5659268d 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -57,7 +57,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
if last_ttext.endswith("\\"):
inject_backslash = False
elif ttype == token.STRING:
- if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
+ if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\":
# It's a multi-line string and the first line ends with
# a backslash, so we don't need to inject another.
inject_backslash = False
@@ -113,7 +113,7 @@ def source_token_lines(source: str) -> TSourceTokenLines:
line: List[Tuple[str, str]] = []
col = 0
- source = source.expandtabs(8).replace('\r\n', '\n')
+ source = source.expandtabs(8).replace("\r\n", "\n")
tokgen = generate_tokens(source)
if env.PYBEHAVIOR.soft_keywords:
@@ -121,13 +121,13 @@ def source_token_lines(source: str) -> TSourceTokenLines:
for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
mark_start = True
- for part in re.split('(\n)', ttext):
- if part == '\n':
+ for part in re.split("(\n)", ttext):
+ if part == "\n":
yield line
line = []
col = 0
mark_end = False
- elif part == '':
+ elif part == "":
mark_end = False
elif ttype in ws_tokens:
mark_end = False
@@ -135,7 +135,7 @@ def source_token_lines(source: str) -> TSourceTokenLines:
if mark_start and scol > col:
line.append(("ws", " " * (scol - col)))
mark_start = False
- tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
+ tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
if ttype == token.NAME:
if keyword.iskeyword(ttext):
# Hard keywords are always keywords.
diff --git a/coverage/plugin.py b/coverage/plugin.py
index 5279c4d06..2c1ffada4 100644
--- a/coverage/plugin.py
+++ b/coverage/plugin.py
@@ -519,29 +519,29 @@ def source_token_lines(self) -> TSourceTokenLines:
Each line is a list of pairs, each pair is a token::
- [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+ [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ]
Each pair has a token class, and the token text. The token classes
are:
- * ``'com'``: a comment
- * ``'key'``: a keyword
- * ``'nam'``: a name, or identifier
- * ``'num'``: a number
- * ``'op'``: an operator
- * ``'str'``: a string literal
- * ``'ws'``: some white space
- * ``'txt'``: some other kind of text
+ * ``"com"``: a comment
+ * ``"key"``: a keyword
+ * ``"nam"``: a name, or identifier
+ * ``"num"``: a number
+ * ``"op"``: an operator
+ * ``"str"``: a string literal
+ * ``"ws"``: some white space
+ * ``"txt"``: some other kind of text
If you concatenate all the token texts, and then join them with
newlines, you should have your original source back.
The default implementation simply returns each line tagged as
- ``'txt'``.
+ ``"txt"``.
"""
for line in self.source().splitlines():
- yield [('txt', line)]
+ yield [("txt", line)]
def __eq__(self, other: Any) -> bool:
return isinstance(other, FileReporter) and self.filename == other.filename
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py
index 4ed02c5c0..c99fb5e30 100644
--- a/coverage/plugin_support.py
+++ b/coverage/plugin_support.py
@@ -114,7 +114,7 @@ def _add_plugin(
"""
plugin_name = f"{self.current_module}.{plugin.__class__.__name__}"
- if self.debug and self.debug.should('plugin'):
+ if self.debug and self.debug.should("plugin"):
self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}")
labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug)
plugin = DebugPluginWrapper(plugin, labelled)
@@ -150,7 +150,7 @@ def add_label(self, label: str) -> LabelledDebug:
def message_prefix(self) -> str:
"""The prefix to use on messages, combining the labels."""
- prefixes = self.labels + ['']
+ prefixes = self.labels + [""]
return ":\n".join(" "*i+label for i, label in enumerate(prefixes))
def write(self, message: str) -> None:
diff --git a/coverage/python.py b/coverage/python.py
index 744ab4cb8..3deb6819f 100644
--- a/coverage/python.py
+++ b/coverage/python.py
@@ -63,12 +63,12 @@ def get_python_source(filename: str) -> str:
        raise NoSource(f"No source for code: '{filename}'.")
# Replace \f because of http://bugs.python.org/issue19035
- source_bytes = source_bytes.replace(b'\f', b' ')
+ source_bytes = source_bytes.replace(b"\f", b" ")
source = source_bytes.decode(source_encoding(source_bytes), "replace")
# Python code should always end with a line with a newline.
- if source and source[-1] != '\n':
- source += '\n'
+ if source and source[-1] != "\n":
+ source += "\n"
return source
@@ -127,7 +127,7 @@ def source_for_file(filename: str) -> str:
def source_for_morf(morf: TMorf) -> str:
"""Get the source filename for the module-or-file `morf`."""
- if hasattr(morf, '__file__') and morf.__file__:
+ if hasattr(morf, "__file__") and morf.__file__:
filename = morf.__file__
elif isinstance(morf, types.ModuleType):
# A module should have had .__file__, otherwise we can't use it.
@@ -157,9 +157,9 @@ def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None:
fname = canonical_filename(filename)
super().__init__(fname)
- if hasattr(morf, '__name__'):
+ if hasattr(morf, "__name__"):
name = morf.__name__.replace(".", os.sep)
- if os.path.basename(filename).startswith('__init__.'):
+ if os.path.basename(filename).startswith("__init__."):
name += os.sep + "__init__"
name += ".py"
else:
@@ -183,7 +183,7 @@ def parser(self) -> PythonParser:
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
- exclude=self.coverage._exclude_regex('exclude'),
+ exclude=self.coverage._exclude_regex("exclude"),
)
self._parser.parse_source()
return self._parser
@@ -244,7 +244,7 @@ def should_be_python(self) -> bool:
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
- if ext.startswith('.py'):
+ if ext.startswith(".py"):
return True
# A file with no extension should be Python.
if not ext:
diff --git a/coverage/pytracer.py b/coverage/pytracer.py
index 6723c2a1b..81832b0fd 100644
--- a/coverage/pytracer.py
+++ b/coverage/pytracer.py
@@ -20,11 +20,11 @@
)
# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
-RESUME = dis.opmap.get('RESUME')
-RETURN_VALUE = dis.opmap['RETURN_VALUE']
+RESUME = dis.opmap.get("RESUME")
+RETURN_VALUE = dis.opmap["RETURN_VALUE"]
if RESUME is None:
- YIELD_VALUE = dis.opmap['YIELD_VALUE']
- YIELD_FROM = dis.opmap['YIELD_FROM']
+ YIELD_VALUE = dis.opmap["YIELD_VALUE"]
+ YIELD_FROM = dis.opmap["YIELD_FROM"]
YIELD_FROM_OFFSET = 0 if env.PYPY else 2
# When running meta-coverage, this file can try to trace itself, which confuses
@@ -78,7 +78,7 @@ def __init__(self) -> None:
self.in_atexit = False
# On exit, self.in_atexit = True
- atexit.register(setattr, self, 'in_atexit', True)
+ atexit.register(setattr, self, "in_atexit", True)
# Cache a bound method on the instance, so that we don't have to
# re-create a bound method object all the time.
@@ -150,10 +150,10 @@ def _trace(
)
return None
- # if event != 'call' and frame.f_code.co_filename != self.cur_file_name:
+ # if event != "call" and frame.f_code.co_filename != self.cur_file_name:
# self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)
- if event == 'call':
+ if event == "call":
# Should we start a new context?
if self.should_start_context and self.context is None:
context_maybe = self.should_start_context(frame)
@@ -215,13 +215,13 @@ def _trace(
oparg = frame.f_code.co_code[frame.f_lasti + 1]
real_call = (oparg == 0)
else:
- real_call = (getattr(frame, 'f_lasti', -1) < 0)
+ real_call = (getattr(frame, "f_lasti", -1) < 0)
if real_call:
self.last_line = -frame.f_code.co_firstlineno
else:
self.last_line = frame.f_lineno
- elif event == 'line':
+ elif event == "line":
# Record an executed line.
if self.cur_file_data is not None:
flineno: TLineNo = frame.f_lineno
@@ -232,7 +232,7 @@ def _trace(
cast(Set[TLineNo], self.cur_file_data).add(flineno)
self.last_line = flineno
- elif event == 'return':
+ elif event == "return":
if self.trace_arcs and self.cur_file_data:
# Record an arc leaving the function, but beware that a
# "return" event might just mean yielding from a generator.
diff --git a/coverage/results.py b/coverage/results.py
index 2731700ed..ea6dc207f 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -357,7 +357,7 @@ def format_lines(
dest = (ex if ex > 0 else "exit")
line_items.append((line, f"{line}->{dest}"))
- ret = ', '.join(t[-1] for t in sorted(line_items))
+ ret = ", ".join(t[-1] for t in sorted(line_items))
return ret
diff --git a/coverage/summary.py b/coverage/summary.py
index c4c7fd1de..5d373ec52 100644
--- a/coverage/summary.py
+++ b/coverage/summary.py
@@ -137,8 +137,8 @@ def _report_markdown(
max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
header_str = "".join(header_items)
- rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, '-')] +
- ["-: |".rjust(len(item)-1, '-') for item in header_items[1:]]
+ rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] +
+ ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]]
)
# Write the header
@@ -223,10 +223,10 @@ def tabular_report(self) -> None:
# Line sorting.
sort_option = (self.config.sort or "name").lower()
reverse = False
- if sort_option[0] == '-':
+ if sort_option[0] == "-":
reverse = True
sort_option = sort_option[1:]
- elif sort_option[0] == '+':
+ elif sort_option[0] == "+":
sort_option = sort_option[1:]
sort_idx = column_order.get(sort_option)
if sort_idx is None:
@@ -250,12 +250,12 @@ def tabular_report(self) -> None:
# Create other final lines.
end_lines = []
if self.config.skip_covered and self.skipped_count:
- file_suffix = 's' if self.skipped_count>1 else ''
+ file_suffix = "s" if self.skipped_count>1 else ""
end_lines.append(
f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage."
)
if self.config.skip_empty and self.empty_count:
- file_suffix = 's' if self.empty_count > 1 else ''
+ file_suffix = "s" if self.empty_count > 1 else ""
end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")
if self.output_format == "markdown":
diff --git a/coverage/templite.py b/coverage/templite.py
index 897a58f95..11ea847be 100644
--- a/coverage/templite.py
+++ b/coverage/templite.py
@@ -109,11 +109,11 @@ class Templite:
You are interested in {{topic}}.
{% endif %}
''',
- {'upper': str.upper},
+ {"upper": str.upper},
)
text = templite.render({
- 'name': "Ned",
- 'topics': ['Python', 'Geometry', 'Juggling'],
+ "name": "Ned",
+ "topics": ["Python", "Geometry", "Juggling"],
})
"""
@@ -161,37 +161,37 @@ def flush_output() -> None:
squash = in_joined = False
for token in tokens:
- if token.startswith('{'):
+ if token.startswith("{"):
start, end = 2, -2
- squash = (token[-3] == '-')
+ squash = (token[-3] == "-")
if squash:
end = -3
- if token.startswith('{#'):
+ if token.startswith("{#"):
# Comment: ignore it and move on.
continue
- elif token.startswith('{{'):
+ elif token.startswith("{{"):
# An expression to evaluate.
expr = self._expr_code(token[start:end].strip())
buffered.append("to_str(%s)" % expr)
else:
- # token.startswith('{%')
+ # token.startswith("{%")
# Action tag: split into words and parse further.
flush_output()
words = token[start:end].strip().split()
- if words[0] == 'if':
+ if words[0] == "if":
# An if statement: evaluate the expression to determine if.
if len(words) != 2:
self._syntax_error("Don't understand if", token)
- ops_stack.append('if')
+ ops_stack.append("if")
code.add_line("if %s:" % self._expr_code(words[1]))
code.indent()
- elif words[0] == 'for':
+ elif words[0] == "for":
# A loop: iterate over expression result.
- if len(words) != 4 or words[2] != 'in':
+ if len(words) != 4 or words[2] != "in":
self._syntax_error("Don't understand for", token)
- ops_stack.append('for')
+ ops_stack.append("for")
self._variable(words[1], self.loop_vars)
code.add_line(
"for c_{} in {}:".format(
@@ -200,10 +200,10 @@ def flush_output() -> None:
)
)
code.indent()
- elif words[0] == 'joined':
- ops_stack.append('joined')
+ elif words[0] == "joined":
+ ops_stack.append("joined")
in_joined = True
- elif words[0].startswith('end'):
+ elif words[0].startswith("end"):
# Endsomething. Pop the ops stack.
if len(words) != 1:
self._syntax_error("Don't understand end", token)
@@ -213,7 +213,7 @@ def flush_output() -> None:
start_what = ops_stack.pop()
if start_what != end_what:
self._syntax_error("Mismatched end tag", end_what)
- if end_what == 'joined':
+ if end_what == "joined":
in_joined = False
else:
code.dedent()
@@ -236,14 +236,14 @@ def flush_output() -> None:
for var_name in self.all_vars - self.loop_vars:
vars_code.add_line(f"c_{var_name} = context[{var_name!r}]")
- code.add_line('return "".join(result)')
+ code.add_line("return ''.join(result)")
code.dedent()
self._render_function = cast(
Callable[
[Dict[str, Any], Callable[..., Any]],
str
],
- code.get_globals()['render_function'],
+ code.get_globals()["render_function"],
)
def _expr_code(self, expr: str) -> str:
diff --git a/coverage/version.py b/coverage/version.py
index c9e8d7f7e..9cf7d9d19 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -8,7 +8,7 @@
# version_info: same semantics as sys.version_info.
# _dev: the .devN suffix if any.
-version_info = (7, 2, 2, "final", 0)
+version_info = (7, 2, 3, "final", 0)
_dev = 0
@@ -21,10 +21,10 @@ def _make_version(
dev: int = 0,
) -> str:
"""Create a readable version string from version_info tuple components."""
- assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
+ assert releaselevel in ["alpha", "beta", "candidate", "final"]
version = "%d.%d.%d" % (major, minor, micro)
- if releaselevel != 'final':
- short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
+ if releaselevel != "final":
+ short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel]
version += f"{short}{serial}"
if dev != 0:
version += f".dev{dev}"
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index fd2e9f81b..2c8fd0cc1 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -28,7 +28,7 @@
os = isolate_module(os)
-DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'
+DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd"
def rate(hit: int, num: int) -> str:
@@ -127,7 +127,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] =
appendChild(xpackage, xclasses)
for _, class_elt in human_sorted_items(pkg_data.elements.items()):
appendChild(xclasses, class_elt)
- xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
+ xpackage.setAttribute("name", pkg_name.replace(os.sep, "."))
xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines))
if has_arcs:
branch_rate = rate(pkg_data.br_hits, pkg_data.branches)
@@ -172,7 +172,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
if analysis.numbers.n_statements == 0:
return
- # Create the 'lines' and 'package' XML elements, which
+ # Create the "lines" and "package" XML elements, which
# are populated later. Note that a package == a directory.
filename = fr.filename.replace("\\", "/")
for source_path in self.source_paths:
@@ -205,7 +205,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
branch_stats = analysis.branch_stats()
missing_branch_arcs = analysis.missing_branch_arcs()
- # For each statement, create an XML 'line' element.
+ # For each statement, create an XML "line" element.
for line in sorted(analysis.statements):
xline = self.xml_out.createElement("line")
xline.setAttribute("number", str(line))
diff --git a/doc/conf.py b/doc/conf.py
index 59907127a..f6310b577 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -36,7 +36,6 @@
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
- 'sphinxcontrib.spelling',
'sphinx.ext.intersphinx',
'sphinxcontrib.restbuilder',
'sphinx.ext.napoleon',
@@ -67,11 +66,11 @@
# @@@ editable
copyright = "2009–2023, Ned Batchelder" # pylint: disable=redefined-builtin
# The short X.Y.Z version.
-version = "7.2.2"
+version = "7.2.3"
# The full version, including alpha/beta/rc tags.
-release = "7.2.2"
+release = "7.2.3"
# The date of release, in "monthname day, year" format.
-release_date = "March 16, 2023"
+release_date = "April 6, 2023"
# @@@ end
rst_epilog = """
@@ -220,6 +219,9 @@
# -- Spelling ---
if any("spell" in arg for arg in sys.argv):
+ # sphinxcontrib.spelling needs the native "enchant" library, which often is
+ # missing, so only use the extension if we are specifically spell-checking.
+ extensions += ['sphinxcontrib.spelling']
names_file = tempfile.NamedTemporaryFile(mode='w', prefix="coverage_names_", suffix=".txt")
with open("../CONTRIBUTORS.txt") as contributors:
names = set(re.split(r"[^\w']", contributors.read()))
diff --git a/doc/contributing.rst b/doc/contributing.rst
index e9d2c3a40..fa7bb9f0c 100644
--- a/doc/contributing.rst
+++ b/doc/contributing.rst
@@ -37,28 +37,24 @@ The coverage.py code is hosted on a GitHub repository at
https://github.com/nedbat/coveragepy. To get a working environment, follow
these steps:
-.. minimum of PYVERSIONS:
+#. `Fork the repo`_ into your own GitHub account. The coverage.py code will
+ then be copied into a GitHub repository at
+ ``https://github.com/GITHUB_USER/coveragepy`` where GITHUB_USER is your
+ GitHub username.
-#. Create a Python 3.7 virtualenv to work in, and activate it.
+#. (Optional) Create a virtualenv to work in, and activate it. There
+ are a number of ways to do this. Use the method you are comfortable with.
#. Clone the repository::
- $ git clone https://github.com/nedbat/coveragepy
+ $ git clone https://github.com/GITHUB_USER/coveragepy
$ cd coveragepy
#. Install the requirements::
- $ python3 -m pip install -r requirements/dev.pip
-
- If this fails due to incorrect or missing hashes, use
- ``dev.in`` instead::
-
$ python3 -m pip install -r requirements/dev.in
-#. Install a number of versions of Python. Coverage.py supports a range
- of Python versions. The more you can test with, the more easily your code
- can be used as-is. If you only have one version, that's OK too, but may
- mean more work integrating your contribution.
+ Note: You may need to upgrade pip to install the requirements.
Running the tests
@@ -67,57 +63,91 @@ Running the tests
The tests are written mostly as standard unittest-style tests, and are run with
pytest running under `tox`_::
- $ tox
- py37 create: /Users/nedbat/coverage/trunk/.tox/py37
- py37 installdeps: -rrequirements/pip.pip, -rrequirements/pytest.pip, eventlet==0.25.1, greenlet==0.4.15
- py37 develop-inst: /Users/nedbat/coverage/trunk
- py37 installed: apipkg==1.5,appdirs==1.4.4,attrs==20.3.0,backports.functools-lru-cache==1.6.4,-e git+git@github.com:nedbat/coveragepy.git@36ef0e03c0439159c2245d38de70734fa08cddb4#egg=coverage,decorator==5.0.7,distlib==0.3.1,dnspython==2.1.0,eventlet==0.25.1,execnet==1.8.0,filelock==3.0.12,flaky==3.7.0,future==0.18.2,greenlet==0.4.15,hypothesis==6.10.1,importlib-metadata==4.0.1,iniconfig==1.1.1,monotonic==1.6,packaging==20.9,pluggy==0.13.1,py==1.10.0,PyContracts @ git+https://github.com/slorg1/contracts@c5a6da27d4dc9985f68e574d20d86000880919c3,pyparsing==2.4.7,pytest==6.2.3,pytest-forked==1.3.0,pytest-xdist==2.2.1,qualname==0.1.0,six==1.15.0,sortedcontainers==2.3.0,toml==0.10.2,typing-extensions==3.10.0.0,virtualenv==20.4.4,zipp==3.4.1
- py37 run-test-pre: PYTHONHASHSEED='376882681'
- py37 run-test: commands[0] | python setup.py --quiet clean develop
- py37 run-test: commands[1] | python igor.py zip_mods remove_extension
- py37 run-test: commands[2] | python igor.py test_with_tracer py
- === CPython 3.7.10 with Python tracer (.tox/py37/bin/python) ===
+ % python3 -m tox
+ ROOT: tox-gh won't override envlist because tox is not running in GitHub Actions
+ .pkg: _optional_hooks> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ .pkg: get_requires_for_build_editable> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ .pkg: build_editable> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ py37: install_package> python -m pip install -U --force-reinstall --no-deps .tox/.tmp/package/87/coverage-7.2.3a0.dev1-0.editable-cp37-cp37m-macosx_10_15_x86_64.whl
+ py37: commands[0]> python igor.py zip_mods
+ py37: commands[1]> python setup.py --quiet build_ext --inplace
+ py37: commands[2]> python -m pip install -q -e .
+ py37: commands[3]> python igor.py test_with_tracer c
+ === CPython 3.7.15 with C tracer (.tox/py37/bin/python) ===
bringing up nodes...
- ........................................................................................................................................................... [ 15%]
- ........................................................................................................................................................... [ 31%]
- ...........................................................................................................................................s............... [ 47%]
- ...........................................s...................................................................................sss.sssssssssssssssssss..... [ 63%]
- ........................................................................................................................................................s.. [ 79%]
- ......................................s..................................s................................................................................. [ 95%]
- ........................................ss...... [100%]
- 949 passed, 29 skipped in 40.56s
- py37 run-test: commands[3] | python setup.py --quiet build_ext --inplace
- py37 run-test: commands[4] | python igor.py test_with_tracer c
- === CPython 3.7.10 with C tracer (.tox/py37/bin/python) ===
+ .........................................................................................................................x.................s....s....... [ 11%]
+ ..s.....x.............................................s................................................................................................. [ 22%]
+ ........................................................................................................................................................ [ 34%]
+ ........................................................................................................................................................ [ 45%]
+ ........................................................................................................................................................ [ 57%]
+ .........s....................................................................................................................s......................... [ 68%]
+ .................................s..............................s...............s..................................s.................................... [ 80%]
+ ........................................................s............................................................................................... [ 91%]
+ ......................................s......................................................................... [100%]
+ 1316 passed, 12 skipped, 2 xfailed in 36.42s
+ py37: commands[4]> python igor.py remove_extension
+ py37: commands[5]> python igor.py test_with_tracer py
+ === CPython 3.7.15 with Python tracer (.tox/py37/bin/python) ===
bringing up nodes...
- ........................................................................................................................................................... [ 15%]
- ........................................................................................................................................................... [ 31%]
- ......................................................................s.................................................................................... [ 47%]
- ........................................................................................................................................................... [ 63%]
- ..........................s................................................s............................................................................... [ 79%]
- .................................................................................s......................................................................... [ 95%]
- ......................................s......... [100%]
- 973 passed, 5 skipped in 41.36s
- ____________________________________________________________________________ summary _____________________________________________________________________________
- py37: commands succeeded
- congratulations :)
+ ................................................................................................x...........................x.................s......... [ 11%]
+ .....s.............s.s.....................................................s..............ss............................s.ss....ss.ss................... [ 22%]
+ ......................................................................................................................................s................. [ 34%]
+ ..................................................................................................................s..................................... [ 45%]
+ ...................s.ss.....................................................................................s....................s.ss................... [ 57%]
+ ..................s.s................................................................................................................................... [ 68%]
+ ..........................s.........................................ssss...............s.................s...sss..................s...ss...ssss.s....... [ 80%]
+ .......................................................................................................................................................s [ 91%]
+ .........................................................................s.................................ss.... [100%]
+ 1281 passed, 47 skipped, 2 xfailed in 33.86s
+ .pkg: _exit> python /usr/local/virtualenvs/coverage/lib/python3.7/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ py37: OK (82.38=setup[2.80]+cmd[0.20,0.35,7.30,37.20,0.21,34.32] seconds)
+ congratulations :) (83.61 seconds)
Tox runs the complete test suite twice for each version of Python you have
-installed. The first run uses the Python implementation of the trace function,
-the second uses the C implementation.
+installed. The first run uses the C implementation of the trace function,
+the second uses the Python implementation.
To limit tox to just a few versions of Python, use the ``-e`` switch::
- $ tox -e py37,py39
-
-To run just a few tests, you can use `pytest test selectors`_::
-
- $ tox tests/test_misc.py
- $ tox tests/test_misc.py::HasherTest
- $ tox tests/test_misc.py::HasherTest::test_string_hashing
-
-These command run the tests in one file, one class, and just one test,
-respectively.
+ $ python3 -m tox -e py37,py39
+
+On the tox command line, options after ``--`` are passed to pytest. To run
+just a few tests, you can use `pytest test selectors`_::
+
+ $ python3 -m tox -- tests/test_misc.py
+ $ python3 -m tox -- tests/test_misc.py::HasherTest
+ $ python3 -m tox -- tests/test_misc.py::HasherTest::test_string_hashing
+
+These commands run the tests in one file, one class, and just one test,
+respectively. The pytest ``-k`` option selects tests based on a word in their
+name, which can be very convenient for ad-hoc test selection. Of course you
+can combine tox and pytest options::
+
+ $ python3 -m tox -q -e py37 -- -n 0 -vv -k hash
+ === CPython 3.7.15 with C tracer (.tox/py37/bin/python) ===
+ ======================================= test session starts ========================================
+ platform darwin -- Python 3.7.15, pytest-7.2.2, pluggy-1.0.0 -- /Users/nedbat/coverage/.tox/py37/bin/python
+ cachedir: .tox/py37/.pytest_cache
+ rootdir: /Users/nedbat/coverage, configfile: setup.cfg
+ plugins: flaky-3.7.0, hypothesis-6.70.0, xdist-3.2.1
+ collected 1330 items / 1320 deselected / 10 selected
+ run-last-failure: no previously failed tests, not deselecting items.
+
+ tests/test_data.py::CoverageDataTest::test_add_to_hash_with_lines PASSED [ 10%]
+ tests/test_data.py::CoverageDataTest::test_add_to_hash_with_arcs PASSED [ 20%]
+ tests/test_data.py::CoverageDataTest::test_add_to_lines_hash_with_missing_file PASSED [ 30%]
+ tests/test_data.py::CoverageDataTest::test_add_to_arcs_hash_with_missing_file PASSED [ 40%]
+ tests/test_execfile.py::RunPycFileTest::test_running_hashed_pyc PASSED [ 50%]
+ tests/test_misc.py::HasherTest::test_string_hashing PASSED [ 60%]
+ tests/test_misc.py::HasherTest::test_bytes_hashing PASSED [ 70%]
+ tests/test_misc.py::HasherTest::test_unicode_hashing PASSED [ 80%]
+ tests/test_misc.py::HasherTest::test_dict_hashing PASSED [ 90%]
+ tests/test_misc.py::HasherTest::test_dict_collision PASSED [100%]
+
+ =============================== 10 passed, 1320 deselected in 1.88s ================================
+ Skipping tests with Python tracer: Only one tracer: no Python tracer for CPython
+ py37: OK (12.22=setup[2.19]+cmd[0.20,0.36,6.57,2.51,0.20,0.19] seconds)
+ congratulations :) (13.10 seconds)
You can also affect the test runs with environment variables. Define any of
these as 1 to use them:
@@ -156,7 +186,8 @@ some warnings. Please try to keep it that way, but don't let pylint warnings
keep you from sending patches. I can clean them up.
Lines should be kept to a 100-character maximum length. I recommend an
-`editorconfig.org`_ plugin for your editor of choice.
+`editorconfig.org`_ plugin for your editor of choice, which will also help with
+indentation, line endings and so on.
Other style questions are best answered by looking at the existing code.
Formatting of docstrings, comments, long lines, and so on, should match the
@@ -220,6 +251,7 @@ All contributions are expected to include tests for new functionality and
fixes. If you need help writing tests, please ask.
+.. _fork the repo: https://docs.github.com/en/get-started/quickstart/fork-a-repo
.. _editorconfig.org: http://editorconfig.org
.. _tox: https://tox.readthedocs.io/
.. _black: https://pypi.org/project/black/
diff --git a/doc/dict.txt b/doc/dict.txt
index 63544dcde..41d8c94f4 100644
--- a/doc/dict.txt
+++ b/doc/dict.txt
@@ -1,18 +1,36 @@
+API
+BOM
+BTW
+CPython
+CTracer
+Cobertura
+Consolas
+Cython
+DOCTYPE
+DOM
+HTML
+Jinja
+Mako
+OK
+PYTHONPATH
+TODO
+Tidelift
+URL
+UTF
+XML
activestate
-api
apache
-API
+api
args
argv
ascii
+async
basename
basenames
bitbucket
-BOM
bom
boolean
booleans
-BTW
btw
builtin
builtins
@@ -27,7 +45,6 @@ canonicalizes
chdir'd
clickable
cmdline
-Cobertura
codecs
colorsys
combinable
@@ -38,17 +55,16 @@ configurability
configurability's
configurer
configurers
-Consolas
cov
coveragepy
coveragerc
covhtml
-CPython
css
-CTracer
-Cython
+dataio
datetime
deallocating
+debounce
+decodable
dedent
defaultdict
deserialize
@@ -62,8 +78,6 @@ docstring
docstrings
doctest
doctests
-DOCTYPE
-DOM
encodable
encodings
endfor
@@ -75,6 +89,7 @@ exec'ing
execfile
executability
executable's
+execv
expr
extensibility
favicon
@@ -96,10 +111,10 @@ github
gitignore
globals
greenlet
+hintedness
hotkey
hotkeys
html
-HTML
htmlcov
http
https
@@ -111,15 +126,13 @@ ints
invariants
iterable
iterables
-Jinja
-jquery
jQuery
+jquery
json
jython
kwargs
lcov
localStorage
-Mako
manylinux
matcher
matchers
@@ -136,8 +149,10 @@ monospaced
morf
morfs
multi
+multiproc
mumbo
mycode
+mypy
namespace
namespaces
nano
@@ -145,13 +160,14 @@ nbsp
ned
nedbat
nedbatchelder
+newb
+nocover
nosetests
nullary
num
numbits
numpy
ok
-OK
opcode
opcodes
optparse
@@ -161,13 +177,15 @@ overridable
parallelizing
parsable
parsers
+pathlib
pathnames
plugin
plugins
pragma
-pragmas
pragma'd
+pragmas
pre
+premain
prepended
prepending
programmability
@@ -175,17 +193,19 @@ programmatically
py
py's
pyc
+pyenv
pyexpat
+pylib
pylint
pyproject
pypy
pytest
pythonpath
-PYTHONPATH
pyw
rcfile
readme
readthedocs
+realpath
recordable
refactored
refactoring
@@ -194,9 +214,11 @@ regex
regexes
reimplemented
renderer
+rootname
runnable
runtime
scrollbar
+septatrix
serializable
settrace
setuptools
@@ -217,12 +239,10 @@ symlink
symlinks
syntaxes
sys
-templite
templating
+templite
testability
-Tidelift
todo
-TODO
tokenization
tokenize
tokenized
@@ -247,9 +267,8 @@ unparsable
unrunnable
unsubscriptable
untokenizable
+usecache
username
-URL
-UTF
utf
vendored
versionadded
@@ -258,7 +277,7 @@ wikipedia
wildcard
wildcards
www
+xdist
xml
-XML
xrange
xyzzy
diff --git a/doc/faq.rst b/doc/faq.rst
index 8252eeb98..b25dce0fd 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -23,7 +23,7 @@ environment variable.
This will write a line for each file considered, indicating whether it is
traced or not, and if not, why not. Be careful though: the output might be
swallowed by your test runner. If so, a ``COVERAGE_DEBUG_FILE=/tmp/cov.out``
-environemnt variable can direct the output to a file insttead to ensure you see
+environment variable can direct the output to a file instead to ensure you see
everything.
diff --git a/doc/index.rst b/doc/index.rst
index e5ac5a0ae..b11dc90e9 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -18,7 +18,7 @@ supported on:
.. PYVERSIONS
-* Python versions 3.7 through 3.12.0a6.
+* Python versions 3.7 through 3.12.0a7.
* PyPy3 7.3.11.
.. ifconfig:: prerelease
diff --git a/doc/requirements.pip b/doc/requirements.pip
index 5ef2a81b2..b13fedcd8 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -4,321 +4,102 @@
#
# make upgrade
#
-alabaster==0.7.13 \
- --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
- --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
+alabaster==0.7.13
# via sphinx
-attrs==22.2.0 \
- --hash=sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836 \
- --hash=sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99
+attrs==22.2.0
# via scriv
-babel==2.12.1 \
- --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
- --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
+babel==2.12.1
# via sphinx
-certifi==2022.12.7 \
- --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
- --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
+certifi==2022.12.7
# via requests
-charset-normalizer==3.1.0 \
- --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \
- --hash=sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1 \
- --hash=sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e \
- --hash=sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373 \
- --hash=sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62 \
- --hash=sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230 \
- --hash=sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be \
- --hash=sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c \
- --hash=sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0 \
- --hash=sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448 \
- --hash=sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f \
- --hash=sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649 \
- --hash=sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d \
- --hash=sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0 \
- --hash=sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706 \
- --hash=sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a \
- --hash=sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59 \
- --hash=sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23 \
- --hash=sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5 \
- --hash=sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb \
- --hash=sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e \
- --hash=sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e \
- --hash=sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c \
- --hash=sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28 \
- --hash=sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d \
- --hash=sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41 \
- --hash=sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974 \
- --hash=sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce \
- --hash=sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f \
- --hash=sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1 \
- --hash=sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d \
- --hash=sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8 \
- --hash=sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017 \
- --hash=sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31 \
- --hash=sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7 \
- --hash=sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8 \
- --hash=sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e \
- --hash=sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14 \
- --hash=sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd \
- --hash=sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d \
- --hash=sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795 \
- --hash=sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b \
- --hash=sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b \
- --hash=sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b \
- --hash=sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203 \
- --hash=sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f \
- --hash=sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19 \
- --hash=sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1 \
- --hash=sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a \
- --hash=sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac \
- --hash=sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9 \
- --hash=sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0 \
- --hash=sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137 \
- --hash=sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f \
- --hash=sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6 \
- --hash=sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5 \
- --hash=sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909 \
- --hash=sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f \
- --hash=sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0 \
- --hash=sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324 \
- --hash=sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755 \
- --hash=sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb \
- --hash=sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854 \
- --hash=sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c \
- --hash=sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60 \
- --hash=sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84 \
- --hash=sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0 \
- --hash=sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b \
- --hash=sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1 \
- --hash=sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531 \
- --hash=sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1 \
- --hash=sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11 \
- --hash=sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326 \
- --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \
- --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab
+charset-normalizer==3.1.0
# via requests
-click==8.1.3 \
- --hash=sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e \
- --hash=sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48
+click==8.1.3
# via
# click-log
# scriv
-click-log==0.4.0 \
- --hash=sha256:3970f8570ac54491237bcdb3d8ab5e3eef6c057df29f8c3d1151a51a9c23b975 \
- --hash=sha256:a43e394b528d52112af599f2fc9e4b7cf3c15f94e53581f74fa6867e68c91756
+click-log==0.4.0
# via scriv
-cogapp==3.3.0 \
- --hash=sha256:1be95183f70282422d594fa42426be6923070a4bd8335621f6347f3aeee81db0 \
- --hash=sha256:8b5b5f6063d8ee231961c05da010cb27c30876b2279e23ad0eae5f8f09460d50
+cogapp==3.3.0
# via -r doc/requirements.in
-colorama==0.4.6 \
- --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
- --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
+colorama==0.4.6
# via sphinx-autobuild
-docutils==0.18.1 \
- --hash=sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c \
- --hash=sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06
+docutils==0.18.1
# via
# sphinx
# sphinx-rtd-theme
-idna==3.4 \
- --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
- --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
+idna==3.4
# via requests
-imagesize==1.4.1 \
- --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
- --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
+imagesize==1.4.1
# via sphinx
-importlib-metadata==6.0.0 \
- --hash=sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad \
- --hash=sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d
+importlib-metadata==6.1.0
# via
# click
# sphinx
# sphinxcontrib-spelling
-jinja2==3.1.2 \
- --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
- --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
+jinja2==3.1.2
# via
# scriv
# sphinx
-livereload==2.6.3 \
- --hash=sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869 \
- --hash=sha256:ad4ac6f53b2d62bb6ce1a5e6e96f1f00976a32348afedcb4b6d68df2a1d346e4
+livereload==2.6.3
# via sphinx-autobuild
-markupsafe==2.1.2 \
- --hash=sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed \
- --hash=sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc \
- --hash=sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2 \
- --hash=sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460 \
- --hash=sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7 \
- --hash=sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0 \
- --hash=sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1 \
- --hash=sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa \
- --hash=sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03 \
- --hash=sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323 \
- --hash=sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65 \
- --hash=sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013 \
- --hash=sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036 \
- --hash=sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f \
- --hash=sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4 \
- --hash=sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419 \
- --hash=sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2 \
- --hash=sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619 \
- --hash=sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a \
- --hash=sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a \
- --hash=sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd \
- --hash=sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7 \
- --hash=sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666 \
- --hash=sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65 \
- --hash=sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859 \
- --hash=sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625 \
- --hash=sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff \
- --hash=sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156 \
- --hash=sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd \
- --hash=sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba \
- --hash=sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f \
- --hash=sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1 \
- --hash=sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094 \
- --hash=sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a \
- --hash=sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513 \
- --hash=sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed \
- --hash=sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d \
- --hash=sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3 \
- --hash=sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147 \
- --hash=sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c \
- --hash=sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603 \
- --hash=sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601 \
- --hash=sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a \
- --hash=sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1 \
- --hash=sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d \
- --hash=sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3 \
- --hash=sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54 \
- --hash=sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2 \
- --hash=sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6 \
- --hash=sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58
+markupsafe==2.1.2
# via jinja2
-packaging==23.0 \
- --hash=sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2 \
- --hash=sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97
+packaging==23.0
# via sphinx
-pyenchant==3.2.2 \
- --hash=sha256:1cf830c6614362a78aab78d50eaf7c6c93831369c52e1bb64ffae1df0341e637 \
- --hash=sha256:5a636832987eaf26efe971968f4d1b78e81f62bca2bde0a9da210c7de43c3bce \
- --hash=sha256:5facc821ece957208a81423af7d6ec7810dad29697cb0d77aae81e4e11c8e5a6 \
- --hash=sha256:6153f521852e23a5add923dbacfbf4bebbb8d70c4e4bad609a8e0f9faeb915d1
+pyenchant==3.2.2
# via
# -r doc/requirements.in
# sphinxcontrib-spelling
-pygments==2.14.0 \
- --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
- --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
+pygments==2.14.0
# via sphinx
-pytz==2022.7.1 \
- --hash=sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0 \
- --hash=sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a
+pytz==2023.3
# via babel
-requests==2.28.2 \
- --hash=sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa \
- --hash=sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf
+requests==2.28.2
# via
# scriv
# sphinx
-scriv==1.2.1 \
- --hash=sha256:0ceec6243ebf02f6a685507eec72f890ca9d9da4cafcfcfce640b1f027cec17d \
- --hash=sha256:95edfd76642cf7ae6b5cd40975545d8af58f6398cabfe83ff755e8eedb8ddd4e
+scriv==1.2.1
# via -r doc/requirements.in
-six==1.16.0 \
- --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
- --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+six==1.16.0
# via livereload
-snowballstemmer==2.2.0 \
- --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \
- --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a
+snowballstemmer==2.2.0
# via sphinx
-sphinx==5.3.0 \
- --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
- --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==5.3.0
# via
# -r doc/requirements.in
# sphinx-autobuild
# sphinx-rtd-theme
+ # sphinxcontrib-jquery
# sphinxcontrib-restbuilder
# sphinxcontrib-spelling
-sphinx-autobuild==2021.3.14 \
- --hash=sha256:8fe8cbfdb75db04475232f05187c776f46f6e9e04cacf1e49ce81bdac649ccac \
- --hash=sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05
+sphinx-autobuild==2021.3.14
# via -r doc/requirements.in
-sphinx-rtd-theme==1.2.0 \
- --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
- --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
+sphinx-rtd-theme==1.2.0
# via -r doc/requirements.in
-sphinxcontrib-applehelp==1.0.2 \
- --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
- --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==1.0.2
# via sphinx
-sphinxcontrib-devhelp==1.0.2 \
- --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \
- --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4
+sphinxcontrib-devhelp==1.0.2
# via sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
- --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
- --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.0.0
# via sphinx
-sphinxcontrib-jquery==2.0.0 \
- --hash=sha256:8fb65f6dba84bf7bcd1aea1f02ab3955ac34611d838bcc95d4983b805b234daa \
- --hash=sha256:ed47fa425c338ffebe3c37e1cdb56e30eb806116b85f01055b158c7057fdb995
+sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
-sphinxcontrib-jsmath==1.0.1 \
- --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
- --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
+sphinxcontrib-jsmath==1.0.1
# via sphinx
-sphinxcontrib-qthelp==1.0.3 \
- --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \
- --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6
+sphinxcontrib-qthelp==1.0.3
# via sphinx
-sphinxcontrib-restbuilder==0.3 \
- --hash=sha256:6b3ee9394b5ec5e73e6afb34d223530d0b9098cb7562f9c5e364e6d6b41410ce \
- --hash=sha256:6ba2ddc7a87d845c075c1b2e00d541bd1c8400488e50e32c9b4169ccdd9f30cb
+sphinxcontrib-restbuilder==0.3
# via -r doc/requirements.in
-sphinxcontrib-serializinghtml==1.1.5 \
- --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \
- --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952
+sphinxcontrib-serializinghtml==1.1.5
# via sphinx
-sphinxcontrib-spelling==8.0.0 \
- --hash=sha256:199d0a16902ad80c387c2966dc9eb10f565b1fb15ccce17210402db7c2443e5c \
- --hash=sha256:b27e0a16aef00bcfc888a6490dc3f16651f901dc475446c6882834278c8dc7b3
+sphinxcontrib-spelling==8.0.0
# via -r doc/requirements.in
-tornado==6.2 \
- --hash=sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca \
- --hash=sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72 \
- --hash=sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23 \
- --hash=sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8 \
- --hash=sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b \
- --hash=sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9 \
- --hash=sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13 \
- --hash=sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75 \
- --hash=sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac \
- --hash=sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e \
- --hash=sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b
+tornado==6.2
# via livereload
-typing-extensions==4.5.0 \
- --hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \
- --hash=sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4
+typing-extensions==4.5.0
# via importlib-metadata
-urllib3==1.26.15 \
- --hash=sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305 \
- --hash=sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42
+urllib3==1.26.15
# via requests
-zipp==3.15.0 \
- --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
- --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
+zipp==3.15.0
# via importlib-metadata
-
-# The following packages are considered to be unsafe in a requirements file:
-setuptools==65.7.0 \
- --hash=sha256:4d3c92fac8f1118bb77a22181355e29c239cabfe2b9effdaa665c66b711136d7 \
- --hash=sha256:8ab4f1dbf2b4a65f7eec5ad0c620e84c34111a68d3349833494b9088212214dd
- # via
- # -c doc/../requirements/pins.pip
- # sphinxcontrib-jquery
diff --git a/doc/sample_html/coverage_html.js b/doc/sample_html/coverage_html.js
index 1c4eb9881..4c321182c 100644
--- a/doc/sample_html/coverage_html.js
+++ b/doc/sample_html/coverage_html.js
@@ -214,7 +214,7 @@ coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";
coverage.pyfile_ready = function () {
// If we're directed to a particular line number, highlight the line.
var frag = location.hash;
- if (frag.length > 2 && frag[1] === 't') {
+ if (frag.length > 2 && frag[1] === "t") {
document.querySelector(frag).closest(".n").classList.add("highlight");
coverage.set_sel(parseInt(frag.substr(2), 10));
} else {
@@ -257,6 +257,10 @@ coverage.pyfile_ready = function () {
coverage.init_scroll_markers();
coverage.wire_up_sticky_header();
+ document.querySelectorAll("[id^=ctxs]").forEach(
+ cbox => cbox.addEventListener("click", coverage.expand_contexts)
+ );
+
// Rebuild scroll markers when the window height changes.
window.addEventListener("resize", coverage.build_scroll_markers);
};
@@ -528,14 +532,14 @@ coverage.scroll_window = function (to_pos) {
coverage.init_scroll_markers = function () {
// Init some variables
- coverage.lines_len = document.querySelectorAll('#source > p').length;
+ coverage.lines_len = document.querySelectorAll("#source > p").length;
// Build html
coverage.build_scroll_markers();
};
coverage.build_scroll_markers = function () {
- const temp_scroll_marker = document.getElementById('scroll_marker')
+ const temp_scroll_marker = document.getElementById("scroll_marker")
if (temp_scroll_marker) temp_scroll_marker.remove();
// Don't build markers if the window has no scroll bar.
if (document.body.scrollHeight <= window.innerHeight) {
@@ -549,8 +553,8 @@ coverage.build_scroll_markers = function () {
const scroll_marker = document.createElement("div");
scroll_marker.id = "scroll_marker";
- document.getElementById('source').querySelectorAll(
- 'p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par'
+ document.getElementById("source").querySelectorAll(
+ "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par"
).forEach(element => {
const line_top = Math.floor(element.offsetTop * marker_scale);
const line_number = parseInt(element.querySelector(".n a").id.substr(1));
@@ -577,24 +581,40 @@ coverage.build_scroll_markers = function () {
};
coverage.wire_up_sticky_header = function () {
- const header = document.querySelector('header');
+ const header = document.querySelector("header");
const header_bottom = (
- header.querySelector('.content h2').getBoundingClientRect().top -
+ header.querySelector(".content h2").getBoundingClientRect().top -
header.getBoundingClientRect().top
);
function updateHeader() {
if (window.scrollY > header_bottom) {
- header.classList.add('sticky');
+ header.classList.add("sticky");
} else {
- header.classList.remove('sticky');
+ header.classList.remove("sticky");
}
}
- window.addEventListener('scroll', updateHeader);
+ window.addEventListener("scroll", updateHeader);
updateHeader();
};
+coverage.expand_contexts = function (e) {
+ var ctxs = e.target.parentNode.querySelector(".ctxs");
+
+ if (!ctxs.classList.contains("expanded")) {
+ var ctxs_text = ctxs.textContent;
+ var width = Number(ctxs_text[0]);
+ ctxs.textContent = "";
+ for (var i = 1; i < ctxs_text.length; i += width) {
+ key = ctxs_text.substring(i, i + width).trim();
+ ctxs.appendChild(document.createTextNode(contexts[key]));
+ ctxs.appendChild(document.createElement("br"));
+ }
+ ctxs.classList.add("expanded");
+ }
+};
+
document.addEventListener("DOMContentLoaded", () => {
if (document.body.classList.contains("indexfile")) {
coverage.index_ready();
diff --git a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
index dce7d87cb..a15b8decf 100644
--- a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
+++ b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html
@@ -66,8 +66,8 @@
^ index
» next
- coverage.py v7.2.2,
- created at 2023-03-16 07:52 -0400
+ coverage.py v7.2.3,
+ created at 2023-04-06 08:42 -0400