- « prev
- ^ index
- » next
+ « prev
+ ^ index
+ » next
coverage.py v{{__version__}},
created at {{ time_stamp }}
diff --git a/coverage/htmlfiles/style.css b/coverage/htmlfiles/style.css
index d6768a35e..3cdaf05a3 100644
--- a/coverage/htmlfiles/style.css
+++ b/coverage/htmlfiles/style.css
@@ -22,7 +22,7 @@ td { vertical-align: top; }
table tr.hidden { display: none !important; }
-p#no_rows { display: none; font-size: 1.2em; }
+p#no_rows { display: none; font-size: 1.15em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; }
a.nav { text-decoration: none; color: inherit; }
@@ -40,6 +40,18 @@ header .content { padding: 1rem 3.5rem; }
header h2 { margin-top: .5em; font-size: 1em; }
+header h2 a.button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }
+
+@media (prefers-color-scheme: dark) { header h2 a.button { background: #333; } }
+
+@media (prefers-color-scheme: dark) { header h2 a.button { border-color: #444; } }
+
+header h2 a.button.current { border: 2px solid; background: #fff; border-color: #999; cursor: default; }
+
+@media (prefers-color-scheme: dark) { header h2 a.button.current { background: #1e1e1e; } }
+
+@media (prefers-color-scheme: dark) { header h2 a.button.current { border-color: #777; } }
+
header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; }
@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } }
@@ -68,19 +80,29 @@ footer .content { padding: 0; color: #666; font-style: italic; }
h1 { font-size: 1.25em; display: inline-block; }
-#filter_container { float: right; margin: 0 2em 0 0; }
+#filter_container { float: right; margin: 0 2em 0 0; line-height: 1.66em; }
+
+#filter_container #filter { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; }
+
+@media (prefers-color-scheme: dark) { #filter_container #filter { border-color: #444; } }
-#filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; }
+@media (prefers-color-scheme: dark) { #filter_container #filter { background: #1e1e1e; } }
-@media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } }
+@media (prefers-color-scheme: dark) { #filter_container #filter { color: #eee; } }
-@media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } }
+#filter_container #filter:focus { border-color: #007acc; }
-@media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } }
+#filter_container :disabled ~ label { color: #ccc; }
-#filter_container input:focus { border-color: #007acc; }
+@media (prefers-color-scheme: dark) { #filter_container :disabled ~ label { color: #444; } }
-header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }
+#filter_container label { font-size: .875em; color: #666; }
+
+@media (prefers-color-scheme: dark) { #filter_container label { color: #aaa; } }
+
+header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }
+
+@media (prefers-color-scheme: dark) { header button { background: #333; } }
@media (prefers-color-scheme: dark) { header button { border-color: #444; } }
@@ -148,13 +170,13 @@ kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em
#source p * { box-sizing: border-box; }
-#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; }
+#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; }
@media (prefers-color-scheme: dark) { #source p .n { color: #777; } }
#source p .n.highlight { background: #ffdd00; }
-#source p .n a { margin-top: -4em; padding-top: 4em; text-decoration: none; color: #999; }
+#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; }
@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } }
@@ -258,23 +280,21 @@ kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em
@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } }
-#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; }
+#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; }
@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } }
-#source p .ctxs span { display: block; text-align: right; }
-
#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; }
#index table.index { margin-left: -.5em; }
-#index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; }
+#index td, #index th { text-align: right; padding: .25em .5em; border-bottom: 1px solid #eee; }
@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } }
-#index td.name, #index th.name { text-align: left; width: auto; }
+#index td.name, #index th.name { text-align: left; width: auto; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; min-width: 15em; }
-#index th { font-style: italic; color: #333; cursor: pointer; }
+#index th { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-style: italic; color: #333; cursor: pointer; }
@media (prefers-color-scheme: dark) { #index th { color: #ddd; } }
@@ -282,23 +302,29 @@ kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em
@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } }
+#index th .arrows { color: #666; font-size: 85%; font-family: sans-serif; font-style: normal; pointer-events: none; }
+
#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; }
@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } }
-#index th[aria-sort="ascending"]::after { font-family: sans-serif; content: " ↑"; }
+#index th[aria-sort="ascending"] .arrows::after { content: " ▲"; }
+
+#index th[aria-sort="descending"] .arrows::after { content: " ▼"; }
-#index th[aria-sort="descending"]::after { font-family: sans-serif; content: " ↓"; }
+#index td.name { font-size: 1.15em; }
#index td.name a { text-decoration: none; color: inherit; }
+#index td.name .no-noun { font-style: italic; }
+
#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; }
-#index tr.file:hover { background: #eee; }
+#index tr.region:hover { background: #eee; }
-@media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } }
+@media (prefers-color-scheme: dark) { #index tr.region:hover { background: #333; } }
-#index tr.file:hover td.name { text-decoration: underline; color: inherit; }
+#index tr.region:hover td.name { text-decoration: underline; color: inherit; }
#scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; }
diff --git a/coverage/htmlfiles/style.scss b/coverage/htmlfiles/style.scss
index 1e9103fd1..9bf1a7cc6 100644
--- a/coverage/htmlfiles/style.scss
+++ b/coverage/htmlfiles/style.scss
@@ -96,6 +96,22 @@ $dark-context-bg-color: #056;
}
}
+@mixin button-shape {
+ font-family: inherit;
+ font-size: inherit;
+ border: 1px solid;
+ border-radius: .2em;
+ background: $light-gray2;
+ @include background-dark($dark-gray2);
+ color: inherit;
+ text-decoration: none;
+ padding: .1em .5em;
+ margin: 1px calc(.1em + 1px);
+ cursor: pointer;
+ border-color: $light-gray3;
+ @include border-color-dark($dark-gray3);
+}
+
// Page-wide styles
html, body, h1, h2, h3, p, table, td, th {
margin: 0;
@@ -143,7 +159,8 @@ table tr.hidden {
p#no_rows {
display: none;
- font-size: 1.2em;
+ font-size: 1.15em;
+ font-family: $font-normal;
}
a.nav {
@@ -176,6 +193,18 @@ header {
h2 {
margin-top: .5em;
font-size: 1em;
+
+ a.button {
+ @include button-shape;
+ &.current {
+ border: 2px solid;
+ background: $light-bg;
+ @include background-dark($dark-bg);
+ border-color: $light-gray4;
+ @include border-color-dark($dark-gray4);
+ cursor: default;
+ }
+ }
}
p.text {
@@ -244,8 +273,9 @@ h1 {
#filter_container {
float: right;
margin: 0 2em 0 0;
+ line-height: 1.66em;
- input {
+ #filter {
width: 10em;
padding: 0.2em 0.5em;
border: 2px solid $light-gray3;
@@ -258,19 +288,21 @@ h1 {
border-color: $focus-color;
}
}
+
+ :disabled ~ label {
+ color: $light-gray3;
+ @include color-dark($dark-gray3);
+ }
+
+ label {
+ font-size: .875em;
+ color: $light-gray5;
+ @include color-dark($dark-gray5);
+ }
}
header button {
- font-family: inherit;
- font-size: inherit;
- border: 1px solid;
- border-radius: .2em;
- color: inherit;
- padding: .1em .5em;
- margin: 1px calc(.1em + 1px);
- cursor: pointer;
- border-color: $light-gray3;
- @include border-color-dark($dark-gray3);
+ @include button-shape;
@include focus-border;
&.run {
@@ -418,6 +450,7 @@ $border-indicator-width: .2em;
margin-left: -$left-gutter;
padding-right: 1em;
color: $light-gray4;
+ user-select: none;
@include color-dark($dark-gray4);
&.highlight {
@@ -425,11 +458,9 @@ $border-indicator-width: .2em;
}
a {
- // These two lines make anchors to the line scroll the line to be
+ // Make anchors to the line scroll the line to be
// visible beneath the fixed-position header.
- margin-top: -4em;
- padding-top: 4em;
-
+ scroll-margin-top: 6em;
text-decoration: none;
color: $light-gray4;
@include color-dark($dark-gray4);
@@ -622,10 +653,7 @@ $border-indicator-width: .2em;
@include background-dark($dark-context-bg-color);
border-radius: .25em;
margin-right: 1.75em;
- span {
- display: block;
- text-align: right;
- }
+ text-align: right;
}
}
}
@@ -641,16 +669,18 @@ $border-indicator-width: .2em;
}
td, th {
text-align: right;
- width: 5em;
padding: .25em .5em;
border-bottom: 1px solid $light-gray2;
@include border-color-dark($dark-gray2);
&.name {
text-align: left;
width: auto;
+ font-family: $font-normal;
+ min-width: 15em;
}
}
th {
+ font-family: $font-normal;
font-style: italic;
color: $light-gray6;
@include color-dark($dark-gray6);
@@ -659,24 +689,35 @@ $border-indicator-width: .2em;
background: $light-gray2;
@include background-dark($dark-gray2);
}
+ .arrows {
+ color: #666;
+ font-size: 85%;
+ font-family: sans-serif;
+ font-style: normal;
+ pointer-events: none;
+ }
&[aria-sort="ascending"], &[aria-sort="descending"] {
white-space: nowrap;
background: $light-gray2;
@include background-dark($dark-gray2);
padding-left: .5em;
}
- &[aria-sort="ascending"]::after {
- font-family: sans-serif;
- content: " ↑";
+ &[aria-sort="ascending"] .arrows::after {
+ content: " ▲";
}
- &[aria-sort="descending"]::after {
- font-family: sans-serif;
- content: " ↓";
+ &[aria-sort="descending"] .arrows::after {
+ content: " ▼";
}
}
- td.name a {
- text-decoration: none;
- color: inherit;
+ td.name {
+ font-size: 1.15em;
+ a {
+ text-decoration: none;
+ color: inherit;
+ }
+ & .no-noun {
+ font-style: italic;
+ }
}
tr.total td,
@@ -685,7 +726,7 @@ $border-indicator-width: .2em;
border-top: 1px solid #ccc;
border-bottom: none;
}
- tr.file:hover {
+ tr.region:hover {
background: $light-gray2;
@include background-dark($dark-gray2);
td.name {
diff --git a/coverage/inorout.py b/coverage/inorout.py
index d0d0ef913..e2b4c8ca3 100644
--- a/coverage/inorout.py
+++ b/coverage/inorout.py
@@ -9,6 +9,7 @@
import inspect
import itertools
import os
+import os.path
import platform
import re
import sys
@@ -17,15 +18,16 @@
from types import FrameType, ModuleType
from typing import (
- cast, Any, Iterable, List, Optional, Set, Tuple, Type, TYPE_CHECKING,
+ cast, Any, TYPE_CHECKING,
)
+from collections.abc import Iterable
from coverage import env
from coverage.disposition import FileDisposition, disposition_init
from coverage.exceptions import CoverageException, PluginError
from coverage.files import TreeMatcher, GlobMatcher, ModuleMatcher
from coverage.files import prep_patterns, find_python_files, canonical_filename
-from coverage.misc import sys_modules_saved
+from coverage.misc import isolate_module, sys_modules_saved
from coverage.python import source_for_file, source_for_morf
from coverage.types import TFileDisposition, TMorf, TWarnFn, TDebugCtl
@@ -38,7 +40,7 @@
# when deciding where the stdlib is. These modules are not used for anything,
# they are modules importable from the pypy lib directories, so that we can
# find those directories.
-modules_we_happen_to_have: List[ModuleType] = [
+modules_we_happen_to_have: list[ModuleType] = [
inspect, itertools, os, platform, re, sysconfig, traceback,
]
@@ -56,6 +58,8 @@
pass
+os = isolate_module(os)
+
def canonical_path(morf: TMorf, directory: bool = False) -> str:
"""Return the canonical path of the module or file `morf`.
@@ -70,7 +74,7 @@ def canonical_path(morf: TMorf, directory: bool = False) -> str:
return morf_path
-def name_for_module(filename: str, frame: Optional[FrameType]) -> str:
+def name_for_module(filename: str, frame: FrameType | None) -> str:
"""Get the name of the module for a filename and frame.
For configurability's sake, we allow __main__ modules to be matched by
@@ -83,20 +87,16 @@ def name_for_module(filename: str, frame: Optional[FrameType]) -> str:
"""
module_globals = frame.f_globals if frame is not None else {}
- dunder_name: str = module_globals.get('__name__', None)
+ dunder_name: str = module_globals.get("__name__", None)
- if isinstance(dunder_name, str) and dunder_name != '__main__':
+ if isinstance(dunder_name, str) and dunder_name != "__main__":
# This is the usual case: an imported module.
return dunder_name
- loader = module_globals.get('__loader__', None)
- for attrname in ('fullname', 'name'): # attribute renamed in py3.2
- if hasattr(loader, attrname):
- fullname = getattr(loader, attrname)
- else:
- continue
-
- if isinstance(fullname, str) and fullname != '__main__':
+ spec = module_globals.get("__spec__", None)
+ if spec:
+ fullname = spec.name
+ if isinstance(fullname, str) and fullname != "__main__":
# Module loaded via: runpy -m
return fullname
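
Note: the old __loader__ probing (with its py3.2 attribute rename) is replaced
by reading __spec__ directly. A minimal standalone sketch of the resulting
lookup order -- the helper name is invented for illustration:

    from __future__ import annotations

    def guess_module_name(module_globals: dict) -> str | None:
        """Prefer __name__; fall back to __spec__.name for runpy -m modules."""
        dunder_name = module_globals.get("__name__")
        if isinstance(dunder_name, str) and dunder_name != "__main__":
            return dunder_name  # the usual case: an imported module
        spec = module_globals.get("__spec__")
        if spec and isinstance(spec.name, str) and spec.name != "__main__":
            return spec.name  # e.g. "json.tool" when run as: python -m json.tool
        return None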
@@ -110,18 +110,18 @@ def name_for_module(filename: str, frame: Optional[FrameType]) -> str:
def module_is_namespace(mod: ModuleType) -> bool:
"""Is the module object `mod` a PEP420 namespace module?"""
- return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
+ return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None
def module_has_file(mod: ModuleType) -> bool:
"""Does the module object `mod` have an existing __file__ ?"""
- mod__file__ = getattr(mod, '__file__', None)
+ mod__file__ = getattr(mod, "__file__", None)
if mod__file__ is None:
return False
return os.path.exists(mod__file__)
-def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]]:
+def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]:
"""Find the file and search path for `modulename`.
Returns:
@@ -142,7 +142,7 @@ def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]]
return filename, path
-def add_stdlib_paths(paths: Set[str]) -> None:
+def add_stdlib_paths(paths: set[str]) -> None:
"""Add paths where the stdlib can be found to the set `paths`."""
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
@@ -154,7 +154,7 @@ def add_stdlib_paths(paths: Set[str]) -> None:
paths.add(canonical_path(m, directory=True))
-def add_third_party_paths(paths: Set[str]) -> None:
+def add_third_party_paths(paths: set[str]) -> None:
"""Add locations for third-party packages to the set `paths`."""
# Get the paths that sysconfig knows about.
scheme_names = set(sysconfig.get_scheme_names())
@@ -168,7 +168,7 @@ def add_third_party_paths(paths: Set[str]) -> None:
paths.add(config_paths[path_name])
-def add_coverage_paths(paths: Set[str]) -> None:
+def add_coverage_paths(paths: set[str]) -> None:
"""Add paths where coverage.py code can be found to the set `paths`."""
cover_path = canonical_path(__file__, directory=True)
paths.add(cover_path)
@@ -184,15 +184,15 @@ def __init__(
self,
config: CoverageConfig,
warn: TWarnFn,
- debug: Optional[TDebugCtl],
+ debug: TDebugCtl | None,
include_namespace_packages: bool,
) -> None:
self.warn = warn
self.debug = debug
self.include_namespace_packages = include_namespace_packages
- self.source: List[str] = []
- self.source_pkgs: List[str] = []
+ self.source: list[str] = []
+ self.source_pkgs: list[str] = []
self.source_pkgs.extend(config.source_pkgs)
for src in config.source or []:
if os.path.isdir(src):
@@ -205,17 +205,17 @@ def __init__(
self.omit = prep_patterns(config.run_omit)
# The directories for files considered "installed with the interpreter".
- self.pylib_paths: Set[str] = set()
+ self.pylib_paths: set[str] = set()
if not config.cover_pylib:
add_stdlib_paths(self.pylib_paths)
# To avoid tracing the coverage.py code itself, we skip anything
# located where we are.
- self.cover_paths: Set[str] = set()
+ self.cover_paths: set[str] = set()
add_coverage_paths(self.cover_paths)
# Find where third-party packages are installed.
- self.third_paths: Set[str] = set()
+ self.third_paths: set[str] = set()
add_third_party_paths(self.third_paths)
def _debug(msg: str) -> None:
@@ -274,14 +274,14 @@ def _debug(msg: str) -> None:
if modfile:
if self.third_match.match(modfile):
_debug(
- f"Source in third-party: source_pkg {pkg!r} at {modfile!r}"
+ f"Source in third-party: source_pkg {pkg!r} at {modfile!r}",
)
self.source_in_third_paths.add(canonical_path(source_for_file(modfile)))
else:
for pathdir in path:
if self.third_match.match(pathdir):
_debug(
- f"Source in third-party: {pkg!r} path directory at {pathdir!r}"
+ f"Source in third-party: {pkg!r} path directory at {pathdir!r}",
)
self.source_in_third_paths.add(pathdir)
@@ -293,9 +293,9 @@ def _debug(msg: str) -> None:
_debug(f"Source in third-party matching: {self.source_in_third_match}")
self.plugins: Plugins
- self.disp_class: Type[TFileDisposition] = FileDisposition
+ self.disp_class: type[TFileDisposition] = FileDisposition
- def should_trace(self, filename: str, frame: Optional[FrameType] = None) -> TFileDisposition:
+ def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition:
"""Decide whether to trace execution in `filename`, with a reason.
This function is called from the trace function. As each new file name
@@ -313,7 +313,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
disp.reason = reason
return disp
- if original_filename.startswith('<'):
+ if original_filename.startswith("<"):
return nope(disp, "original file name is not real")
if frame is not None:
@@ -323,10 +323,11 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
- dunder_file = frame.f_globals and frame.f_globals.get('__file__')
+ dunder_file = frame.f_globals and frame.f_globals.get("__file__")
if dunder_file:
- filename = source_for_file(dunder_file)
- if original_filename and not original_filename.startswith('<'):
+ # Danger: __file__ can (rarely?) be of type Path.
+ filename = source_for_file(str(dunder_file))
+ if original_filename and not original_filename.startswith("<"):
orig = os.path.basename(original_filename)
if orig != os.path.basename(filename):
# Files shouldn't be renamed when moved. This happens when
@@ -338,10 +339,10 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
# Empty string is pretty useless.
return nope(disp, "empty string isn't a file name")
- if filename.startswith('memory:'):
+ if filename.startswith("memory:"):
return nope(disp, "memory isn't traceable")
- if filename.startswith('<'):
+ if filename.startswith("<"):
# Lots of non-file execution is represented with artificial
# file names like "<string>", "<doctest abc.abc[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
@@ -367,7 +368,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
disp.has_dynamic_filename = True
else:
disp.source_filename = canonical_filename(
- file_tracer.source_filename()
+ file_tracer.source_filename(),
)
break
except Exception:
@@ -384,7 +385,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
if not disp.has_dynamic_filename:
if not disp.source_filename:
raise PluginError(
- f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'"
+ f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'",
)
reason = self.check_include_omit_etc(disp.source_filename, frame)
if reason:
@@ -392,7 +393,7 @@ def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
return disp
- def check_include_omit_etc(self, filename: str, frame: Optional[FrameType]) -> Optional[str]:
+ def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None:
"""Check a file name against the include, omit, etc, rules.
Returns a string or None. String means, don't trace, and is the reason
@@ -484,11 +485,11 @@ def warn_already_imported_files(self) -> None:
msg = f"Already imported a file that will be measured: {filename}"
self.warn(msg, slug="already-imported")
warned.add(filename)
- elif self.debug and self.debug.should('trace'):
+ elif self.debug and self.debug.should("trace"):
self.debug.write(
"Didn't trace already imported file {!r}: {}".format(
- disp.original_filename, disp.reason
- )
+ disp.original_filename, disp.reason,
+ ),
)
def warn_unimported_source(self) -> None:
@@ -522,13 +523,13 @@ def _warn_about_unmeasured_code(self, pkg: str) -> None:
msg = f"Module {pkg} was previously imported, but not measured"
self.warn(msg, slug="module-not-measured")
- def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]:
+ def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]:
"""Find files in the areas of interest that might be untraced.
Yields pairs: file path, and responsible plug-in name.
"""
for pkg in self.source_pkgs:
- if (not pkg in sys.modules or
+ if (pkg not in sys.modules or
not module_has_file(sys.modules[pkg])):
continue
pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__))
@@ -537,13 +538,13 @@ def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]:
for src in self.source:
yield from self._find_executable_files(src)
- def _find_plugin_files(self, src_dir: str) -> Iterable[Tuple[str, str]]:
+ def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]:
"""Get executable files from the plugins."""
for plugin in self.plugins.file_tracers:
for x_file in plugin.find_executable_files(src_dir):
yield x_file, plugin._coverage_plugin_name
- def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[str]]]:
+ def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]:
"""Find executable files in `src_dir`.
Search for files in `src_dir` that can be executed because they
@@ -567,7 +568,7 @@ def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[s
continue
yield file_path, plugin_name
- def sys_info(self) -> Iterable[Tuple[str, Any]]:
+ def sys_info(self) -> Iterable[tuple[str, Any]]:
"""Our information for Coverage.sys_info.
Returns a list of (key, value) pairs.
@@ -580,9 +581,9 @@ def sys_info(self) -> Iterable[Tuple[str, Any]]:
]
matcher_names = [
- 'source_match', 'source_pkgs_match',
- 'include_match', 'omit_match',
- 'cover_match', 'pylib_match', 'third_match', 'source_in_third_match',
+ "source_match", "source_pkgs_match",
+ "include_match", "omit_match",
+ "cover_match", "pylib_match", "third_match", "source_in_third_match",
]
for matcher_name in matcher_names:
@@ -590,7 +591,7 @@ def sys_info(self) -> Iterable[Tuple[str, Any]]:
if matcher:
matcher_info = matcher.info()
else:
- matcher_info = '-none-'
+ matcher_info = "-none-"
info.append((matcher_name, matcher_info))
return info
diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py
index 7ee1fb99f..00053ebfb 100644
--- a/coverage/jsonreport.py
+++ b/coverage/jsonreport.py
@@ -9,18 +9,28 @@
import json
import sys
-from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
+from collections.abc import Iterable
+from typing import Any, IO, TYPE_CHECKING
from coverage import __version__
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
from coverage.types import TMorf, TLineNo
if TYPE_CHECKING:
from coverage import Coverage
from coverage.data import CoverageData
+ from coverage.plugin import FileReporter
+# A type for data that can be JSON-serialized.
+JsonObj = dict[str, Any]
+
+# "Version 1" had no format number at all.
+# 2: add the meta.format field.
+# 3: add region information (functions, classes)
+FORMAT_VERSION = 3
+
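
Note: a hypothetical consumer can gate on the new meta.format field before
relying on region data; the file name and handling here are assumptions:

    import json

    with open("coverage.json") as f:
        report = json.load(f)
    fmt = report["meta"].get("format", 1)  # "version 1" reports had no field
    if fmt < 3:
        raise SystemExit(f"report format {fmt} predates function/class regions")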
class JsonReporter:
"""A reporter for writing JSON coverage results."""
@@ -30,9 +40,29 @@ def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
self.total = Numbers(self.config.precision)
- self.report_data: Dict[str, Any] = {}
+ self.report_data: JsonObj = {}
+
+ def make_summary(self, nums: Numbers) -> JsonObj:
+ """Create a dict summarizing `nums`."""
+ return {
+ "covered_lines": nums.n_executed,
+ "num_statements": nums.n_statements,
+ "percent_covered": nums.pc_covered,
+ "percent_covered_display": nums.pc_covered_str,
+ "missing_lines": nums.n_missing,
+ "excluded_lines": nums.n_excluded,
+ }
+
+ def make_branch_summary(self, nums: Numbers) -> JsonObj:
+ """Create a dict summarizing the branch info in `nums`."""
+ return {
+ "num_branches": nums.n_branches,
+ "num_partial_branches": nums.n_partial_branches,
+ "covered_branches": nums.n_executed_branches,
+ "missing_branches": nums.n_missing_branches,
+ }
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Generate a json report for `morfs`.
`morfs` is a list of modules or file names.
@@ -44,6 +74,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
coverage_data = self.coverage.get_data()
coverage_data.set_query_contexts(self.config.report_contexts)
self.report_data["meta"] = {
+ "format": FORMAT_VERSION,
"version": __version__,
"timestamp": datetime.datetime.now().isoformat(),
"branch_coverage": coverage_data.has_arcs(),
@@ -54,27 +85,15 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
measured_files[file_reporter.relative_filename()] = self.report_one_file(
coverage_data,
- analysis
+ analysis,
+ file_reporter,
)
self.report_data["files"] = measured_files
-
- self.report_data["totals"] = {
- 'covered_lines': self.total.n_executed,
- 'num_statements': self.total.n_statements,
- 'percent_covered': self.total.pc_covered,
- 'percent_covered_display': self.total.pc_covered_str,
- 'missing_lines': self.total.n_missing,
- 'excluded_lines': self.total.n_excluded,
- }
+ self.report_data["totals"] = self.make_summary(self.total)
if coverage_data.has_arcs():
- self.report_data["totals"].update({
- 'num_branches': self.total.n_branches,
- 'num_partial_branches': self.total.n_partial_branches,
- 'covered_branches': self.total.n_executed_branches,
- 'missing_branches': self.total.n_missing_branches,
- })
+ self.report_data["totals"].update(self.make_branch_summary(self.total))
json.dump(
self.report_data,
@@ -84,45 +103,76 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
return self.total.n_statements and self.total.pc_covered
- def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Dict[str, Any]:
+ def report_one_file(
+ self, coverage_data: CoverageData, analysis: Analysis, file_reporter: FileReporter
+ ) -> JsonObj:
"""Extract the relevant report data for a single file."""
nums = analysis.numbers
self.total += nums
- summary = {
- 'covered_lines': nums.n_executed,
- 'num_statements': nums.n_statements,
- 'percent_covered': nums.pc_covered,
- 'percent_covered_display': nums.pc_covered_str,
- 'missing_lines': nums.n_missing,
- 'excluded_lines': nums.n_excluded,
- }
- reported_file = {
- 'executed_lines': sorted(analysis.executed),
- 'summary': summary,
- 'missing_lines': sorted(analysis.missing),
- 'excluded_lines': sorted(analysis.excluded),
+ summary = self.make_summary(nums)
+ reported_file: JsonObj = {
+ "executed_lines": sorted(analysis.executed),
+ "summary": summary,
+ "missing_lines": sorted(analysis.missing),
+ "excluded_lines": sorted(analysis.excluded),
}
if self.config.json_show_contexts:
- reported_file['contexts'] = analysis.data.contexts_by_lineno(analysis.filename)
+ reported_file["contexts"] = coverage_data.contexts_by_lineno(analysis.filename)
if coverage_data.has_arcs():
- summary.update({
- 'num_branches': nums.n_branches,
- 'num_partial_branches': nums.n_partial_branches,
- 'covered_branches': nums.n_executed_branches,
- 'missing_branches': nums.n_missing_branches,
- })
- reported_file['executed_branches'] = list(
- _convert_branch_arcs(analysis.executed_branch_arcs())
+ summary.update(self.make_branch_summary(nums))
+ reported_file["executed_branches"] = list(
+ _convert_branch_arcs(analysis.executed_branch_arcs()),
)
- reported_file['missing_branches'] = list(
- _convert_branch_arcs(analysis.missing_branch_arcs())
+ reported_file["missing_branches"] = list(
+ _convert_branch_arcs(analysis.missing_branch_arcs()),
+ )
+
+ num_lines = len(file_reporter.source().splitlines())
+ for noun, plural in file_reporter.code_region_kinds():
+ reported_file[plural] = region_data = {}
+ outside_lines = set(range(1, num_lines + 1))
+ for region in file_reporter.code_regions():
+ if region.kind != noun:
+ continue
+ outside_lines -= region.lines
+ region_data[region.name] = self.make_region_data(
+ coverage_data,
+ analysis.narrow(region.lines),
+ )
+
+ region_data[""] = self.make_region_data(
+ coverage_data,
+ analysis.narrow(outside_lines),
)
return reported_file
+ def make_region_data(self, coverage_data: CoverageData, narrowed_analysis: Analysis) -> JsonObj:
+ """Create the data object for one region of a file."""
+ narrowed_nums = narrowed_analysis.numbers
+ narrowed_summary = self.make_summary(narrowed_nums)
+ this_region = {
+ "executed_lines": sorted(narrowed_analysis.executed),
+ "summary": narrowed_summary,
+ "missing_lines": sorted(narrowed_analysis.missing),
+ "excluded_lines": sorted(narrowed_analysis.excluded),
+ }
+ if self.config.json_show_contexts:
+ contexts = coverage_data.contexts_by_lineno(narrowed_analysis.filename)
+ this_region["contexts"] = contexts
+ if coverage_data.has_arcs():
+ narrowed_summary.update(self.make_branch_summary(narrowed_nums))
+ this_region["executed_branches"] = list(
+ _convert_branch_arcs(narrowed_analysis.executed_branch_arcs()),
+ )
+ this_region["missing_branches"] = list(
+ _convert_branch_arcs(narrowed_analysis.missing_branch_arcs()),
+ )
+ return this_region
+
def _convert_branch_arcs(
- branch_arcs: Dict[TLineNo, List[TLineNo]],
-) -> Iterable[Tuple[TLineNo, TLineNo]]:
+ branch_arcs: dict[TLineNo, list[TLineNo]],
+) -> Iterable[tuple[TLineNo, TLineNo]]:
"""Convert branch arcs to a list of two-element tuples."""
for source, targets in branch_arcs.items():
for target in targets:
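
Note: with the region code above, one entry of report_data["files"] now looks
roughly like this (line numbers and names invented; the region kind keys,
e.g. "functions", come from the file reporter's code_region_kinds()):

    reported_file = {
        "executed_lines": [1, 2, 5],
        "summary": {"covered_lines": 3, "num_statements": 4},  # plus percent_* etc.
        "missing_lines": [7],
        "excluded_lines": [],
        "functions": {
            "my_func": {"executed_lines": [2], "missing_lines": []},  # same keys as a file
            "": {"executed_lines": [1, 5], "missing_lines": [7]},  # lines outside any function
        },
    }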
diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py
index 1789c17e6..c8512cdf8 100644
--- a/coverage/lcovreport.py
+++ b/coverage/lcovreport.py
@@ -5,20 +5,153 @@
from __future__ import annotations
-import sys
import base64
-from hashlib import md5
+import hashlib
+import sys
-from typing import IO, Iterable, Optional, TYPE_CHECKING
+from typing import IO, TYPE_CHECKING
+from collections.abc import Iterable
from coverage.plugin import FileReporter
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
from coverage.types import TMorf
if TYPE_CHECKING:
from coverage import Coverage
- from coverage.data import CoverageData
+
+
+def line_hash(line: str) -> str:
+ """Produce a hash of a source line for use in the LCOV file."""
+ # The LCOV file format optionally allows each line to be MD5ed as a
+ # fingerprint of the file. This is not a security use. Some security
+ # scanners raise alarms about the use of MD5 here, but it is a false
+ # positive. This is not a security concern.
+ # The unusual encoding of the MD5 hash, as a base64 sequence with the
+ # trailing = signs stripped, is specified by the LCOV file format.
+ hashed = hashlib.md5(line.encode("utf-8"), usedforsecurity=False).digest()
+ return base64.b64encode(hashed).decode("ascii").rstrip("=")
+
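
Note: the format claims above are easy to verify -- an MD5 digest is 16 bytes,
base64 encodes those as 24 characters ending in "==", and stripping the "="
signs leaves the 22-character suffix written into DA: records:

    import base64, hashlib

    digest = hashlib.md5(b"if x:").digest()
    encoded = base64.b64encode(digest).decode("ascii")
    assert len(encoded) == 24 and encoded.endswith("==")
    assert len(encoded.rstrip("=")) == 22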
+
+def lcov_lines(
+ analysis: Analysis,
+ lines: list[int],
+ source_lines: list[str],
+ outfile: IO[str],
+) -> None:
+ """Emit line coverage records for an analyzed file."""
+ hash_suffix = ""
+ for line in lines:
+ if source_lines:
+ hash_suffix = "," + line_hash(source_lines[line-1])
+ # Q: can we get info about the number of times a statement is
+ # executed? If so, that should be recorded here.
+ hit = int(line not in analysis.missing)
+ outfile.write(f"DA:{line},{hit}{hash_suffix}\n")
+
+ if analysis.numbers.n_statements > 0:
+ outfile.write(f"LF:{analysis.numbers.n_statements}\n")
+ outfile.write(f"LH:{analysis.numbers.n_executed}\n")
+
+
+def lcov_functions(
+ fr: FileReporter,
+ file_analysis: Analysis,
+ outfile: IO[str],
+) -> None:
+ """Emit function coverage records for an analyzed file."""
+ # lcov 2.2 introduces a new format for function coverage records.
+ # We continue to generate the old format because we don't know what
+ # version of the lcov tools will be used to read this report.
+
+ # "and region.lines" below avoids a crash due to a bug in PyPy 3.8
+ # where, for whatever reason, when collecting data in --branch mode,
+ # top-level functions have an empty lines array. Instead we just don't
+ # emit function records for those.
+
+ # suppressions because of https://github.com/pylint-dev/pylint/issues/9923
+ functions = [
+ (min(region.start, min(region.lines)), #pylint: disable=nested-min-max
+ max(region.start, max(region.lines)), #pylint: disable=nested-min-max
+ region)
+ for region in fr.code_regions()
+ if region.kind == "function" and region.lines
+ ]
+ if not functions:
+ return
+
+ functions.sort()
+ functions_hit = 0
+ for first_line, last_line, region in functions:
+ # A function counts as having been executed if any of it has been
+ # executed.
+ analysis = file_analysis.narrow(region.lines)
+ hit = int(analysis.numbers.n_executed > 0)
+ functions_hit += hit
+
+ outfile.write(f"FN:{first_line},{last_line},{region.name}\n")
+ outfile.write(f"FNDA:{hit},{region.name}\n")
+
+ outfile.write(f"FNF:{len(functions)}\n")
+ outfile.write(f"FNH:{functions_hit}\n")
+
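
Note: for a function spanning lines 12-30 that ran at least once, the records
emitted above look like this (all values invented for illustration):

    records = (
        "FN:12,30,parse_args\n"  # FN:<first_line>,<last_line>,<name>
        "FNDA:1,parse_args\n"    # FNDA:<hit>,<name>
        "FNF:1\n"                # functions found
        "FNH:1\n"                # functions hit
    )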
+
+def lcov_arcs(
+ fr: FileReporter,
+ analysis: Analysis,
+ lines: list[int],
+ outfile: IO[str],
+) -> None:
+ """Emit branch coverage records for an analyzed file."""
+ branch_stats = analysis.branch_stats()
+ executed_arcs = analysis.executed_branch_arcs()
+ missing_arcs = analysis.missing_branch_arcs()
+
+ for line in lines:
+ if line not in branch_stats:
+ continue
+
+ # This is only one of several possible ways to map our sets of executed
+ # and not-executed arcs to BRDA codes. It seems to produce reasonable
+ # results when fed through genhtml.
+ _, taken = branch_stats[line]
+
+ if taken == 0:
+ # When _none_ of the out arcs from 'line' were executed,
+ # it can mean the line always raised an exception.
+ assert len(executed_arcs[line]) == 0
+ destinations = [
+ (dst, "-") for dst in missing_arcs[line]
+ ]
+ else:
+ # Q: can we get counts of the number of times each arc was executed?
+ # branch_stats has "total" and "taken" counts for each branch,
+ # but it doesn't have "taken" broken down by destination.
+ destinations = [
+ (dst, "1") for dst in executed_arcs[line]
+ ]
+ destinations.extend(
+ (dst, "0") for dst in missing_arcs[line]
+ )
+
+ # Sort exit arcs after normal arcs. Exit arcs typically come from
+ # an if statement, at the end of a function, with no else clause.
+ # This structure reads like you're jumping to the end of the function
+ # when the conditional expression is false, so it should be presented
+ # as the second alternative for the branch, after the alternative that
+ # enters the if clause.
+ destinations.sort(key=lambda d: (d[0] < 0, d))
+
+ for dst, hit in destinations:
+ branch = fr.arc_description(line, dst)
+ outfile.write(f"BRDA:{line},0,{branch},{hit}\n")
+
+ # Summary of the branch coverage.
+ brf = sum(t for t, k in branch_stats.values())
+ brh = brf - sum(t - k for t, k in branch_stats.values())
+ if brf > 0:
+ outfile.write(f"BRF:{brf}\n")
+ outfile.write(f"BRH:{brh}\n")
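
Note: the sort key in isolation -- exit arcs (negative destinations) land
after normal arcs, with each group in ascending order:

    dests = [(-1, "0"), (14, "1"), (12, "0")]
    dests.sort(key=lambda d: (d[0] < 0, d))
    assert dests == [(12, "0"), (14, "1"), (-1, "0")]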
class LcovReporter:
@@ -28,12 +161,13 @@ class LcovReporter:
def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
+ self.config = coverage.config
self.total = Numbers(self.coverage.config.precision)
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
"""Renders the full lcov report.
- 'morfs' is a list of modules or filenames
+ `morfs` is a list of modules or filenames
outfile is the file object to write the file into.
"""
@@ -41,81 +175,47 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
self.coverage.get_data()
outfile = outfile or sys.stdout
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.get_lcov(fr, analysis, outfile)
+ # ensure file records are sorted by the _relative_ filename, not the full path
+ to_report = [
+ (fr.relative_filename(), fr, analysis)
+ for fr, analysis in get_analysis_to_report(self.coverage, morfs)
+ ]
+ to_report.sort()
+
+ for fname, fr, analysis in to_report:
+ self.total += analysis.numbers
+ self.lcov_file(fname, fr, analysis, outfile)
return self.total.n_statements and self.total.pc_covered
- def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> None:
+ def lcov_file(
+ self,
+ rel_fname: str,
+ fr: FileReporter,
+ analysis: Analysis,
+ outfile: IO[str],
+ ) -> None:
"""Produces the lcov data for a single file.
This currently supports line, branch, and function coverage.
"""
- self.total += analysis.numbers
-
- outfile.write("TN:\n")
- outfile.write(f"SF:{fr.relative_filename()}\n")
- source_lines = fr.source().splitlines()
-
- for covered in sorted(analysis.executed):
- # Note: Coverage.py currently only supports checking *if* a line
- # has been executed, not how many times, so we set this to 1 for
- # nice output even if it's technically incorrect.
-
- # The lines below calculate a 64-bit encoded md5 hash of the line
- # corresponding to the DA lines in the lcov file, for either case
- # of the line being covered or missed in coverage.py. The final two
- # characters of the encoding ("==") are removed from the hash to
- # allow genhtml to run on the resulting lcov file.
- if source_lines:
- line = source_lines[covered-1].encode("utf-8")
- else:
- line = b""
- hashed = base64.b64encode(md5(line).digest()).decode().rstrip("=")
- outfile.write(f"DA:{covered},1,{hashed}\n")
-
- for missed in sorted(analysis.missing):
- assert source_lines
- line = source_lines[missed-1].encode("utf-8")
- hashed = base64.b64encode(md5(line).digest()).decode().rstrip("=")
- outfile.write(f"DA:{missed},0,{hashed}\n")
- outfile.write(f"LF:{analysis.numbers.n_statements}\n")
- outfile.write(f"LH:{analysis.numbers.n_executed}\n")
+ if analysis.numbers.n_statements == 0:
+ if self.config.skip_empty:
+ return
+
+ outfile.write(f"SF:{rel_fname}\n")
+
+ lines = sorted(analysis.statements)
+ if self.config.lcov_line_checksums:
+ source_lines = fr.source().splitlines()
+ else:
+ source_lines = []
- # More information dense branch coverage data.
- missing_arcs = analysis.missing_branch_arcs()
- executed_arcs = analysis.executed_branch_arcs()
- for block_number, block_line_number in enumerate(
- sorted(analysis.branch_stats().keys())
- ):
- for branch_number, line_number in enumerate(
- sorted(missing_arcs[block_line_number])
- ):
- # The exit branches have a negative line number,
- # this will not produce valid lcov. Setting
- # the line number of the exit branch to 0 will allow
- # for valid lcov, while preserving the data.
- line_number = max(line_number, 0)
- outfile.write(f"BRDA:{line_number},{block_number},{branch_number},-\n")
-
- # The start value below allows for the block number to be
- # preserved between these two for loops (stopping the loop from
- # resetting the value of the block number to 0).
- for branch_number, line_number in enumerate(
- sorted(executed_arcs[block_line_number]),
- start=len(missing_arcs[block_line_number]),
- ):
- line_number = max(line_number, 0)
- outfile.write(f"BRDA:{line_number},{block_number},{branch_number},1\n")
-
- # Summary of the branch coverage.
- if analysis.has_arcs():
- branch_stats = analysis.branch_stats()
- brf = sum(t for t, k in branch_stats.values())
- brh = brf - sum(t - k for t, k in branch_stats.values())
- outfile.write(f"BRF:{brf}\n")
- outfile.write(f"BRH:{brh}\n")
+ lcov_lines(analysis, lines, source_lines, outfile)
+ lcov_functions(fr, analysis, outfile)
+ if analysis.has_arcs:
+ lcov_arcs(fr, analysis, lines, outfile)
outfile.write("end_of_record\n")
diff --git a/coverage/misc.py b/coverage/misc.py
index e0658eb18..c5ce7f4ae 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -8,11 +8,11 @@
import contextlib
import datetime
import errno
+import functools
import hashlib
import importlib
import importlib.util
import inspect
-import locale
import os
import os.path
import re
@@ -21,11 +21,10 @@
from types import ModuleType
from typing import (
- Any, Callable, Dict, IO, Iterable, Iterator, List, Mapping, Optional,
- Sequence, Tuple, TypeVar, Union,
+ Any, NoReturn, TypeVar,
)
+from collections.abc import Iterable, Iterator, Mapping, Sequence
-from coverage import env
from coverage.exceptions import CoverageException
from coverage.types import TArc
@@ -34,7 +33,7 @@
# pylint: disable=unused-wildcard-import
from coverage.exceptions import * # pylint: disable=wildcard-import
-ISOLATED_MODULES: Dict[ModuleType, ModuleType] = {}
+ISOLATED_MODULES: dict[ModuleType, ModuleType] = {}
def isolate_module(mod: ModuleType) -> ModuleType:
@@ -80,7 +79,7 @@ def sys_modules_saved() -> Iterator[None]:
saver.restore()
-def import_third_party(modname: str) -> Tuple[ModuleType, bool]:
+def import_third_party(modname: str) -> tuple[ModuleType, bool]:
"""Import a third-party module we need, but might not be installed.
This also cleans out the module after the import, so that coverage won't
@@ -117,30 +116,7 @@ def nice_pair(pair: TArc) -> str:
return "%d-%d" % (start, end)
-TSelf = TypeVar("TSelf")
-TRetVal = TypeVar("TRetVal")
-
-def expensive(fn: Callable[[TSelf], TRetVal]) -> Callable[[TSelf], TRetVal]:
- """A decorator to indicate that a method shouldn't be called more than once.
-
- Normally, this does nothing. During testing, this raises an exception if
- called more than once.
-
- """
- if env.TESTING:
- attr = "_once_" + fn.__name__
-
- def _wrapper(self: TSelf) -> TRetVal:
- if hasattr(self, attr):
- raise AssertionError(f"Shouldn't have called {fn.__name__} more than once")
- setattr(self, attr, True)
- return fn(self)
- return _wrapper
- else:
- return fn # pragma: not testing
-
-
-def bool_or_none(b: Any) -> Optional[bool]:
+def bool_or_none(b: Any) -> bool | None:
"""Return bool(b), but preserve None."""
if b is None:
return None
@@ -180,22 +156,10 @@ def ensure_dir_for_file(path: str) -> None:
ensure_dir(os.path.dirname(path))
-def output_encoding(outfile: Optional[IO[str]] = None) -> str:
- """Determine the encoding to use for output written to `outfile` or stdout."""
- if outfile is None:
- outfile = sys.stdout
- encoding = (
- getattr(outfile, "encoding", None) or
- getattr(sys.__stdout__, "encoding", None) or
- locale.getpreferredencoding()
- )
- return encoding
-
-
class Hasher:
"""Hashes Python data for fingerprinting."""
def __init__(self) -> None:
- self.hash = hashlib.new("sha3_256")
+ self.hash = hashlib.new("sha3_256", usedforsecurity=False)
def update(self, v: Any) -> None:
"""Add `v` to the hash, recursively if needed."""
@@ -218,21 +182,21 @@ def update(self, v: Any) -> None:
self.update(v[k])
else:
for k in dir(v):
- if k.startswith('__'):
+ if k.startswith("__"):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
- self.hash.update(b'.')
+ self.hash.update(b".")
def hexdigest(self) -> str:
"""Retrieve the hex digest of the hash."""
return self.hash.hexdigest()[:32]
-def _needs_to_implement(that: Any, func_name: str) -> None:
+def _needs_to_implement(that: Any, func_name: str) -> NoReturn:
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
@@ -243,7 +207,7 @@ def _needs_to_implement(that: Any, func_name: str) -> None:
name = f"{klass.__module__}.{klass.__name__}"
raise NotImplementedError(
- f"{thing} {name!r} needs to implement {func_name}()"
+ f"{thing} {name!r} needs to implement {func_name}()",
)
@@ -292,7 +256,7 @@ def substitute_variables(text: str, variables: Mapping[str, str]) -> str:
)
"""
- dollar_groups = ('dollar', 'word1', 'word2')
+ dollar_groups = ("dollar", "word1", "word2")
def dollar_replace(match: re.Match[str]) -> str:
"""Called for each $replacement."""
@@ -302,11 +266,11 @@ def dollar_replace(match: re.Match[str]) -> str:
return "$"
elif word in variables:
return variables[word]
- elif match['strict']:
+ elif match["strict"]:
msg = f"Variable {word} is undefined: {text!r}"
raise CoverageException(msg)
else:
- return match['defval']
+ return match["defval"]
text = re.sub(dollar_pattern, dollar_replace, text)
return text
@@ -315,10 +279,10 @@ def dollar_replace(match: re.Match[str]) -> str:
def format_local_datetime(dt: datetime.datetime) -> str:
"""Return a string with local timezone representing the date.
"""
- return dt.astimezone().strftime('%Y-%m-%d %H:%M %z')
+ return dt.astimezone().strftime("%Y-%m-%d %H:%M %z")
-def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType:
+def import_local_file(modname: str, modfile: str | None = None) -> ModuleType:
"""Import a local file as a module.
Opens a file in the current directory named `modname`.py, imports it
@@ -327,7 +291,7 @@ def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType
"""
if modfile is None:
- modfile = modname + '.py'
+ modfile = modname + ".py"
spec = importlib.util.spec_from_file_location(modname, modfile)
assert spec is not None
mod = importlib.util.module_from_spec(spec)
@@ -338,20 +302,25 @@ def import_local_file(modname: str, modfile: Optional[str] = None) -> ModuleType
return mod
-def _human_key(s: str) -> List[Union[str, int]]:
+@functools.cache
+def _human_key(s: str) -> tuple[list[str | int], str]:
"""Turn a string into a list of string and number chunks.
- "z23a" -> ["z", 23, "a"]
+
+ "z23a" -> (["z", 23, "a"], "z23a")
+
+ The original string is appended as a last value to ensure the
+ key is unique enough so that "x1y" and "x001y" can be distinguished.
"""
- def tryint(s: str) -> Union[str, int]:
+ def tryint(s: str) -> str | int:
"""If `s` is a number, return an int, else `s` unchanged."""
try:
return int(s)
except ValueError:
return s
- return [tryint(c) for c in re.split(r"(\d+)", s)]
+ return ([tryint(c) for c in re.split(r"(\d+)", s)], s)
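
Note: assuming _human_key is in scope, the appended string is what keeps
near-duplicate keys distinct while numeric chunks still sort as numbers:

    assert _human_key("z23a")[0] == ["z", 23, "a"]
    assert _human_key("x1y") != _human_key("x001y")  # same chunks, different strings
    assert sorted(["file10", "file2"], key=_human_key) == ["file2", "file10"]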
-def human_sorted(strings: Iterable[str]) -> List[str]:
+def human_sorted(strings: Iterable[str]) -> list[str]:
"""Sort the given iterable of strings the way that humans expect.
Numeric components in the strings are sorted as numbers.
@@ -366,7 +335,7 @@ def human_sorted(strings: Iterable[str]) -> List[str]:
def human_sorted_items(
items: Iterable[SortableItem],
reverse: bool = False,
-) -> List[SortableItem]:
+) -> list[SortableItem]:
"""Sort (string, ...) items the way humans expect.
The elements of `items` can be any tuple/list. They'll be sorted by the
@@ -386,3 +355,15 @@ def plural(n: int, thing: str = "", things: str = "") -> str:
return thing
else:
return things or (thing + "s")
+
+
+def stdout_link(text: str, url: str) -> str:
+ """Format text+url as a clickable link for stdout.
+
+ If attached to a terminal, use escape sequences. Otherwise, just return
+ the text.
+ """
+ if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
+ return f"\033]8;;{url}\a{text}\033]8;;\a"
+ else:
+ return text
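
Note: this uses OSC 8 terminal hyperlinks; when output is piped, isatty() is
False and the plain text comes back. A quick usage sketch:

    print(stdout_link("coverage.py", "https://coverage.readthedocs.io/"))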
diff --git a/coverage/multiproc.py b/coverage/multiproc.py
index e11ca7b70..1ba33ad55 100644
--- a/coverage/multiproc.py
+++ b/coverage/multiproc.py
@@ -3,6 +3,8 @@
"""Monkey-patching to add multiprocessing support for coverage.py"""
+from __future__ import annotations
+
import multiprocessing
import multiprocessing.process
import os
@@ -10,8 +12,9 @@
import sys
import traceback
-from typing import Any, Dict
+from typing import Any
+from coverage.debug import DebugControl
# An attribute that will be set on the module to indicate that it has been
# monkey-patched.
@@ -26,28 +29,36 @@ class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-m
def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def]
"""Wrapper around _bootstrap to start coverage."""
+ debug: DebugControl | None = None
try:
from coverage import Coverage # avoid circular import
cov = Coverage(data_suffix=True, auto_data=True)
cov._warn_preimported_source = False
cov.start()
- debug = cov._debug
- assert debug is not None
- if debug.should("multiproc"):
+ _debug = cov._debug
+ assert _debug is not None
+ if _debug.should("multiproc"):
+ debug = _debug
+ if debug:
debug.write("Calling multiprocessing bootstrap")
except Exception:
- print("Exception during multiprocessing bootstrap init:")
- traceback.print_exc(file=sys.stdout)
- sys.stdout.flush()
+ print("Exception during multiprocessing bootstrap init:", file=sys.stderr)
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
raise
try:
return original_bootstrap(self, *args, **kwargs)
finally:
- if debug.should("multiproc"):
+ if debug:
debug.write("Finished multiprocessing bootstrap")
- cov.stop()
- cov.save()
- if debug.should("multiproc"):
+ try:
+ cov.stop()
+ cov.save()
+ except Exception as exc:
+ if debug:
+ debug.write("Exception during multiprocessing bootstrap cleanup", exc=exc)
+ raise
+ if debug:
debug.write("Saved multiprocessing data")
class Stowaway:
@@ -55,11 +66,11 @@ class Stowaway:
def __init__(self, rcfile: str) -> None:
self.rcfile = rcfile
- def __getstate__(self) -> Dict[str, str]:
- return {'rcfile': self.rcfile}
+ def __getstate__(self) -> dict[str, str]:
+ return {"rcfile": self.rcfile}
- def __setstate__(self, state: Dict[str, str]) -> None:
- patch_multiprocessing(state['rcfile'])
+ def __setstate__(self, state: dict[str, str]) -> None:
+ patch_multiprocessing(state["rcfile"])
def patch_multiprocessing(rcfile: str) -> None:
@@ -83,8 +94,8 @@ def patch_multiprocessing(rcfile: str) -> None:
# When spawning processes rather than forking them, we have no state in the
# new process. We sneak in there with a Stowaway: we stuff one of our own
- # objects into the data that gets pickled and sent to the sub-process. When
- # the Stowaway is unpickled, it's __setstate__ method is called, which
+ # objects into the data that gets pickled and sent to the subprocess. When
+ # the Stowaway is unpickled, its __setstate__ method is called, which
# re-applies the monkey-patch.
# Windows only spawns, so this is needed to keep Windows working.
try:
@@ -93,10 +104,10 @@ def patch_multiprocessing(rcfile: str) -> None:
except (ImportError, AttributeError):
pass
else:
- def get_preparation_data_with_stowaway(name: str) -> Dict[str, Any]:
+ def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]:
"""Get the original preparation data, and also insert our stowaway."""
d = original_get_preparation_data(name)
- d['stowaway'] = Stowaway(rcfile)
+ d["stowaway"] = Stowaway(rcfile)
return d
spawn.get_preparation_data = get_preparation_data_with_stowaway
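
Note: the stowaway trick in miniature -- __setstate__ runs when the child
process unpickles the preparation data, and that call is what re-applies the
monkey-patch there. A simplified stand-in class:

    import pickle

    class Demo:
        def __init__(self, rcfile: str) -> None:
            self.rcfile = rcfile
        def __getstate__(self) -> dict:
            return {"rcfile": self.rcfile}
        def __setstate__(self, state: dict) -> None:
            print("re-applying patch with", state["rcfile"])  # runs on unpickle

    pickle.loads(pickle.dumps(Demo(".coveragerc")))  # prints during the load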
diff --git a/coverage/numbits.py b/coverage/numbits.py
index 26e5c2725..0975a098f 100644
--- a/coverage/numbits.py
+++ b/coverage/numbits.py
@@ -20,7 +20,7 @@
import sqlite3
from itertools import zip_longest
-from typing import Iterable, List
+from collections.abc import Iterable
def nums_to_numbits(nums: Iterable[int]) -> bytes:
@@ -36,14 +36,14 @@ def nums_to_numbits(nums: Iterable[int]) -> bytes:
nbytes = max(nums) // 8 + 1
except ValueError:
# nums was empty.
- return b''
+ return b""
b = bytearray(nbytes)
for num in nums:
b[num//8] |= 1 << num % 8
return bytes(b)
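
Note: worked by hand, number n sets bit (n % 8) of byte (n // 8), so 1 and 9
each set bit 1 of bytes 0 and 1 (assuming the functions here are imported):

    assert nums_to_numbits([1, 9]) == b"\x02\x02"
    assert nums_to_numbits([]) == b""
    assert numbits_to_nums(b"\x02\x02") == [1, 9]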
-def numbits_to_nums(numbits: bytes) -> List[int]:
+def numbits_to_nums(numbits: bytes) -> list[int]:
"""Convert a numbits into a list of numbers.
Arguments:
@@ -82,7 +82,7 @@ def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes:
"""
byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs)
- return intersection_bytes.rstrip(b'\0')
+ return intersection_bytes.rstrip(b"\0")
def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool:
@@ -130,7 +130,7 @@ def register_sqlite_functions(connection: sqlite3.Connection) -> None:
import sqlite3
from coverage.numbits import register_sqlite_functions
- conn = sqlite3.connect('example.db')
+ conn = sqlite3.connect("example.db")
register_sqlite_functions(conn)
c = conn.cursor()
# Kind of a nonsense query:
diff --git a/coverage/parser.py b/coverage/parser.py
index ae70b4f0f..fb74ea9e0 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -6,6 +6,7 @@
from __future__ import annotations
import ast
+import functools
import collections
import os
import re
@@ -13,18 +14,20 @@
import token
import tokenize
+from collections.abc import Iterable, Sequence
+from dataclasses import dataclass
from types import CodeType
-from typing import (
- cast, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple,
-)
+from typing import cast, Callable, Optional, Protocol
from coverage import env
from coverage.bytecode import code_objects
from coverage.debug import short_stack
from coverage.exceptions import NoSource, NotPython
-from coverage.misc import join_regex, nice_pair
+from coverage.misc import isolate_module, nice_pair
from coverage.phystokens import generate_tokens
-from coverage.types import Protocol, TArc, TLineNo
+from coverage.types import TArc, TLineNo
+
+os = isolate_module(os)
class PythonParser:
@@ -36,9 +39,9 @@ class PythonParser:
"""
def __init__(
self,
- text: Optional[str] = None,
- filename: Optional[str] = None,
- exclude: Optional[str] = None,
+ text: str | None = None,
+ filename: str | None = None,
+ exclude: str | None = None,
) -> None:
"""
Source can be provided as `text`, the text itself, or `filename`, from
@@ -59,59 +62,62 @@ def __init__(
self.exclude = exclude
- # The text lines of the parsed code.
- self.lines: List[str] = self.text.split('\n')
+ # The parsed AST of the text.
+ self._ast_root: ast.AST | None = None
# The normalized line numbers of the statements in the code. Exclusions
# are taken into account, and statements are adjusted to their first
# lines.
- self.statements: Set[TLineNo] = set()
+ self.statements: set[TLineNo] = set()
# The normalized line numbers of the excluded lines in the code,
# adjusted to their first lines.
- self.excluded: Set[TLineNo] = set()
+ self.excluded: set[TLineNo] = set()
# The raw_* attributes are only used in this class, and in
# lab/parser.py to show how this class is working.
# The line numbers that start statements, as reported by the line
# number table in the bytecode.
- self.raw_statements: Set[TLineNo] = set()
+ self.raw_statements: set[TLineNo] = set()
# The raw line numbers of excluded lines of code, as marked by pragmas.
- self.raw_excluded: Set[TLineNo] = set()
-
- # The line numbers of class definitions.
- self.raw_classdefs: Set[TLineNo] = set()
+ self.raw_excluded: set[TLineNo] = set()
# The line numbers of docstring lines.
- self.raw_docstrings: Set[TLineNo] = set()
+ self.raw_docstrings: set[TLineNo] = set()
# Internal detail, used by lab/parser.py.
self.show_tokens = False
# A dict mapping line numbers to lexical statement starts for
# multi-line statements.
- self._multiline: Dict[TLineNo, TLineNo] = {}
+ self._multiline: dict[TLineNo, TLineNo] = {}
# Lazily-created arc data, and missing arc descriptions.
- self._all_arcs: Optional[Set[TArc]] = None
- self._missing_arc_fragments: Optional[TArcFragments] = None
+ self._all_arcs: set[TArc] | None = None
+ self._missing_arc_fragments: TArcFragments | None = None
+ self._with_jump_fixers: dict[TArc, tuple[TArc, TArc]] = {}
- def lines_matching(self, *regexes: str) -> Set[TLineNo]:
- """Find the lines matching one of a list of regexes.
+ def lines_matching(self, regex: str) -> set[TLineNo]:
+ """Find the lines matching a regex.
- Returns a set of line numbers, the lines that contain a match for one
- of the regexes in `regexes`. The entire line needn't match, just a
- part of it.
+ Returns a set of line numbers, the lines that contain a match for
+ `regex`. The entire line needn't match, just a part of it.
+ Handles multiline regex patterns.
"""
- combined = join_regex(regexes)
- regex_c = re.compile(combined)
- matches = set()
- for i, ltext in enumerate(self.lines, start=1):
- if regex_c.search(ltext):
- matches.add(i)
+ matches: set[TLineNo] = set()
+
+ last_start = 0
+ last_start_line = 0
+ for match in re.finditer(regex, self.text, flags=re.MULTILINE):
+ start, end = match.span()
+ start_line = last_start_line + self.text.count("\n", last_start, start)
+ end_line = last_start_line + self.text.count("\n", last_start, end)
+ matches.update(self._multiline.get(i, i) for i in range(start_line + 1, end_line + 2))
+ last_start = start
+ last_start_line = start_line
return matches
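
The heart of the rewritten lines_matching is turning the character offsets from match.span() into line numbers by counting newlines. A minimal standalone sketch of that mapping (the sample text and pattern are illustrative):

    import re

    text = "a = 1\nif debug:  # no cover\n    log()\n"
    match = re.search(r"# no cover", text)
    start, _ = match.span()
    lineno = 1 + text.count("\n", 0, start)  # newlines before the match start
    assert lineno == 2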
def _raw_parse(self) -> None:
@@ -123,18 +129,20 @@ def _raw_parse(self) -> None:
# Find lines which match an exclusion pattern.
if self.exclude:
self.raw_excluded = self.lines_matching(self.exclude)
-
- # Tokenize, to find excluded suites, to find docstrings, and to find
- # multi-line statements.
- indent = 0
- exclude_indent = 0
- excluding = False
- excluding_decorators = False
- prev_toktype = token.INDENT
- first_line = None
- empty = True
- first_on_line = True
- nesting = 0
+ self.excluded = set(self.raw_excluded)
+
+ # The current number of indents.
+ indent: int = 0
+ # An exclusion comment will exclude an entire clause at this indent.
+ exclude_indent: int = 0
+ # Are we currently excluding lines?
+ excluding: bool = False
+ # The line number of the first line in a multi-line statement.
+ first_line: int = 0
+ # Is the file empty?
+ empty: bool = True
+ # Parenthesis (and bracket) nesting level.
+ nesting: int = 0
assert self.text is not None
tokgen = generate_tokens(self.text)
@@ -142,69 +150,48 @@ def _raw_parse(self) -> None:
if self.show_tokens: # pragma: debugging
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
- nice_pair((slineno, elineno)), ttext, ltext
+ nice_pair((slineno, elineno)), ttext, ltext,
))
if toktype == token.INDENT:
indent += 1
elif toktype == token.DEDENT:
indent -= 1
- elif toktype == token.NAME:
- if ttext == 'class':
- # Class definitions look like branches in the bytecode, so
- # we need to exclude them. The simplest way is to note the
- # lines with the 'class' keyword.
- self.raw_classdefs.add(slineno)
elif toktype == token.OP:
- if ttext == ':' and nesting == 0:
- should_exclude = (elineno in self.raw_excluded) or excluding_decorators
+ if ttext == ":" and nesting == 0:
+ should_exclude = (
+ self.excluded.intersection(range(first_line, elineno + 1))
+ )
if not excluding and should_exclude:
# Start excluding a suite. We trigger off of the colon
# token so that the #pragma comment will be recognized on
# the same line as the colon.
- self.raw_excluded.add(elineno)
+ self.excluded.add(elineno)
exclude_indent = indent
excluding = True
- excluding_decorators = False
- elif ttext == '@' and first_on_line:
- # A decorator.
- if elineno in self.raw_excluded:
- excluding_decorators = True
- if excluding_decorators:
- self.raw_excluded.add(elineno)
elif ttext in "([{":
nesting += 1
elif ttext in ")]}":
nesting -= 1
- elif toktype == token.STRING and prev_toktype == token.INDENT:
- # Strings that are first on an indented line are docstrings.
- # (a trick from trace.py in the stdlib.) This works for
- # 99.9999% of cases. For the rest (!) see:
- # http://stackoverflow.com/questions/1769332/x/1769794#1769794
- self.raw_docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
- if first_line is not None and elineno != first_line: # type: ignore[unreachable]
+ if first_line and elineno != first_line:
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
- for l in range(first_line, elineno+1): # type: ignore[unreachable]
+ for l in range(first_line, elineno+1):
self._multiline[l] = first_line
- first_line = None
- first_on_line = True
+ first_line = 0
if ttext.strip() and toktype != tokenize.COMMENT:
# A non-white-space token.
empty = False
- if first_line is None:
+ if not first_line:
# The token is not white space, and is the first in a statement.
first_line = slineno
# Check whether to end an excluded suite.
if excluding and indent <= exclude_indent:
excluding = False
if excluding:
- self.raw_excluded.add(elineno)
- first_on_line = False
-
- prev_toktype = toktype
+ self.excluded.add(elineno)
# Find the starts of the executable statements.
if not empty:
@@ -217,6 +204,32 @@ def _raw_parse(self) -> None:
if env.PYBEHAVIOR.module_firstline_1 and self._multiline:
self._multiline[1] = min(self.raw_statements)
+ self.excluded = self.first_lines(self.excluded)
+
+ # AST lets us find classes, docstrings, and decorator-affected
+ # functions and classes.
+ assert self._ast_root is not None
+ for node in ast.walk(self._ast_root):
+ # Find docstrings.
+ if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef, ast.Module)):
+ if node.body:
+ first = node.body[0]
+ if (
+ isinstance(first, ast.Expr)
+ and isinstance(first.value, ast.Constant)
+ and isinstance(first.value.value, str)
+ ):
+ self.raw_docstrings.update(
+ range(first.lineno, cast(int, first.end_lineno) + 1)
+ )
+ # Exclusions carry from decorators and signatures to the bodies of
+ # functions and classes.
+ if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
+ first_line = min((d.lineno for d in node.decorator_list), default=node.lineno)
+ if self.excluded.intersection(range(first_line, node.lineno + 1)):
+ self.excluded.update(range(first_line, cast(int, node.end_lineno) + 1))
+
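The effect of the new decorator handling, on an illustrative snippet: an exclusion pragma anywhere from the first decorator line through the def line now excludes the whole definition:

    @expensive_check          # pragma: no cover  (matches the exclusion regex)
    def debug_only():         # excluded: in range(first_line, node.lineno + 1)
        do_stuff()            # excluded too, through node.end_lineno
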
+ @functools.lru_cache(maxsize=1000)
def first_line(self, lineno: TLineNo) -> TLineNo:
"""Return the first line number of the statement including `lineno`."""
if lineno < 0:
@@ -225,7 +238,7 @@ def first_line(self, lineno: TLineNo) -> TLineNo:
lineno = self._multiline.get(lineno, lineno)
return lineno
- def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]:
+ def first_lines(self, linenos: Iterable[TLineNo]) -> set[TLineNo]:
"""Map the line numbers in `linenos` to the correct first line of the
statement.
@@ -234,13 +247,13 @@ def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]:
"""
return {self.first_line(l) for l in linenos}
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
"""Implement `FileReporter.translate_lines`."""
return self.first_lines(lines)
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
"""Implement `FileReporter.translate_arcs`."""
- return {(self.first_line(a), self.first_line(b)) for (a, b) in arcs}
+ return {(self.first_line(a), self.first_line(b)) for (a, b) in self.fix_with_jumps(arcs)}
def parse_source(self) -> None:
"""Parse source text to find executable lines, excluded lines, etc.
@@ -250,24 +263,23 @@ def parse_source(self) -> None:
"""
try:
+ self._ast_root = ast.parse(self.text)
self._raw_parse()
- except (tokenize.TokenError, IndentationError) as err:
+ except (tokenize.TokenError, IndentationError, SyntaxError) as err:
if hasattr(err, "lineno"):
lineno = err.lineno # IndentationError
else:
lineno = err.args[1][0] # TokenError
raise NotPython(
f"Couldn't parse '{self.filename}' as Python source: " +
- f"{err.args[0]!r} at line {lineno}"
+ f"{err.args[0]!r} at line {lineno}",
) from err
- self.excluded = self.first_lines(self.raw_excluded)
-
ignore = self.excluded | self.raw_docstrings
starts = self.raw_statements - ignore
self.statements = self.first_lines(starts) - ignore
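
The closing set arithmetic, with illustrative values (the first_lines() normalization is omitted from this sketch):

    raw_statements = {1, 2, 3, 5}    # statement starts from the bytecode
    excluded = {3}                   # matched the exclusion regex
    raw_docstrings = {5}
    ignore = excluded | raw_docstrings
    statements = raw_statements - ignore   # {1, 2} count toward coverage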
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
"""Get information about the arcs available in the code.
Returns a set of line number pairs. Line numbers have been normalized
@@ -285,11 +297,17 @@ def _analyze_ast(self) -> None:
`_all_arcs` is the set of arcs in the code.
"""
- aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
+ assert self._ast_root is not None
+ aaa = AstArcAnalyzer(self.filename, self._ast_root, self.raw_statements, self._multiline)
aaa.analyze()
+ arcs = aaa.arcs
+ if env.PYBEHAVIOR.exit_through_with:
+ self._with_jump_fixers = aaa.with_jump_fixers()
+ if self._with_jump_fixers:
+ arcs = self.fix_with_jumps(arcs)
self._all_arcs = set()
- for l1, l2 in aaa.arcs:
+ for l1, l2 in arcs:
fl1 = self.first_line(l1)
fl2 = self.first_line(l2)
if fl1 != fl2:
@@ -297,17 +315,56 @@ def _analyze_ast(self) -> None:
self._missing_arc_fragments = aaa.missing_arc_fragments
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def fix_with_jumps(self, arcs: Iterable[TArc]) -> set[TArc]:
+ """Adjust arcs to fix jumps leaving `with` statements.
+
+ Consider this code:
+
+ with open("/tmp/test", "w") as f1:
+ a = 2
+ b = 3
+ print(4)
+
+ In 3.10+, we get traces for lines 1, 2, 3, 1, 4. But we want to present
+ it to the user as if it had been 1, 2, 3, 4. The arc 3->1 should be
+ replaced with 3->4, and 1->4 should be removed.
+
+ For this code, the fixers dict is {(3, 1): ((1, 4), (3, 4))}. The key
+ is the actual measured arc from the end of the with block back to the
+ start of the with-statement. The values are start_next (the with
+ statement to the next statement after the with), and end_next (the end
+ of the with-statement to the next statement after the with).
+
+ With nested with-statements, we have to trace through a few levels to
+ correct a longer chain of arcs.
+
+ """
+ to_remove = set()
+ to_add = set()
+ for arc in arcs:
+ if arc in self._with_jump_fixers:
+ end0 = arc[0]
+ to_remove.add(arc)
+ start_next, end_next = self._with_jump_fixers[arc]
+ while start_next in self._with_jump_fixers:
+ to_remove.add(start_next)
+ start_next, end_next = self._with_jump_fixers[start_next]
+ to_remove.add(end_next)
+ to_add.add((end0, end_next[1]))
+ to_remove.add(start_next)
+ arcs = (set(arcs) | to_add) - to_remove
+ return arcs
+
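Concretely, for the four-line example in the docstring, the inputs look like this (the values restate the docstring; nothing here is newly computed):

    measured = {(1, 2), (2, 3), (3, 1), (1, 4)}      # traces: 1, 2, 3, 1, 4
    fixers = {(3, 1): ((1, 4), (3, 4))}
    # key (3, 1): the jump from the body's last line back to the `with`;
    # values: start_next (1, 4) and end_next (3, 4), the arc to present.
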
+ @functools.lru_cache
+ def exit_counts(self) -> dict[TLineNo, int]:
"""Get a count of exits from that each line.
Excluded lines are excluded.
"""
- exit_counts: Dict[TLineNo, int] = collections.defaultdict(int)
+ exit_counts: dict[TLineNo, int] = collections.defaultdict(int)
for l1, l2 in self.arcs():
- if l1 < 0:
- # Don't ever report -1 as a line number
- continue
+ assert l1 > 0, f"{l1=} should be greater than zero in {self.filename}"
if l1 in self.excluded:
# Don't report excluded lines as line numbers.
continue
@@ -316,59 +373,47 @@ def exit_counts(self) -> Dict[TLineNo, int]:
continue
exit_counts[l1] += 1
- # Class definitions have one extra exit, so remove one for each:
- for l in self.raw_classdefs:
- # Ensure key is there: class definitions can include excluded lines.
- if l in exit_counts:
- exit_counts[l] -= 1
-
return exit_counts
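
A minimal sketch of the counting, using illustrative arcs for a two-way `if` on line 1:

    import collections

    arcs = {(1, 2), (1, 3), (2, 3)}
    exit_counts = collections.defaultdict(int)
    for l1, _ in arcs:
        exit_counts[l1] += 1
    assert dict(exit_counts) == {1: 2, 2: 1}  # line 1 has two exits: a branch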
- def missing_arc_description(
- self,
- start: TLineNo,
- end: TLineNo,
- executed_arcs: Optional[Iterable[TArc]] = None,
- ) -> str:
+ def _finish_action_msg(self, action_msg: str | None, end: TLineNo) -> str:
+ """Apply some defaulting and formatting to an arc's description."""
+ if action_msg is None:
+ if end < 0:
+ action_msg = "jump to the function exit"
+ else:
+ action_msg = "jump to line {lineno}"
+ action_msg = action_msg.format(lineno=end)
+ return action_msg
+
+ def missing_arc_description(self, start: TLineNo, end: TLineNo) -> str:
"""Provide an English sentence describing a missing arc."""
if self._missing_arc_fragments is None:
self._analyze_ast()
assert self._missing_arc_fragments is not None
- actual_start = start
-
- if (
- executed_arcs and
- end < 0 and end == -start and
- (end, start) not in executed_arcs and
- (end, start) in self._missing_arc_fragments
- ):
- # It's a one-line callable, and we never even started it,
- # and we have a message about not starting it.
- start, end = end, start
-
fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
msgs = []
- for smsg, emsg in fragment_pairs:
- if emsg is None:
- if end < 0:
- # Hmm, maybe we have a one-line callable, let's check.
- if (-end, end) in self._missing_arc_fragments:
- return self.missing_arc_description(-end, end)
- emsg = "didn't jump to the function exit"
- else:
- emsg = "didn't jump to line {lineno}"
- emsg = emsg.format(lineno=end)
-
- msg = f"line {actual_start} {emsg}"
- if smsg is not None:
- msg += f", because {smsg.format(lineno=actual_start)}"
+ for missing_cause_msg, action_msg in fragment_pairs:
+ action_msg = self._finish_action_msg(action_msg, end)
+ msg = f"line {start} didn't {action_msg}"
+ if missing_cause_msg is not None:
+ msg += f" because {missing_cause_msg.format(lineno=start)}"
msgs.append(msg)
return " or ".join(msgs)
+ def arc_description(self, start: TLineNo, end: TLineNo) -> str:
+ """Provide an English description of an arc's effect."""
+ if self._missing_arc_fragments is None:
+ self._analyze_ast()
+ assert self._missing_arc_fragments is not None
+
+ fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
+ action_msg = self._finish_action_msg(fragment_pairs[0][1], end)
+ return action_msg
+
class ByteParser:
"""Parse bytecode to understand the structure of code."""
@@ -376,30 +421,34 @@ class ByteParser:
def __init__(
self,
text: str,
- code: Optional[CodeType] = None,
- filename: Optional[str] = None,
+ code: CodeType | None = None,
+ filename: str | None = None,
) -> None:
self.text = text
if code is not None:
self.code = code
else:
assert filename is not None
- try:
- self.code = compile(text, filename, "exec", dont_inherit=True)
- except SyntaxError as synerr:
- raise NotPython(
- "Couldn't parse '%s' as Python source: '%s' at line %d" % (
- filename, synerr.msg, synerr.lineno or 0
- )
- ) from synerr
+ # We only get here if earlier ast parsing succeeded, so no need to
+ # catch errors.
+ self.code = compile(text, filename, "exec", dont_inherit=True)
def child_parsers(self) -> Iterable[ByteParser]:
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
+ We skip code objects named `__annotate__` since they are deferred
+ annotations that are usually never run. If there are errors in the
+ annotations, they will be caught by type checkers or other tools that
+ use annotations.
+
"""
- return (ByteParser(self.text, code=c) for c in code_objects(self.code))
+ return (
+ ByteParser(self.text, code=c)
+ for c in code_objects(self.code)
+ if c.co_name != "__annotate__"
+ )
def _line_numbers(self) -> Iterable[TLineNo]:
"""Yield the line numbers possible in this code object.
@@ -408,6 +457,7 @@ def _line_numbers(self) -> Iterable[TLineNo]:
line numbers. Produces a sequence: l0, l1, ...
"""
if hasattr(self.code, "co_lines"):
+ # PYVERSIONS: new in 3.10
for _, _, line in self.code.co_lines():
if line:
yield line
@@ -425,7 +475,7 @@ def _line_numbers(self) -> Iterable[TLineNo]:
yield line_num
last_line_num = line_num
byte_num += byte_incr
- if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80:
+ if line_incr >= 0x80:
line_incr -= 0x100
line_num += line_incr
if line_num != last_line_num:
@@ -447,20 +497,38 @@ def _find_statements(self) -> Iterable[TLineNo]:
# AST analysis
#
-class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
+@dataclass(frozen=True, order=True)
+class ArcStart:
"""The information needed to start an arc.
`lineno` is the line number the arc starts from.
- `cause` is an English text fragment used as the `startmsg` for
+ `cause` is an English text fragment used as the `missing_cause_msg` for
AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
arc wasn't executed, so should fit well into a sentence of the form,
"Line 17 didn't run because {cause}." The fragment can include "{lineno}"
to have `lineno` interpolated into it.
+ As an example, this code::
+
+ if something(x): # line 1
+ func(x) # line 2
+ more_stuff() # line 3
+
+ would have two ArcStarts:
+
+ - ArcStart(1, "the condition on line 1 was always true")
+ - ArcStart(1, "the condition on line 1 was never true")
+
+ The first would be used to create an arc from 1 to 3, creating a message like
+ "line 1 didn't jump to line 3 because the condition on line 1 was always true."
+
+ The second would be used for the arc from 1 to 2, creating a message like
+ "line 1 didn't jump to line 2 because the condition on line 1 was never true."
+
"""
- def __new__(cls, lineno: TLineNo, cause: Optional[str] = None) -> ArcStart:
- return super().__new__(cls, lineno, cause)
+ lineno: TLineNo
+ cause: str = ""
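
A usage note on the dataclass conversion: frozen=True keeps ArcStart hashable, so the exit sets used throughout the analyzer keep working, and order=True preserves the sortability the namedtuple version had. For example:

    a = ArcStart(17, cause="the condition on line {lineno} was never true")
    exits = {a, ArcStart(17)}   # hashable, so sets of ArcStarts still work
    assert a.lineno == 17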
class TAddArcFn(Protocol):
@@ -469,12 +537,23 @@ def __call__(
self,
start: TLineNo,
end: TLineNo,
- smsg: Optional[str] = None,
- emsg: Optional[str] = None,
+ missing_cause_msg: str | None = None,
+ action_msg: str | None = None,
) -> None:
- ...
+ """
+ Record an arc from `start` to `end`.
+
+ `missing_cause_msg` is a description of the reason the arc wasn't
+ taken if it wasn't taken. For example, "the condition on line 10 was
+ never true."
+
+ `action_msg` is a description of what the arc does, like "jump to line
+ 10" or "exit from function 'fooey'."
+
+ """
-TArcFragments = Dict[TArc, List[Tuple[Optional[str], Optional[str]]]]
+
+TArcFragments = dict[TArc, list[tuple[Optional[str], Optional[str]]]]
class Block:
"""
@@ -486,23 +565,19 @@ class Block:
stack.
"""
# pylint: disable=unused-argument
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process break exits."""
- # Because break can only appear in loops, and most subclasses
- # implement process_break_exits, this function is never reached.
- raise AssertionError
+ return False
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process continue exits."""
- # Because continue can only appear in loops, and most subclasses
- # implement process_continue_exits, this function is never reached.
- raise AssertionError
+ return False
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process raise exits."""
return False
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
"""Process return exits."""
return False
@@ -513,13 +588,13 @@ def __init__(self, start: TLineNo) -> None:
# The line number where the loop starts.
self.start = start
# A set of ArcStarts, the arcs from break statements exiting this loop.
- self.break_exits: Set[ArcStart] = set()
+ self.break_exits: set[ArcStart] = set()
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
self.break_exits.update(exits)
return True
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(xit.lineno, self.start, xit.cause)
return True
@@ -533,107 +608,37 @@ def __init__(self, start: TLineNo, name: str) -> None:
# The name of the function.
self.name = name
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
- f"didn't except from function {self.name!r}",
+ f"except from function {self.name!r}",
)
return True
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
- f"didn't return from function {self.name!r}",
+ f"return from function {self.name!r}",
)
return True
class TryBlock(Block):
"""A block on the block stack representing a `try` block."""
- def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLineNo]) -> None:
+ def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None:
# The line number of the first "except" handler, if any.
self.handler_start = handler_start
# The line number of the "finally:" clause, if any.
self.final_start = final_start
- # The ArcStarts for breaks/continues/returns/raises inside the "try:"
- # that need to route through the "finally:" clause.
- self.break_from: Set[ArcStart] = set()
- self.continue_from: Set[ArcStart] = set()
- self.raise_from: Set[ArcStart] = set()
- self.return_from: Set[ArcStart] = set()
-
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
- if self.final_start is not None:
- self.break_from.update(exits)
- return True
- return False
-
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
- if self.final_start is not None:
- self.continue_from.update(exits)
- return True
- return False
-
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
if self.handler_start is not None:
for xit in exits:
add_arc(xit.lineno, self.handler_start, xit.cause)
- else:
- assert self.final_start is not None
- self.raise_from.update(exits)
- return True
-
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
- if self.final_start is not None:
- self.return_from.update(exits)
- return True
- return False
-
-
-class WithBlock(Block):
- """A block on the block stack representing a `with` block."""
- def __init__(self, start: TLineNo) -> None:
- # We only ever use this block if it is needed, so that we don't have to
- # check this setting in all the methods.
- assert env.PYBEHAVIOR.exit_through_with
-
- # The line number of the with statement.
- self.start = start
-
- # The ArcStarts for breaks/continues/returns/raises inside the "with:"
- # that need to go through the with-statement while exiting.
- self.break_from: Set[ArcStart] = set()
- self.continue_from: Set[ArcStart] = set()
- self.return_from: Set[ArcStart] = set()
-
- def _process_exits(
- self,
- exits: Set[ArcStart],
- add_arc: TAddArcFn,
- from_set: Optional[Set[ArcStart]] = None,
- ) -> bool:
- """Helper to process the four kinds of exits."""
- for xit in exits:
- add_arc(xit.lineno, self.start, xit.cause)
- if from_set is not None:
- from_set.update(exits)
return True
- def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
- return self._process_exits(exits, add_arc, self.break_from)
-
- def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
- return self._process_exits(exits, add_arc, self.continue_from)
-
- def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
- return self._process_exits(exits, add_arc)
-
- def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool:
- return self._process_exits(exits, add_arc, self.return_from)
-
class NodeList(ast.AST):
"""A synthetic fictitious node, containing a sequence of nodes.
@@ -644,86 +649,151 @@ class NodeList(ast.AST):
"""
def __init__(self, body: Sequence[ast.AST]) -> None:
self.body = body
- self.lineno = body[0].lineno
+ self.lineno = body[0].lineno # type: ignore[attr-defined]
-# TODO: some add_arcs methods here don't add arcs, they return them. Rename them.
-# TODO: the cause messages have too many commas.
# TODO: Shouldn't the cause messages join with "and" instead of "or"?
-def _make_expression_code_method(noun: str) -> Callable[[AstArcAnalyzer, ast.AST], None]:
- """A function to make methods for expression-based callable _code_object__ methods."""
- def _code_object__expression_callable(self: AstArcAnalyzer, node: ast.AST) -> None:
- start = self.line_for_node(node)
- self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}")
- self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}")
- return _code_object__expression_callable
-
class AstArcAnalyzer:
- """Analyze source text with an AST to find executable code paths."""
+ """Analyze source text with an AST to find executable code paths.
+
+ The .analyze() method does the work, and populates these attributes:
+
+ `arcs`: a set of (from, to) pairs of the arcs possible in the code.
+
+ `missing_arc_fragments`: a dict mapping (from, to) arcs to lists of
+ message fragments explaining why the arc is missing from execution::
+
+ { (start, end): [(missing_cause_msg, action_msg), ...], }
+
+ For an arc starting from line 17, they should be usable to form complete
+ sentences like: "Line 17 didn't {action_msg} because {missing_cause_msg}".
+
+ NOTE: Starting in July 2024, I've been whittling this down to only report
+ arcs that are part of true branches. It's not clear how far this work will
+ go.
+
+ """
def __init__(
self,
- text: str,
- statements: Set[TLineNo],
- multiline: Dict[TLineNo, TLineNo],
+ filename: str,
+ root_node: ast.AST,
+ statements: set[TLineNo],
+ multiline: dict[TLineNo, TLineNo],
) -> None:
- self.root_node = ast.parse(text)
- # TODO: I think this is happening in too many places.
+ self.filename = filename
+ self.root_node = root_node
self.statements = {multiline.get(l, l) for l in statements}
self.multiline = multiline
# Turn on AST dumps with an environment variable.
# $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
- dump_ast = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))
+ dump_ast = bool(int(os.getenv("COVERAGE_AST_DUMP", "0")))
if dump_ast: # pragma: debugging
# Dump the AST so that failing tests have helpful output.
print(f"Statements: {self.statements}")
print(f"Multiline map: {self.multiline}")
- ast_dump(self.root_node)
+ print(ast.dump(self.root_node, include_attributes=True, indent=4))
- self.arcs: Set[TArc] = set()
-
- # A map from arc pairs to a list of pairs of sentence fragments:
- # { (start, end): [(startmsg, endmsg), ...], }
- #
- # For an arc from line 17, they should be usable like:
- # "Line 17 {endmsg}, because {startmsg}"
+ self.arcs: set[TArc] = set()
self.missing_arc_fragments: TArcFragments = collections.defaultdict(list)
- self.block_stack: List[Block] = []
+ self.block_stack: list[Block] = []
+
+ # If `with` clauses jump to their start on the way out, we need
+ # information to be able to skip over that jump. We record the arcs
+ # from `with` into the clause (with_entries), and the arcs from the
+ # clause to the `with` (with_exits).
+ self.current_with_starts: set[TLineNo] = set()
+ self.all_with_starts: set[TLineNo] = set()
+ self.with_entries: set[TArc] = set()
+ self.with_exits: set[TArc] = set()
# $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code.
- self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
+ self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0")))
def analyze(self) -> None:
- """Examine the AST tree from `root_node` to determine possible arcs.
-
- This sets the `arcs` attribute to be a set of (from, to) line number
- pairs.
-
- """
+ """Examine the AST tree from `self.root_node` to determine possible arcs."""
for node in ast.walk(self.root_node):
node_name = node.__class__.__name__
code_object_handler = getattr(self, "_code_object__" + node_name, None)
if code_object_handler is not None:
code_object_handler(node)
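
The dispatch is by AST node class name. A standalone sketch of the same getattr pattern (the source string is illustrative):

    import ast

    tree = ast.parse("def f():\n    return 1\n")
    for node in ast.walk(tree):
        handler_name = "_code_object__" + node.__class__.__name__
        print(handler_name)   # _code_object__Module, _code_object__FunctionDef, ...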
+ def with_jump_fixers(self) -> dict[TArc, tuple[TArc, TArc]]:
+ """Get a dict with data for fixing jumps out of with statements.
+
+ Returns a dict. The keys are arcs leaving a with-statement by jumping
+ back to its start. The values are pairs: first, the arc from the start
+ to the next statement, then the arc that exits the with without going
+ to the start.
+
+ """
+ fixers = {}
+ with_nexts = {
+ arc
+ for arc in self.arcs
+ if arc[0] in self.all_with_starts and arc not in self.with_entries
+ }
+ for start in self.all_with_starts:
+ nexts = {arc[1] for arc in with_nexts if arc[0] == start}
+ if not nexts:
+ continue
+ assert len(nexts) == 1, f"Expected one arc, got {nexts} with {start = }"
+ nxt = nexts.pop()
+ ends = {arc[0] for arc in self.with_exits if arc[1] == start}
+ for end in ends:
+ fixers[(end, start)] = ((start, nxt), (end, nxt))
+ return fixers
+
+ # Code object dispatchers: _code_object__*
+ #
+ # These methods are used by analyze() as the start of the analysis.
+ # There is one for each construct with a code object.
+
+ def _code_object__Module(self, node: ast.Module) -> None:
+ start = self.line_for_node(node)
+ if node.body:
+ exits = self.process_body(node.body)
+ for xit in exits:
+ self.add_arc(xit.lineno, -start, xit.cause, "exit the module")
+ else:
+ # Empty module.
+ self.add_arc(start, -start)
+
+ def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None:
+ start = self.line_for_node(node)
+ self.block_stack.append(FunctionBlock(start=start, name=node.name))
+ exits = self.process_body(node.body)
+ self.process_return_exits(exits)
+ self.block_stack.pop()
+
+ _code_object__AsyncFunctionDef = _code_object__FunctionDef
+
+ def _code_object__ClassDef(self, node: ast.ClassDef) -> None:
+ start = self.line_for_node(node)
+ exits = self.process_body(node.body)
+ for xit in exits:
+ self.add_arc(xit.lineno, -start, xit.cause, f"exit class {node.name!r}")
+
def add_arc(
self,
start: TLineNo,
end: TLineNo,
- smsg: Optional[str] = None,
- emsg: Optional[str] = None,
+ missing_cause_msg: str | None = None,
+ action_msg: str | None = None,
) -> None:
"""Add an arc, including message fragments to use if it is missing."""
if self.debug: # pragma: debugging
- print(f"\nAdding possible arc: ({start}, {end}): {smsg!r}, {emsg!r}")
- print(short_stack(limit=10))
+ print(f"Adding possible arc: ({start}, {end}): {missing_cause_msg!r}, {action_msg!r}")
+ print(short_stack(), end="\n\n")
self.arcs.add((start, end))
+ if start in self.current_with_starts:
+ self.with_entries.add((start, end))
- if smsg is not None or emsg is not None:
- self.missing_arc_fragments[(start, end)].append((smsg, emsg))
+ if missing_cause_msg is not None or action_msg is not None:
+ self.missing_arc_fragments[(start, end)].append((missing_cause_msg, action_msg))
def nearest_blocks(self) -> Iterable[Block]:
"""Yield the blocks in nearest-to-farthest order."""
@@ -738,19 +808,25 @@ def line_for_node(self, node: ast.AST) -> TLineNo:
node_name = node.__class__.__name__
handler = cast(
Optional[Callable[[ast.AST], TLineNo]],
- getattr(self, "_line__" + node_name, None)
+ getattr(self, "_line__" + node_name, None),
)
if handler is not None:
- return handler(node)
+ line = handler(node)
else:
- return node.lineno
+ line = node.lineno # type: ignore[attr-defined]
+ return self.multiline.get(line, line)
+
+ # First lines: _line__*
+ #
+ # Dispatched by line_for_node, each method knows how to identify the first
+ # line number in the node, as Python will report it.
def _line_decorated(self, node: ast.FunctionDef) -> TLineNo:
"""Compute first line number for things that can be decorated (classes and functions)."""
- lineno = node.lineno
- if env.PYBEHAVIOR.trace_decorated_def or env.PYBEHAVIOR.def_ast_no_decorator:
- if node.decorator_list:
- lineno = node.decorator_list[0].lineno
+ if node.decorator_list:
+ lineno = node.decorator_list[0].lineno
+ else:
+ lineno = node.lineno
return lineno
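
For reference, how Python's ast module reports these line numbers (a standalone check; `deco` is a placeholder name):

    import ast

    node = ast.parse("@deco\ndef f():\n    pass\n").body[0]
    assert node.lineno == 2                    # ast points at the `def` line
    assert node.decorator_list[0].lineno == 1  # the decorator's own line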
def _line__Assign(self, node: ast.Assign) -> TLineNo:
@@ -763,7 +839,7 @@ def _line__Dict(self, node: ast.Dict) -> TLineNo:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
- # Unpacked dict literals `{**{'a':1}}` have None as the key,
+ # Unpacked dict literals `{**{"a":1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
@@ -793,8 +869,8 @@ def _line__Module(self, node: ast.Module) -> TLineNo:
"Import", "ImportFrom", "Nonlocal", "Pass",
}
- def add_arcs(self, node: ast.AST) -> Set[ArcStart]:
- """Add the arcs for `node`.
+ def node_exits(self, node: ast.AST) -> set[ArcStart]:
+ """Find the set of arc starts that exit this node.
Return a set of ArcStarts, exits from this node to the next. Because a
node represents an entire sub-tree (including its children), the exits
@@ -806,16 +882,17 @@ def add_arcs(self, node: ast.AST) -> Set[ArcStart]:
else:
doit(5)
- There are two exits from line 1: they start at line 3 and line 5.
+ There are three exits from line 1: they start at lines 1, 3 and 5.
+ There are two exits from line 2: lines 3 and 5.
"""
node_name = node.__class__.__name__
handler = cast(
- Optional[Callable[[ast.AST], Set[ArcStart]]],
- getattr(self, "_handle__" + node_name, None)
+ Optional[Callable[[ast.AST], set[ArcStart]]],
+ getattr(self, "_handle__" + node_name, None),
)
if handler is not None:
- return handler(node)
+ arc_starts = handler(node)
else:
# No handler: either it's something that's ok to default (a simple
# statement), or it's something we overlooked.
@@ -824,31 +901,42 @@ def add_arcs(self, node: ast.AST) -> Set[ArcStart]:
raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure
# Default for simple statements: one exit from this node.
- return {ArcStart(self.line_for_node(node))}
+ arc_starts = {ArcStart(self.line_for_node(node))}
+ return arc_starts
- def add_body_arcs(
+ def process_body(
self,
body: Sequence[ast.AST],
- from_start: Optional[ArcStart] = None,
- prev_starts: Optional[Set[ArcStart]] = None
- ) -> Set[ArcStart]:
- """Add arcs for the body of a compound statement.
-
- `body` is the body node. `from_start` is a single `ArcStart` that can
- be the previous line in flow before this body. `prev_starts` is a set
- of ArcStarts that can be the previous line. Only one of them should be
+ from_start: ArcStart | None = None,
+ prev_starts: set[ArcStart] | None = None,
+ ) -> set[ArcStart]:
+ """Process the body of a compound statement.
+
+ `body` is the body node to process.
+
+ `from_start` is a single `ArcStart` that starts an arc into this body.
+ `prev_starts` is a set of ArcStarts that can all be the start of arcs
+ into this body. Only one of `from_start` and `prev_starts` should be
given.
+ Records arcs within the body by calling `self.add_arc`.
+
Returns a set of ArcStarts, the exits from this body.
"""
if prev_starts is None:
- assert from_start is not None
- prev_starts = {from_start}
+ if from_start is None:
+ prev_starts = set()
+ else:
+ prev_starts = {from_start}
+ else:
+ assert from_start is None
+
+ # Loop over the nodes in the body, making arcs from each one's exits to
+ # the next node.
for body_node in body:
lineno = self.line_for_node(body_node)
- first_line = self.multiline.get(lineno, lineno)
- if first_line not in self.statements:
+ if lineno not in self.statements:
maybe_body_node = self.find_non_missing_node(body_node)
if maybe_body_node is None:
continue
@@ -856,10 +944,10 @@ def add_body_arcs(
lineno = self.line_for_node(body_node)
for prev_start in prev_starts:
self.add_arc(prev_start.lineno, lineno, prev_start.cause)
- prev_starts = self.add_arcs(body_node)
+ prev_starts = self.node_exits(body_node)
return prev_starts
- def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]:
+ def find_non_missing_node(self, node: ast.AST) -> ast.AST | None:
"""Search `node` looking for a child that has not been optimized away.
This might return the node you started with, or it will work recursively
@@ -868,17 +956,16 @@ def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]:
Returns a node, or None if none of the node remains.
"""
- # This repeats work just done in add_body_arcs, but this duplication
+ # This repeats work just done in process_body, but this duplication
# means we can avoid a function call in the 99.9999% case of not
# optimizing away statements.
lineno = self.line_for_node(node)
- first_line = self.multiline.get(lineno, lineno)
- if first_line in self.statements:
+ if lineno in self.statements:
return node
missing_fn = cast(
Optional[Callable[[ast.AST], Optional[ast.AST]]],
- getattr(self, "_missing__" + node.__class__.__name__, None)
+ getattr(self, "_missing__" + node.__class__.__name__, None),
)
if missing_fn is not None:
ret_node = missing_fn(node)
@@ -893,7 +980,7 @@ def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]:
# find_non_missing_node) to find a node to use instead of the missing
# node. They can return None if the node should truly be gone.
- def _missing__If(self, node: ast.If) -> Optional[ast.AST]:
+ def _missing__If(self, node: ast.If) -> ast.AST | None:
# If the if-node is missing, then one of its children might still be
# here, but not both. So return the first of the two that isn't missing.
# Use a NodeList to hold the clauses as a single node.
@@ -904,7 +991,7 @@ def _missing__If(self, node: ast.If) -> Optional[ast.AST]:
return self.find_non_missing_node(NodeList(node.orelse))
return None
- def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]:
+ def _missing__NodeList(self, node: NodeList) -> ast.AST | None:
# A NodeList might be a mixture of missing and present nodes. Find the
# ones that are present.
non_missing_children = []
@@ -920,22 +1007,22 @@ def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]:
return non_missing_children[0]
return NodeList(non_missing_children)
- def _missing__While(self, node: ast.While) -> Optional[ast.AST]:
+ def _missing__While(self, node: ast.While) -> ast.AST | None:
body_nodes = self.find_non_missing_node(NodeList(node.body))
if not body_nodes:
return None
# Make a synthetic While-true node.
- new_while = ast.While()
- new_while.lineno = body_nodes.lineno
- new_while.test = ast.Name()
- new_while.test.lineno = body_nodes.lineno
+ new_while = ast.While() # type: ignore[call-arg]
+ new_while.lineno = body_nodes.lineno # type: ignore[attr-defined]
+ new_while.test = ast.Name() # type: ignore[call-arg]
+ new_while.test.lineno = body_nodes.lineno # type: ignore[attr-defined]
new_while.test.id = "True"
assert hasattr(body_nodes, "body")
new_while.body = body_nodes.body
new_while.orelse = []
return new_while
- def is_constant_expr(self, node: ast.AST) -> Optional[str]:
+ def is_constant_expr(self, node: ast.AST) -> str | None:
"""Is this a compile-time constant?"""
node_name = node.__class__.__name__
if node_name in ["Constant", "NameConstant", "Num"]:
@@ -960,97 +1047,84 @@ def is_constant_expr(self, node: ast.AST) -> Optional[str]:
# enclosing loop block, or the nearest enclosing finally block, whichever
# is nearer.
- def process_break_exits(self, exits: Set[ArcStart]) -> None:
+ def process_break_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_break_exits(exits, self.add_arc):
break
- def process_continue_exits(self, exits: Set[ArcStart]) -> None:
+ def process_continue_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_continue_exits(exits, self.add_arc):
break
- def process_raise_exits(self, exits: Set[ArcStart]) -> None:
+ def process_raise_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if block.process_raise_exits(exits, self.add_arc):
break
- def process_return_exits(self, exits: Set[ArcStart]) -> None:
+ def process_return_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_return_exits(exits, self.add_arc):
break
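
These four drivers walk the block stack innermost-first; the first block whose process_* method returns True consumes the exits, and the base-class change above (return False instead of raising) lets the search continue outward. A generic sketch of the pattern with hypothetical handlers:

    class Consumes:
        def handle(self, exits): return True    # handled here, stop
    class Passes:
        def handle(self, exits): return False   # not mine, keep looking

    stack = [Consumes(), Passes()]   # outermost first, like block_stack
    for block in reversed(stack):    # nearest_blocks(): innermost first
        if block.handle({"break"}):
            break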
- # Handlers: _handle__*
+ # Node handlers: _handle__*
#
# Each handler deals with a specific AST node type, dispatched from
- # add_arcs. Handlers return the set of exits from that node, and can
+ # node_exits. Handlers return the set of exits from that node, and can
# also call self.add_arc to record arcs they find. These functions mirror
# the Python semantics of each syntactic construct. See the docstring
- # for add_arcs to understand the concept of exits from a node.
+ # for node_exits to understand the concept of exits from a node.
#
# Every node type that represents a statement should have a handler, or it
# should be listed in OK_TO_DEFAULT.
- def _handle__Break(self, node: ast.Break) -> Set[ArcStart]:
+ def _handle__Break(self, node: ast.Break) -> set[ArcStart]:
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
self.process_break_exits({break_start})
return set()
- def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]:
+ def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]:
"""Add arcs for things that can be decorated (classes and functions)."""
main_line: TLineNo = node.lineno
- last: Optional[TLineNo] = node.lineno
+ last: TLineNo | None = node.lineno
decs = node.decorator_list
if decs:
- if env.PYBEHAVIOR.trace_decorated_def or env.PYBEHAVIOR.def_ast_no_decorator:
- last = None
+ last = None
for dec_node in decs:
dec_start = self.line_for_node(dec_node)
- if last is not None and dec_start != last:
- self.add_arc(last, dec_start)
+ if last is not None and dec_start != last: # type: ignore[unreachable]
+ self.add_arc(last, dec_start) # type: ignore[unreachable]
last = dec_start
assert last is not None
- if env.PYBEHAVIOR.trace_decorated_def:
- self.add_arc(last, main_line)
- last = main_line
- if env.PYBEHAVIOR.trace_decorator_line_again:
- for top, bot in zip(decs, decs[1:]):
- self.add_arc(self.line_for_node(bot), self.line_for_node(top))
- self.add_arc(self.line_for_node(decs[0]), main_line)
- self.add_arc(main_line, self.line_for_node(decs[-1]))
+ self.add_arc(last, main_line)
+ last = main_line
# The definition line may have been missed, but we should have it
# in `self.statements`. For some constructs, `line_for_node` is
# not what we'd think of as the first line in the statement, so map
# it to the first one.
- if node.body:
- body_start = self.line_for_node(node.body[0])
- body_start = self.multiline.get(body_start, body_start)
- for lineno in range(last+1, body_start):
- if lineno in self.statements:
- self.add_arc(last, lineno)
- last = lineno
+ assert node.body, f"Oops: {node.body = } in {self.filename}@{node.lineno}"
# The body is handled in collect_arcs.
assert last is not None
return {ArcStart(last)}
_handle__ClassDef = _handle_decorated
- def _handle__Continue(self, node: ast.Continue) -> Set[ArcStart]:
+ def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]:
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
self.process_continue_exits({continue_start})
return set()
- def _handle__For(self, node: ast.For) -> Set[ArcStart]:
+ def _handle__For(self, node: ast.For) -> set[ArcStart]:
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
- exits = self.add_body_arcs(node.body, from_start=from_start)
+ exits = self.process_body(node.body, from_start=from_start)
# Any exit from the body will go back to the top of the loop.
for xit in exits:
self.add_arc(xit.lineno, start, xit.cause)
@@ -1059,7 +1133,7 @@ def _handle__For(self, node: ast.For) -> Set[ArcStart]:
exits = my_block.break_exits
from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
if node.orelse:
- else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
+ else_exits = self.process_body(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No else clause: exit from the for line.
@@ -1071,58 +1145,67 @@ def _handle__For(self, node: ast.For) -> Set[ArcStart]:
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
- def _handle__If(self, node: ast.If) -> Set[ArcStart]:
+ def _handle__If(self, node: ast.If) -> set[ArcStart]:
start = self.line_for_node(node.test)
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
- exits = self.add_body_arcs(node.body, from_start=from_start)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
- exits |= self.add_body_arcs(node.orelse, from_start=from_start)
+ exits = self.process_body(node.body, from_start=from_start)
+ from_start = ArcStart(start, cause="the condition on line {lineno} was always true")
+ exits |= self.process_body(node.orelse, from_start=from_start)
return exits
if sys.version_info >= (3, 10):
- def _handle__Match(self, node: ast.Match) -> Set[ArcStart]:
+ def _handle__Match(self, node: ast.Match) -> set[ArcStart]:
start = self.line_for_node(node)
last_start = start
exits = set()
- had_wildcard = False
for case in node.cases:
case_start = self.line_for_node(case.pattern)
- pattern = case.pattern
- while isinstance(pattern, ast.MatchOr):
- pattern = pattern.patterns[-1]
- if isinstance(pattern, ast.MatchAs):
- had_wildcard = True
self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
from_start = ArcStart(
case_start,
cause="the pattern on line {lineno} never matched",
)
- exits |= self.add_body_arcs(case.body, from_start=from_start)
+ exits |= self.process_body(case.body, from_start=from_start)
last_start = case_start
+
+ # case is now the last case, check for wildcard match.
+ pattern = case.pattern # pylint: disable=undefined-loop-variable
+ while isinstance(pattern, ast.MatchOr):
+ pattern = pattern.patterns[-1]
+ while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None:
+ pattern = pattern.pattern
+ had_wildcard = (
+ isinstance(pattern, ast.MatchAs)
+ and pattern.pattern is None
+ and case.guard is None # pylint: disable=undefined-loop-variable
+ )
+
if not had_wildcard:
- exits.add(from_start)
+ exits.add(
+ ArcStart(case_start, cause="the pattern on line {lineno} always matched"),
+ )
return exits
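
The new wildcard test unwraps `or` patterns and capture patterns before checking for a bare catch-all; a standalone check on Python 3.10+ (the match statement is illustrative):

    import ast

    code = "match x:\n    case 1 | _:\n        pass\n"
    case = ast.parse(code).body[0].cases[-1]
    pattern = case.pattern
    while isinstance(pattern, ast.MatchOr):
        pattern = pattern.patterns[-1]
    while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None:
        pattern = pattern.pattern
    assert isinstance(pattern, ast.MatchAs) and pattern.pattern is None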
- def _handle__NodeList(self, node: NodeList) -> Set[ArcStart]:
+ def _handle__NodeList(self, node: NodeList) -> set[ArcStart]:
start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
+ exits = self.process_body(node.body, from_start=ArcStart(start))
return exits
- def _handle__Raise(self, node: ast.Raise) -> Set[ArcStart]:
+ def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]:
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
self.process_raise_exits({raise_start})
# `raise` statement jumps away, no exits from here.
return set()
- def _handle__Return(self, node: ast.Return) -> Set[ArcStart]:
+ def _handle__Return(self, node: ast.Return) -> set[ArcStart]:
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
self.process_return_exits({return_start})
# `return` statement jumps away, no exits from here.
return set()
- def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
+ def _handle__Try(self, node: ast.Try) -> set[ArcStart]:
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
@@ -1140,91 +1223,35 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
self.block_stack.append(try_block)
start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
+ exits = self.process_body(node.body, from_start=ArcStart(start))
# We're done with the `try` body, so this block no longer handles
# exceptions. We keep the block so the `finally` clause can pick up
# flows from the handlers and `else` clause.
if node.finalbody:
try_block.handler_start = None
- if node.handlers:
- # If there are `except` clauses, then raises in the try body
- # will already jump to them. Start this set over for raises in
- # `except` and `else`.
- try_block.raise_from = set()
else:
self.block_stack.pop()
- handler_exits: Set[ArcStart] = set()
+ handler_exits: set[ArcStart] = set()
if node.handlers:
- last_handler_start: Optional[TLineNo] = None
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
- if last_handler_start is not None:
- self.add_arc(last_handler_start, handler_start)
- last_handler_start = handler_start
from_cause = "the exception caught by line {lineno} didn't happen"
from_start = ArcStart(handler_start, cause=from_cause)
- handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)
+ handler_exits |= self.process_body(handler_node.body, from_start=from_start)
if node.orelse:
- exits = self.add_body_arcs(node.orelse, prev_starts=exits)
+ exits = self.process_body(node.orelse, prev_starts=exits)
exits |= handler_exits
if node.finalbody:
self.block_stack.pop()
- final_from = ( # You can get to the `finally` clause from:
- exits | # the exits of the body or `else` clause,
- try_block.break_from | # or a `break`,
- try_block.continue_from | # or a `continue`,
- try_block.raise_from | # or a `raise`,
- try_block.return_from # or a `return`.
- )
-
- final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
-
- if try_block.break_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for break_line in try_block.break_from:
- lineno = break_line.lineno
- cause = break_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- breaks = try_block.break_from
- else:
- breaks = self._combine_finally_starts(try_block.break_from, final_exits)
- self.process_break_exits(breaks)
-
- if try_block.continue_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for continue_line in try_block.continue_from:
- lineno = continue_line.lineno
- cause = continue_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- continues = try_block.continue_from
- else:
- continues = self._combine_finally_starts(try_block.continue_from, final_exits)
- self.process_continue_exits(continues)
-
- if try_block.raise_from:
- self.process_raise_exits(
- self._combine_finally_starts(try_block.raise_from, final_exits)
- )
+ final_from = exits
- if try_block.return_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for return_line in try_block.return_from:
- lineno = return_line.lineno
- cause = return_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- returns = try_block.return_from
- else:
- returns = self._combine_finally_starts(try_block.return_from, final_exits)
- self.process_return_exits(returns)
+ final_exits = self.process_body(node.finalbody, prev_starts=final_from)
if exits:
# The finally clause's exits are only exits for the try block
@@ -1233,22 +1260,7 @@ def _handle__Try(self, node: ast.Try) -> Set[ArcStart]:
return exits
- def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) -> Set[ArcStart]:
- """Helper for building the cause of `finally` branches.
-
- "finally" clauses might not execute their exits, and the causes could
- be due to a failure to execute any of the exits in the try block. So
- we use the causes from `starts` as the causes for `exits`.
- """
- causes = []
- for start in sorted(starts):
- if start.cause is not None:
- causes.append(start.cause.format(lineno=start.lineno))
- cause = " or ".join(causes)
- exits = {ArcStart(xit.lineno, cause) for xit in exits}
- return exits
-
- def _handle__While(self, node: ast.While) -> Set[ArcStart]:
+ def _handle__While(self, node: ast.While) -> set[ArcStart]:
start = to_top = self.line_for_node(node.test)
constant_test = self.is_constant_expr(node.test)
top_is_body0 = False
@@ -1260,16 +1272,16 @@ def _handle__While(self, node: ast.While) -> Set[ArcStart]:
to_top = self.line_for_node(node.body[0])
self.block_stack.append(LoopBlock(start=to_top))
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
- exits = self.add_body_arcs(node.body, from_start=from_start)
+ exits = self.process_body(node.body, from_start=from_start)
for xit in exits:
self.add_arc(xit.lineno, to_top, xit.cause)
exits = set()
my_block = self.block_stack.pop()
assert isinstance(my_block, LoopBlock)
exits.update(my_block.break_exits)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
+ from_start = ArcStart(start, cause="the condition on line {lineno} was always true")
if node.orelse:
- else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
+ else_exits = self.process_body(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No `else` clause: you can exit from the start.
@@ -1277,143 +1289,28 @@ def _handle__While(self, node: ast.While) -> Set[ArcStart]:
exits.add(from_start)
return exits
- def _handle__With(self, node: ast.With) -> Set[ArcStart]:
- start = self.line_for_node(node)
+ def _handle__With(self, node: ast.With) -> set[ArcStart]:
+ if env.PYBEHAVIOR.exit_with_through_ctxmgr:
+ starts = [self.line_for_node(item.context_expr) for item in node.items]
+ else:
+ starts = [self.line_for_node(node)]
if env.PYBEHAVIOR.exit_through_with:
- self.block_stack.append(WithBlock(start=start))
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
+ for start in starts:
+ self.current_with_starts.add(start)
+ self.all_with_starts.add(start)
+
+ exits = self.process_body(node.body, from_start=ArcStart(starts[-1]))
+
if env.PYBEHAVIOR.exit_through_with:
- with_block = self.block_stack.pop()
- assert isinstance(with_block, WithBlock)
+ start = starts[-1]
+ self.current_with_starts.remove(start)
with_exit = {ArcStart(start)}
if exits:
for xit in exits:
self.add_arc(xit.lineno, start)
+ self.with_exits.add((xit.lineno, start))
exits = with_exit
- if with_block.break_from:
- self.process_break_exits(
- self._combine_finally_starts(with_block.break_from, with_exit)
- )
- if with_block.continue_from:
- self.process_continue_exits(
- self._combine_finally_starts(with_block.continue_from, with_exit)
- )
- if with_block.return_from:
- self.process_return_exits(
- self._combine_finally_starts(with_block.return_from, with_exit)
- )
+
return exits
_handle__AsyncWith = _handle__With
-
- # Code object dispatchers: _code_object__*
- #
- # These methods are used by analyze() as the start of the analysis.
- # There is one for each construct with a code object.
-
- def _code_object__Module(self, node: ast.Module) -> None:
- start = self.line_for_node(node)
- if node.body:
- exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
- for xit in exits:
- self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
- else:
- # Empty module.
- self.add_arc(-start, start)
- self.add_arc(start, -start)
-
- def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None:
- start = self.line_for_node(node)
- self.block_stack.append(FunctionBlock(start=start, name=node.name))
- exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
- self.process_return_exits(exits)
- self.block_stack.pop()
-
- _code_object__AsyncFunctionDef = _code_object__FunctionDef
-
- def _code_object__ClassDef(self, node: ast.ClassDef) -> None:
- start = self.line_for_node(node)
- self.add_arc(-start, start)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
- for xit in exits:
- self.add_arc(
- xit.lineno, -start, xit.cause,
- f"didn't exit the body of class {node.name!r}",
- )
-
- _code_object__Lambda = _make_expression_code_method("lambda")
- _code_object__GeneratorExp = _make_expression_code_method("generator expression")
- _code_object__DictComp = _make_expression_code_method("dictionary comprehension")
- _code_object__SetComp = _make_expression_code_method("set comprehension")
- _code_object__ListComp = _make_expression_code_method("list comprehension")
-
-
-# Code only used when dumping the AST for debugging.
-
-SKIP_DUMP_FIELDS = ["ctx"]
-
-def _is_simple_value(value: Any) -> bool:
- """Is `value` simple enough to be displayed on a single line?"""
- return (
- value in [None, [], (), {}, set(), frozenset(), Ellipsis] or
- isinstance(value, (bytes, int, float, str))
- )
-
-def ast_dump(
- node: ast.AST,
- depth: int = 0,
- print: Callable[[str], None] = print, # pylint: disable=redefined-builtin
-) -> None:
- """Dump the AST for `node`.
-
- This recursively walks the AST, printing a readable version.
-
- """
- indent = " " * depth
- lineno = getattr(node, "lineno", None)
- if lineno is not None:
- linemark = f" @ {node.lineno},{node.col_offset}"
- if hasattr(node, "end_lineno"):
- assert hasattr(node, "end_col_offset")
- linemark += ":"
- if node.end_lineno != node.lineno:
- linemark += f"{node.end_lineno},"
- linemark += f"{node.end_col_offset}"
- else:
- linemark = ""
- head = f"{indent}<{node.__class__.__name__}{linemark}"
-
- named_fields = [
- (name, value)
- for name, value in ast.iter_fields(node)
- if name not in SKIP_DUMP_FIELDS
- ]
- if not named_fields:
- print(f"{head}>")
- elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
- field_name, value = named_fields[0]
- print(f"{head} {field_name}: {value!r}>")
- else:
- print(head)
- if 0:
- print("{}# mro: {}".format( # type: ignore[unreachable]
- indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
- ))
- next_indent = indent + " "
- for field_name, value in named_fields:
- prefix = f"{next_indent}{field_name}:"
- if _is_simple_value(value):
- print(f"{prefix} {value!r}")
- elif isinstance(value, list):
- print(f"{prefix} [")
- for n in value:
- if _is_simple_value(n):
- print(f"{next_indent} {n!r}")
- else:
- ast_dump(n, depth + 8, print=print)
- print(f"{next_indent}]")
- else:
- print(prefix)
- ast_dump(value, depth + 8, print=print)
-
- print(f"{indent}>")
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 4d1ee46e6..8c29402ad 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -13,7 +13,7 @@
import token
import tokenize
-from typing import Iterable, List, Optional, Set, Tuple
+from collections.abc import Iterable
from coverage import env
from coverage.types import TLineNo, TSourceTokenLines
@@ -32,7 +32,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
Returns the same values as generate_tokens()
"""
- last_line: Optional[str] = None
+ last_line: str | None = None
last_lineno = -1
last_ttext: str = ""
for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
@@ -57,10 +57,21 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
if last_ttext.endswith("\\"):
inject_backslash = False
elif ttype == token.STRING:
- if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
+ if (last_line.endswith("\\\n") and # pylint: disable=simplifiable-if-statement
+ last_line.rstrip(" \\\n").endswith(last_ttext)):
+ # Deal with special cases like this code::
+ #
+ # a = ["aaa",\ # there may be zero or more blanks between "," and "\".
+ # "bbb \
+ # ccc"]
+ #
+ inject_backslash = True
+ else:
# It's a multi-line string and the first line ends with
# a backslash, so we don't need to inject another.
inject_backslash = False
+ elif sys.version_info >= (3, 12) and ttype == token.FSTRING_MIDDLE:
+ inject_backslash = False
if inject_backslash:
# Figure out what column the backslash is in.
ccol = len(last_line.split("\n")[-2]) - 1
@@ -68,7 +79,7 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
yield tokenize.TokenInfo(
99999, "\\\n",
(slineno, ccol), (slineno, ccol+2),
- last_line
+ last_line,
)
last_line = ltext
if ttype not in (tokenize.NEWLINE, tokenize.NL):
@@ -77,20 +88,19 @@ def _phys_tokens(toks: TokenInfos) -> TokenInfos:
last_lineno = elineno
-class MatchCaseFinder(ast.NodeVisitor):
- """Helper for finding match/case lines."""
- def __init__(self, source: str) -> None:
- # This will be the set of line numbers that start match or case statements.
- self.match_case_lines: Set[TLineNo] = set()
- self.visit(ast.parse(source))
+def find_soft_key_lines(source: str) -> set[TLineNo]:
+ """Helper for finding lines with soft keywords, like match/case lines."""
+ soft_key_lines: set[TLineNo] = set()
- if sys.version_info >= (3, 10):
- def visit_Match(self, node: ast.Match) -> None:
- """Invoked by ast.NodeVisitor.visit"""
- self.match_case_lines.add(node.lineno)
+ for node in ast.walk(ast.parse(source)):
+ if sys.version_info >= (3, 10) and isinstance(node, ast.Match):
+ soft_key_lines.add(node.lineno)
for case in node.cases:
- self.match_case_lines.add(case.pattern.lineno)
- self.generic_visit(node)
+ soft_key_lines.add(case.pattern.lineno)
+ elif sys.version_info >= (3, 12) and isinstance(node, ast.TypeAlias):
+ soft_key_lines.add(node.lineno)
+
+ return soft_key_lines
def source_token_lines(source: str) -> TSourceTokenLines:
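A quick usage sketch of `find_soft_key_lines`, assuming Python 3.10+ so that
`match`/`case` parse::

    from coverage.phystokens import find_soft_key_lines

    src = (
        "match command:\n"
        "    case 'go':\n"
        "        move()\n"
    )
    print(find_soft_key_lines(src))  # {1, 2}: the match line and the case pattern line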
@@ -110,24 +120,26 @@ def source_token_lines(source: str) -> TSourceTokenLines:
"""
ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
- line: List[Tuple[str, str]] = []
+ line: list[tuple[str, str]] = []
col = 0
- source = source.expandtabs(8).replace('\r\n', '\n')
+ source = source.expandtabs(8).replace("\r\n", "\n")
tokgen = generate_tokens(source)
if env.PYBEHAVIOR.soft_keywords:
- match_case_lines = MatchCaseFinder(source).match_case_lines
+ soft_key_lines = find_soft_key_lines(source)
+ else:
+ soft_key_lines = set()
for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
mark_start = True
- for part in re.split('(\n)', ttext):
- if part == '\n':
+ for part in re.split("(\n)", ttext):
+ if part == "\n":
yield line
line = []
col = 0
mark_end = False
- elif part == '':
+ elif part == "":
mark_end = False
elif ttype in ws_tokens:
mark_end = False
@@ -135,7 +147,7 @@ def source_token_lines(source: str) -> TSourceTokenLines:
if mark_start and scol > col:
line.append(("ws", " " * (scol - col)))
mark_start = False
- tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
+ tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
if ttype == token.NAME:
if keyword.iskeyword(ttext):
# Hard keywords are always keywords.
@@ -144,15 +156,14 @@ def source_token_lines(source: str) -> TSourceTokenLines:
# Need the version_info check to keep mypy from borking
# on issoftkeyword here.
if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
- # Soft keywords appear at the start of the line,
- # on lines that start match or case statements.
+ # Soft keywords appear at the start of their line.
if len(line) == 0:
is_start_of_line = True
elif (len(line) == 1) and line[0][0] == "ws":
is_start_of_line = True
else:
is_start_of_line = False
- if is_start_of_line and sline in match_case_lines:
+ if is_start_of_line and sline in soft_key_lines:
tok_class = "key"
line.append((tok_class, part))
mark_end = True
@@ -164,35 +175,15 @@ def source_token_lines(source: str) -> TSourceTokenLines:
yield line
-class CachedTokenizer:
- """A one-element cache around tokenize.generate_tokens.
-
- When reporting, coverage.py tokenizes files twice, once to find the
- structure of the file, and once to syntax-color it. Tokenizing is
- expensive, and easily cached.
+def generate_tokens(text: str) -> TokenInfos:
+ """A helper around `tokenize.generate_tokens`.
- This is a one-element cache so that our twice-in-a-row tokenizing doesn't
- actually tokenize twice.
+ Originally this was used to cache the results, but it didn't seem to make
+ reporting go faster, and caused issues with using too much memory.
"""
- def __init__(self) -> None:
- self.last_text: Optional[str] = None
- self.last_tokens: List[tokenize.TokenInfo] = []
-
- def generate_tokens(self, text: str) -> TokenInfos:
- """A stand-in for `tokenize.generate_tokens`."""
- if text != self.last_text:
- self.last_text = text
- readline = io.StringIO(text).readline
- try:
- self.last_tokens = list(tokenize.generate_tokens(readline))
- except:
- self.last_text = None
- raise
- return self.last_tokens
-
-# Create our generate_tokens cache as a callable replacement function.
-generate_tokens = CachedTokenizer().generate_tokens
+ readline = io.StringIO(text).readline
+ return tokenize.generate_tokens(readline)
def source_encoding(source: bytes) -> str:
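With the cache removed, `generate_tokens` is a thin wrapper; a minimal usage
sketch::

    from coverage.phystokens import generate_tokens

    for tok in generate_tokens("x = 1\n"):
        print(tok.type, tok.string)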
diff --git a/coverage/plugin.py b/coverage/plugin.py
index 5279c4d06..11c0679f8 100644
--- a/coverage/plugin.py
+++ b/coverage/plugin.py
@@ -114,10 +114,12 @@ def coverage_init(reg, options):
from __future__ import annotations
+import dataclasses
import functools
from types import FrameType
-from typing import Any, Dict, Iterable, Optional, Set, Tuple, Union
+from typing import Any
+from collections.abc import Iterable
from coverage import files
from coverage.misc import _needs_to_implement
@@ -130,7 +132,7 @@ class CoveragePlugin:
_coverage_plugin_name: str
_coverage_enabled: bool
- def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=unused-argument
+ def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument
"""Get a :class:`FileTracer` object for a file.
Plug-in type: file tracer.
@@ -170,10 +172,10 @@ def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=
"""
return None
- def file_reporter( # type: ignore[return]
+ def file_reporter(
self,
filename: str, # pylint: disable=unused-argument
- ) -> Union[FileReporter, str]: # str should be Literal["python"]
+ ) -> FileReporter | str: # str should be Literal["python"]
"""Get the :class:`FileReporter` class to use for a file.
Plug-in type: file tracer.
@@ -190,7 +192,7 @@ def file_reporter( # type: ignore[return]
def dynamic_context(
self,
frame: FrameType, # pylint: disable=unused-argument
- ) -> Optional[str]:
+ ) -> str | None:
"""Get the dynamically computed context label for `frame`.
Plug-in type: dynamic context.
@@ -238,7 +240,7 @@ def configure(self, config: TConfigurable) -> None:
"""
pass
- def sys_info(self) -> Iterable[Tuple[str, Any]]:
+ def sys_info(self) -> Iterable[tuple[str, Any]]:
"""Get a list of information useful for debugging.
Plug-in type: any.
@@ -274,7 +276,7 @@ class FileTracer(CoveragePluginBase):
"""
- def source_filename(self) -> str: # type: ignore[return]
+ def source_filename(self) -> str:
"""The source file name for this file.
This may be any file name you like. A key responsibility of a plug-in
@@ -311,7 +313,7 @@ def dynamic_source_filename(
self,
filename: str, # pylint: disable=unused-argument
frame: FrameType, # pylint: disable=unused-argument
- ) -> Optional[str]:
+ ) -> str | None:
"""Get a dynamically computed source file name.
Some plug-ins need to compute the source file name dynamically for each
@@ -326,7 +328,7 @@ def dynamic_source_filename(
"""
return None
- def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
+ def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
"""Get the range of source line numbers for a given a call frame.
The call frame is examined, and the source line number in the original
@@ -344,6 +346,35 @@ def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
return lineno, lineno
+@dataclasses.dataclass
+class CodeRegion:
+ """Data for a region of code found by :meth:`FileReporter.code_regions`."""
+
+ #: The kind of region, like `"function"` or `"class"`. Must be one of the
+ #: singular values returned by :meth:`FileReporter.code_region_kinds`.
+ kind: str
+
+ #: The name of the region. For example, a function or class name.
+ name: str
+
+ #: The line in the source file to link to when navigating to the region.
+ #: Can be a line not mentioned in `lines`.
+ start: int
+
+ #: The lines in the region. Should be lines that could be executed in the
+ #: region. For example, a class region includes all of the lines in the
+ #: methods of the class, but not the lines defining class attributes, since
+ #: they are executed on import, not as part of exercising the class. The
+ #: set can include non-executable lines like blanks and comments.
+ lines: set[int]
+
+ def __lt__(self, other: CodeRegion) -> bool:
+ """To support sorting to make test-writing easier."""
+ if self.name == other.name:
+ return min(self.lines) < min(other.lines)
+ return self.name < other.name
+
+
@functools.total_ordering
class FileReporter(CoveragePluginBase):
"""Support needed for files during the analysis and reporting phases.
@@ -369,7 +400,7 @@ def __init__(self, filename: str) -> None:
self.filename = filename
def __repr__(self) -> str:
- return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)
+ return f"<{self.__class__.__name__} filename={self.filename!r}>"
def relative_filename(self) -> str:
"""Get the relative file name for this file.
@@ -395,7 +426,7 @@ def source(self) -> str:
with open(self.filename, encoding="utf-8") as f:
return f.read()
- def lines(self) -> Set[TLineNo]: # type: ignore[return]
+ def lines(self) -> set[TLineNo]:
"""Get the executable lines in this file.
Your plug-in must determine which lines in the file were possibly
@@ -406,7 +437,7 @@ def lines(self) -> Set[TLineNo]: # type: ignore[return]
"""
_needs_to_implement(self, "lines")
- def excluded_lines(self) -> Set[TLineNo]:
+ def excluded_lines(self) -> set[TLineNo]:
"""Get the excluded executable lines in this file.
Your plug-in can use any method it likes to allow the user to exclude
@@ -419,7 +450,7 @@ def excluded_lines(self) -> Set[TLineNo]:
"""
return set()
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
"""Translate recorded lines into reported lines.
Some file formats will want to report lines slightly differently than
@@ -439,7 +470,7 @@ def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
"""
return set(lines)
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
"""Get the executable arcs in this file.
To support branch coverage, your plug-in needs to be able to indicate
@@ -453,7 +484,7 @@ def arcs(self) -> Set[TArc]:
"""
return set()
- def no_branch_lines(self) -> Set[TLineNo]:
+ def no_branch_lines(self) -> set[TLineNo]:
"""Get the lines excused from branch coverage in this file.
Your plug-in can use any method it likes to allow the user to exclude
@@ -466,7 +497,7 @@ def no_branch_lines(self) -> Set[TLineNo]:
"""
return set()
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
"""Translate recorded arcs into reported arcs.
Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
@@ -479,7 +510,7 @@ def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
"""
return set(arcs)
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def exit_counts(self) -> dict[TLineNo, int]:
"""Get a count of exits from that each line.
To determine which lines are branches, coverage.py looks for lines that
@@ -496,7 +527,7 @@ def missing_arc_description(
self,
start: TLineNo,
end: TLineNo,
- executed_arcs: Optional[Iterable[TArc]] = None, # pylint: disable=unused-argument
+ executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument
) -> str:
"""Provide an English sentence describing a missing arc.
@@ -512,6 +543,14 @@ def missing_arc_description(
"""
return f"Line {start} didn't jump to line {end}"
+ def arc_description(
+ self,
+ start: TLineNo, # pylint: disable=unused-argument
+ end: TLineNo
+ ) -> str:
+ """Provide an English description of an arc's effect."""
+ return f"jump to line {end}"
+
def source_token_lines(self) -> TSourceTokenLines:
"""Generate a series of tokenized lines, one for each line in `source`.
@@ -519,29 +558,54 @@ def source_token_lines(self) -> TSourceTokenLines:
Each line is a list of pairs, each pair is a token::
- [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+ [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ]
Each pair has a token class, and the token text. The token classes
are:
- * ``'com'``: a comment
- * ``'key'``: a keyword
- * ``'nam'``: a name, or identifier
- * ``'num'``: a number
- * ``'op'``: an operator
- * ``'str'``: a string literal
- * ``'ws'``: some white space
- * ``'txt'``: some other kind of text
+ * ``"com"``: a comment
+ * ``"key"``: a keyword
+ * ``"nam"``: a name, or identifier
+ * ``"num"``: a number
+ * ``"op"``: an operator
+ * ``"str"``: a string literal
+ * ``"ws"``: some white space
+ * ``"txt"``: some other kind of text
If you concatenate all the token texts, and then join them with
newlines, you should have your original source back.
The default implementation simply returns each line tagged as
- ``'txt'``.
+ ``"txt"``.
"""
for line in self.source().splitlines():
- yield [('txt', line)]
+ yield [("txt", line)]
+
+ def code_regions(self) -> Iterable[CodeRegion]:
+ """Identify regions in the source file for finer reporting than by file.
+
+ Returns an iterable of :class:`CodeRegion` objects. The kinds reported
+ should be in the possibilities returned by :meth:`code_region_kinds`.
+
+ """
+ return []
+
+ def code_region_kinds(self) -> Iterable[tuple[str, str]]:
+ """Return the kinds of code regions this plugin can find.
+
+ The returned pairs are the singular and plural forms of the kinds::
+
+ [
+ ("function", "functions"),
+ ("class", "classes"),
+ ]
+
+ This will usually be hard-coded, but could also differ by the specific
+ source file involved.
+
+ """
+ return []
def __eq__(self, other: Any) -> bool:
return isinstance(other, FileReporter) and self.filename == other.filename
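A hedged sketch of a plugin `FileReporter` adopting the two new region hooks;
the class, kinds, and line numbers here are invented for illustration::

    from coverage.plugin import CodeRegion, FileReporter

    class TemplateReporter(FileReporter):
        def lines(self):
            return {1, 2, 3}

        def code_regions(self):
            return [CodeRegion(kind="macro", name="render", start=1, lines={2, 3})]

        def code_region_kinds(self):
            return [("macro", "macros")]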
diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py
index 4ed02c5c0..99e3bc22b 100644
--- a/coverage/plugin_support.py
+++ b/coverage/plugin_support.py
@@ -10,7 +10,8 @@
import sys
from types import FrameType
-from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union
+from typing import Any
+from collections.abc import Iterable, Iterator
from coverage.exceptions import PluginError
from coverage.misc import isolate_module
@@ -26,21 +27,21 @@ class Plugins:
"""The currently loaded collection of coverage.py plugins."""
def __init__(self) -> None:
- self.order: List[CoveragePlugin] = []
- self.names: Dict[str, CoveragePlugin] = {}
- self.file_tracers: List[CoveragePlugin] = []
- self.configurers: List[CoveragePlugin] = []
- self.context_switchers: List[CoveragePlugin] = []
+ self.order: list[CoveragePlugin] = []
+ self.names: dict[str, CoveragePlugin] = {}
+ self.file_tracers: list[CoveragePlugin] = []
+ self.configurers: list[CoveragePlugin] = []
+ self.context_switchers: list[CoveragePlugin] = []
- self.current_module: Optional[str] = None
- self.debug: Optional[TDebugCtl]
+ self.current_module: str | None = None
+ self.debug: TDebugCtl | None
@classmethod
def load_plugins(
cls,
modules: Iterable[str],
config: TPluginConfig,
- debug: Optional[TDebugCtl] = None,
+ debug: TDebugCtl | None = None,
) -> Plugins:
"""Load plugins from `modules`.
@@ -58,7 +59,7 @@ def load_plugins(
coverage_init = getattr(mod, "coverage_init", None)
if not coverage_init:
raise PluginError(
- f"Plugin module {module!r} didn't define a coverage_init function"
+ f"Plugin module {module!r} didn't define a coverage_init function",
)
options = config.get_plugin_options(module)
@@ -105,7 +106,7 @@ def add_noop(self, plugin: CoveragePlugin) -> None:
def _add_plugin(
self,
plugin: CoveragePlugin,
- specialized: Optional[List[CoveragePlugin]],
+ specialized: list[CoveragePlugin] | None,
) -> None:
"""Add a plugin object.
@@ -114,7 +115,7 @@ def _add_plugin(
"""
plugin_name = f"{self.current_module}.{plugin.__class__.__name__}"
- if self.debug and self.debug.should('plugin'):
+ if self.debug and self.debug.should("plugin"):
self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}")
labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug)
plugin = DebugPluginWrapper(plugin, labelled)
@@ -150,7 +151,7 @@ def add_label(self, label: str) -> LabelledDebug:
def message_prefix(self) -> str:
"""The prefix to use on messages, combining the labels."""
- prefixes = self.labels + ['']
+ prefixes = self.labels + [""]
return ":\n".join(" "*i+label for i, label in enumerate(prefixes))
def write(self, message: str) -> None:
@@ -166,7 +167,7 @@ def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None:
self.plugin = plugin
self.debug = debug
- def file_tracer(self, filename: str) -> Optional[FileTracer]:
+ def file_tracer(self, filename: str) -> FileTracer | None:
tracer = self.plugin.file_tracer(filename)
self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
if tracer:
@@ -174,7 +175,7 @@ def file_tracer(self, filename: str) -> Optional[FileTracer]:
tracer = DebugFileTracerWrapper(tracer, debug)
return tracer
- def file_reporter(self, filename: str) -> Union[FileReporter, str]:
+ def file_reporter(self, filename: str) -> FileReporter | str:
reporter = self.plugin.file_reporter(filename)
assert isinstance(reporter, FileReporter)
self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
@@ -183,7 +184,7 @@ def file_reporter(self, filename: str) -> Union[FileReporter, str]:
reporter = DebugFileReporterWrapper(filename, reporter, debug)
return reporter
- def dynamic_context(self, frame: FrameType) -> Optional[str]:
+ def dynamic_context(self, frame: FrameType) -> str | None:
context = self.plugin.dynamic_context(frame)
self.debug.write(f"dynamic_context({frame!r}) --> {context!r}")
return context
@@ -197,7 +198,7 @@ def configure(self, config: TConfigurable) -> None:
self.debug.write(f"configure({config!r})")
self.plugin.configure(config)
- def sys_info(self) -> Iterable[Tuple[str, Any]]:
+ def sys_info(self) -> Iterable[tuple[str, Any]]:
return self.plugin.sys_info()
@@ -225,14 +226,14 @@ def has_dynamic_source_filename(self) -> bool:
self.debug.write(f"has_dynamic_source_filename() --> {has!r}")
return has
- def dynamic_source_filename(self, filename: str, frame: FrameType) -> Optional[str]:
+ def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None:
dyn = self.tracer.dynamic_source_filename(filename, frame)
self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format(
filename, self._show_frame(frame), dyn,
))
return dyn
- def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
+ def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
pair = self.tracer.line_number_range(frame)
self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}")
return pair
@@ -251,37 +252,37 @@ def relative_filename(self) -> str:
self.debug.write(f"relative_filename() --> {ret!r}")
return ret
- def lines(self) -> Set[TLineNo]:
+ def lines(self) -> set[TLineNo]:
ret = self.reporter.lines()
self.debug.write(f"lines() --> {ret!r}")
return ret
- def excluded_lines(self) -> Set[TLineNo]:
+ def excluded_lines(self) -> set[TLineNo]:
ret = self.reporter.excluded_lines()
self.debug.write(f"excluded_lines() --> {ret!r}")
return ret
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
ret = self.reporter.translate_lines(lines)
self.debug.write(f"translate_lines({lines!r}) --> {ret!r}")
return ret
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
ret = self.reporter.translate_arcs(arcs)
self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}")
return ret
- def no_branch_lines(self) -> Set[TLineNo]:
+ def no_branch_lines(self) -> set[TLineNo]:
ret = self.reporter.no_branch_lines()
self.debug.write(f"no_branch_lines() --> {ret!r}")
return ret
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def exit_counts(self) -> dict[TLineNo, int]:
ret = self.reporter.exit_counts()
self.debug.write(f"exit_counts() --> {ret!r}")
return ret
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
ret = self.reporter.arcs()
self.debug.write(f"arcs() --> {ret!r}")
return ret
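The `message_prefix` logic above, distilled to show the indented label tree it
produces (a standalone sketch with made-up labels)::

    labels = ["plugin 'myplugin'", "file 'mod.py'"]
    prefixes = labels + [""]
    print(":\n".join(" " * i + label for i, label in enumerate(prefixes)))
    # plugin 'myplugin':
    #  file 'mod.py':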
diff --git a/coverage/python.py b/coverage/python.py
index 744ab4cb8..e87ff43cd 100644
--- a/coverage/python.py
+++ b/coverage/python.py
@@ -9,15 +9,17 @@
import types
import zipimport
-from typing import Dict, Iterable, Optional, Set, TYPE_CHECKING
+from typing import TYPE_CHECKING
+from collections.abc import Iterable
from coverage import env
from coverage.exceptions import CoverageException, NoSource
from coverage.files import canonical_filename, relative_filename, zip_location
-from coverage.misc import expensive, isolate_module, join_regex
+from coverage.misc import isolate_module, join_regex
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
-from coverage.plugin import FileReporter
+from coverage.plugin import CodeRegion, FileReporter
+from coverage.regions import code_regions
from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines
if TYPE_CHECKING:
@@ -46,7 +48,7 @@ def get_python_source(filename: str) -> str:
else:
exts = [ext]
- source_bytes: Optional[bytes]
+ source_bytes: bytes | None
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
@@ -63,17 +65,17 @@ def get_python_source(filename: str) -> str:
raise NoSource(f"No source for code: '{filename}'.")
# Replace \f because of http://bugs.python.org/issue19035
- source_bytes = source_bytes.replace(b'\f', b' ')
+ source_bytes = source_bytes.replace(b"\f", b" ")
source = source_bytes.decode(source_encoding(source_bytes), "replace")
# Python code should always end with a line with a newline.
- if source and source[-1] != '\n':
- source += '\n'
+ if source and source[-1] != "\n":
+ source += "\n"
return source
-def get_zip_bytes(filename: str) -> Optional[bytes]:
+def get_zip_bytes(filename: str) -> bytes | None:
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
@@ -127,7 +129,7 @@ def source_for_file(filename: str) -> str:
def source_for_morf(morf: TMorf) -> str:
"""Get the source filename for the module-or-file `morf`."""
- if hasattr(morf, '__file__') and morf.__file__:
+ if hasattr(morf, "__file__") and morf.__file__:
filename = morf.__file__
elif isinstance(morf, types.ModuleType):
# A module should have had .__file__, otherwise we can't use it.
@@ -143,7 +145,7 @@ def source_for_morf(morf: TMorf) -> str:
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
- def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None:
+ def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None:
self.coverage = coverage
filename = source_for_morf(morf)
@@ -157,17 +159,17 @@ def __init__(self, morf: TMorf, coverage: Optional[Coverage] = None) -> None:
fname = canonical_filename(filename)
super().__init__(fname)
- if hasattr(morf, '__name__'):
+ if hasattr(morf, "__name__"):
name = morf.__name__.replace(".", os.sep)
- if os.path.basename(filename).startswith('__init__.'):
+ if os.path.basename(filename).startswith("__init__."):
name += os.sep + "__init__"
name += ".py"
else:
name = relative_filename(filename)
self.relname = name
- self._source: Optional[str] = None
- self._parser: Optional[PythonParser] = None
+ self._source: str | None = None
+ self._parser: PythonParser | None = None
self._excluded = None
def __repr__(self) -> str:
@@ -183,49 +185,55 @@ def parser(self) -> PythonParser:
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
- exclude=self.coverage._exclude_regex('exclude'),
+ exclude=self.coverage._exclude_regex("exclude"),
)
self._parser.parse_source()
return self._parser
- def lines(self) -> Set[TLineNo]:
+ def lines(self) -> set[TLineNo]:
"""Return the line numbers of statements in the file."""
return self.parser.statements
- def excluded_lines(self) -> Set[TLineNo]:
+ def excluded_lines(self) -> set[TLineNo]:
"""Return the line numbers of statements in the file."""
return self.parser.excluded
- def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]:
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
return self.parser.translate_lines(lines)
- def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]:
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
return self.parser.translate_arcs(arcs)
- @expensive
- def no_branch_lines(self) -> Set[TLineNo]:
+ def no_branch_lines(self) -> set[TLineNo]:
assert self.coverage is not None
no_branch = self.parser.lines_matching(
- join_regex(self.coverage.config.partial_list),
- join_regex(self.coverage.config.partial_always_list),
+ join_regex(
+ self.coverage.config.partial_list
+ + self.coverage.config.partial_always_list
+ )
)
return no_branch
- @expensive
- def arcs(self) -> Set[TArc]:
+ def arcs(self) -> set[TArc]:
return self.parser.arcs()
- @expensive
- def exit_counts(self) -> Dict[TLineNo, int]:
+ def exit_counts(self) -> dict[TLineNo, int]:
return self.parser.exit_counts()
def missing_arc_description(
self,
start: TLineNo,
end: TLineNo,
- executed_arcs: Optional[Iterable[TArc]] = None,
+ executed_arcs: Iterable[TArc] | None = None,
) -> str:
- return self.parser.missing_arc_description(start, end, executed_arcs)
+ return self.parser.missing_arc_description(start, end)
+
+ def arc_description(
+ self,
+ start: TLineNo,
+ end: TLineNo
+ ) -> str:
+ return self.parser.arc_description(start, end)
def source(self) -> str:
if self._source is None:
@@ -244,7 +252,7 @@ def should_be_python(self) -> bool:
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
- if ext.startswith('.py'):
+ if ext.startswith(".py"):
return True
# A file with no extension should be Python.
if not ext:
@@ -254,3 +262,12 @@ def should_be_python(self) -> bool:
def source_token_lines(self) -> TSourceTokenLines:
return source_token_lines(self.source())
+
+ def code_regions(self) -> Iterable[CodeRegion]:
+ return code_regions(self.source())
+
+ def code_region_kinds(self) -> Iterable[tuple[str, str]]:
+ return [
+ ("function", "functions"),
+ ("class", "classes"),
+ ]
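The `no_branch_lines` change above feeds one combined list to `join_regex`
instead of two separate regexes. Assuming `join_regex` ORs its patterns
together, the effect is::

    import re

    patterns = [r"#\s*pragma: no branch", r"while True:"]
    combined = "|".join(f"(?:{p})" for p in patterns)  # roughly what join_regex does
    assert re.search(combined, "while True:  # spin")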
diff --git a/coverage/pytracer.py b/coverage/pytracer.py
index 6723c2a1b..7c119c070 100644
--- a/coverage/pytracer.py
+++ b/coverage/pytracer.py
@@ -7,32 +7,53 @@
import atexit
import dis
+import itertools
import sys
import threading
from types import FrameType, ModuleType
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple, cast
+from typing import Any, Callable, cast
from coverage import env
from coverage.types import (
- TArc, TFileDisposition, TLineNo, TTraceData, TTraceFileData, TTraceFn,
- TTracer, TWarnFn,
+ TArc,
+ TFileDisposition,
+ TLineNo,
+ TShouldStartContextFn,
+ TShouldTraceFn,
+ TTraceData,
+ TTraceFileData,
+ TTraceFn,
+ TWarnFn,
+ Tracer,
)
+
+# I don't understand why, but if we use `cast(set[TLineNo], ...)` inside
+# the _trace() function, we get some strange behavior on PyPy 3.10.
+# Assigning these names here and using them below fixes the problem.
+# See https://github.com/nedbat/coveragepy/issues/1902
+set_TLineNo = set[TLineNo]
+set_TArc = set[TArc]
+
+
# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
-RESUME = dis.opmap.get('RESUME')
-RETURN_VALUE = dis.opmap['RETURN_VALUE']
+# PYVERSIONS: RESUME is new in Python 3.11
+RESUME = dis.opmap.get("RESUME")
+RETURN_VALUE = dis.opmap["RETURN_VALUE"]
if RESUME is None:
- YIELD_VALUE = dis.opmap['YIELD_VALUE']
- YIELD_FROM = dis.opmap['YIELD_FROM']
+ YIELD_VALUE = dis.opmap["YIELD_VALUE"]
+ YIELD_FROM = dis.opmap["YIELD_FROM"]
YIELD_FROM_OFFSET = 0 if env.PYPY else 2
+else:
+ YIELD_VALUE = YIELD_FROM = YIELD_FROM_OFFSET = -1
# When running meta-coverage, this file can try to trace itself, which confuses
# everything. Don't trace ourselves.
THIS_FILE = __file__.rstrip("co")
-class PyTracer(TTracer):
+class PyTracer(Tracer):
"""Python implementation of the raw data tracer."""
# Because of poor implementations of trace-function-manipulating tools,
@@ -51,53 +72,62 @@ class PyTracer(TTracer):
# PyTracer to get accurate results. The command-line --timid argument is
# used to force the use of this tracer.
+ tracer_ids = itertools.count()
+
def __init__(self) -> None:
- # pylint: disable=super-init-not-called
+ # Which tracer are we?
+ self.id = next(self.tracer_ids)
+
# Attributes set from the collector:
self.data: TTraceData
self.trace_arcs = False
- self.should_trace: Callable[[str, FrameType], TFileDisposition]
- self.should_trace_cache: Dict[str, Optional[TFileDisposition]]
- self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None
- self.switch_context: Optional[Callable[[Optional[str]], None]] = None
+ self.should_trace: TShouldTraceFn
+ self.should_trace_cache: dict[str, TFileDisposition | None]
+ self.should_start_context: TShouldStartContextFn | None = None
+ self.switch_context: Callable[[str | None], None] | None = None
+ self.lock_data: Callable[[], None]
+ self.unlock_data: Callable[[], None]
self.warn: TWarnFn
# The threading module to use, if any.
- self.threading: Optional[ModuleType] = None
+ self.threading: ModuleType | None = None
- self.cur_file_data: Optional[TTraceFileData] = None
+ self.cur_file_data: TTraceFileData | None = None
self.last_line: TLineNo = 0
- self.cur_file_name: Optional[str] = None
- self.context: Optional[str] = None
+ self.cur_file_name: str | None = None
+ self.context: str | None = None
self.started_context = False
- self.data_stack: List[Tuple[Optional[TTraceFileData], Optional[str], TLineNo, bool]] = []
- self.thread: Optional[threading.Thread] = None
+ # The data_stack parallels the Python call stack. Each entry is
+ # information about an active frame, a four-element tuple:
+ # [0] The TTraceData for this frame's file. Could be None if we
+ # aren't tracing this frame.
+ # [1] The current file name for the frame. None if we aren't tracing
+ # this frame.
+ # [2] The last line number executed in this frame.
+ # [3] Boolean: did this frame start a new context?
+ self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = []
+ self.thread: threading.Thread | None = None
self.stopped = False
self._activity = False
self.in_atexit = False
# On exit, self.in_atexit = True
- atexit.register(setattr, self, 'in_atexit', True)
+ atexit.register(setattr, self, "in_atexit", True)
# Cache a bound method on the instance, so that we don't have to
# re-create a bound method object all the time.
self._cached_bound_method_trace: TTraceFn = self._trace
def __repr__(self) -> str:
- me = id(self)
points = sum(len(v) for v in self.data.values())
files = len(self.data)
- return f""
+ return f""
def log(self, marker: str, *args: Any) -> None:
"""For hard-core logging of what this tracer is doing."""
with open("/tmp/debug_trace.txt", "a") as f:
- f.write("{} {}[{}]".format(
- marker,
- id(self),
- len(self.data_stack),
- ))
+ f.write(f"{marker} {self.id}[{len(self.data_stack)}]")
if 0: # if you want thread ids..
f.write(".{:x}.{:x}".format( # type: ignore[unreachable]
self.thread.ident,
@@ -118,14 +148,15 @@ def _trace(
frame: FrameType,
event: str,
arg: Any, # pylint: disable=unused-argument
- lineno: Optional[TLineNo] = None, # pylint: disable=unused-argument
- ) -> Optional[TTraceFn]:
+ lineno: TLineNo | None = None, # pylint: disable=unused-argument
+ ) -> TTraceFn | None:
"""The trace function passed to sys.settrace."""
if THIS_FILE in frame.f_code.co_filename:
return None
- #self.log(":", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + "()", event)
+ # f = frame; code = f.f_code
+ # self.log(":", f"{code.co_filename} {f.f_lineno} {code.co_name}()", event)
if (self.stopped and sys.gettrace() == self._cached_bound_method_trace): # pylint: disable=comparison-with-callable
# The PyTrace.stop() method has been called, possibly by another
@@ -146,22 +177,22 @@ def _trace(
"Empty stack!",
frame.f_code.co_filename,
frame.f_lineno,
- frame.f_code.co_name
+ frame.f_code.co_name,
)
return None
- # if event != 'call' and frame.f_code.co_filename != self.cur_file_name:
+ # if event != "call" and frame.f_code.co_filename != self.cur_file_name:
# self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)
- if event == 'call':
+ if event == "call":
# Should we start a new context?
if self.should_start_context and self.context is None:
- context_maybe = self.should_start_context(frame)
+ context_maybe = self.should_start_context(frame) # pylint: disable=not-callable
if context_maybe is not None:
self.context = context_maybe
started_context = True
assert self.switch_context is not None
- self.switch_context(self.context)
+ self.switch_context(self.context) # pylint: disable=not-callable
else:
started_context = False
else:
@@ -176,7 +207,7 @@ def _trace(
self.cur_file_name,
self.last_line,
started_context,
- )
+ ),
)
# Improve tracing performance: when calling a function, both caller
@@ -197,8 +228,12 @@ def _trace(
if disp.trace:
tracename = disp.source_filename
assert tracename is not None
- if tracename not in self.data:
- self.data[tracename] = set() # type: ignore[assignment]
+ self.lock_data()
+ try:
+ if tracename not in self.data:
+ self.data[tracename] = set()
+ finally:
+ self.unlock_data()
self.cur_file_data = self.data[tracename]
else:
frame.f_trace_lines = False
@@ -215,24 +250,24 @@ def _trace(
oparg = frame.f_code.co_code[frame.f_lasti + 1]
real_call = (oparg == 0)
else:
- real_call = (getattr(frame, 'f_lasti', -1) < 0)
+ real_call = (getattr(frame, "f_lasti", -1) < 0)
if real_call:
self.last_line = -frame.f_code.co_firstlineno
else:
self.last_line = frame.f_lineno
- elif event == 'line':
+ elif event == "line":
# Record an executed line.
if self.cur_file_data is not None:
flineno: TLineNo = frame.f_lineno
if self.trace_arcs:
- cast(Set[TArc], self.cur_file_data).add((self.last_line, flineno))
+ cast(set_TArc, self.cur_file_data).add((self.last_line, flineno))
else:
- cast(Set[TLineNo], self.cur_file_data).add(flineno)
+ cast(set_TLineNo, self.cur_file_data).add(flineno)
self.last_line = flineno
- elif event == 'return':
+ elif event == "return":
if self.trace_arcs and self.cur_file_data:
# Record an arc leaving the function, but beware that a
# "return" event might just mean yielding from a generator.
@@ -243,8 +278,10 @@ def _trace(
# A return from the end of a code object is a real return.
real_return = True
else:
- # it's a real return.
- real_return = (code[lasti + 2] != RESUME)
+ # It is a real return if we aren't going to resume next.
+ if env.PYBEHAVIOR.lasti_is_yield:
+ lasti += 2
+ real_return = (code[lasti] != RESUME)
else:
if code[lasti] == RETURN_VALUE:
real_return = True
@@ -258,7 +295,7 @@ def _trace(
real_return = True
if real_return:
first = frame.f_code.co_firstlineno
- cast(Set[TArc], self.cur_file_data).add((self.last_line, -first))
+ cast(set_TArc, self.cur_file_data).add((self.last_line, -first))
# Leaving this function, pop the filename stack.
self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
@@ -268,7 +305,7 @@ def _trace(
if self.started_context:
assert self.switch_context is not None
self.context = None
- self.switch_context(None)
+ self.switch_context(None) # pylint: disable=not-callable
return self._cached_bound_method_trace
def start(self) -> TTraceFn:
@@ -281,13 +318,6 @@ def start(self) -> TTraceFn:
if self.threading:
if self.thread is None:
self.thread = self.threading.current_thread()
- else:
- if self.thread.ident != self.threading.current_thread().ident:
- # Re-starting from a different thread!? Don't set the trace
- # function, but we are marked as running again, so maybe it
- # will be ok?
- #self.log("~", "starting on different threads")
- return self._cached_bound_method_trace
sys.settrace(self._cached_bound_method_trace)
return self._cached_bound_method_trace
@@ -312,12 +342,16 @@ def stop(self) -> None:
#self.log("~", "stopping on different threads")
return
- if self.warn:
- # PyPy clears the trace function before running atexit functions,
- # so don't warn if we are in atexit on PyPy and the trace function
- # has changed to None.
- dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None)
- if (not dont_warn) and tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable
+ # PyPy clears the trace function before running atexit functions,
+ # so don't warn if we are in atexit on PyPy and the trace function
+ # has changed to None. Metacoverage also messes this up, so don't
+ # warn if we are measuring ourselves.
+ suppress_warning = (
+ (env.PYPY and self.in_atexit and tf is None)
+ or env.METACOV
+ )
+ if self.warn and not suppress_warning:
+ if tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable
self.warn(
"Trace function changed, data is likely wrong: " +
f"{tf!r} != {self._cached_bound_method_trace!r}",
@@ -332,6 +366,6 @@ def reset_activity(self) -> None:
"""Reset the activity() flag."""
self._activity = False
- def get_stats(self) -> Optional[Dict[str, int]]:
+ def get_stats(self) -> dict[str, int] | None:
"""Return a dictionary of statistics, or None."""
return None
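The new `lock_data()`/`unlock_data()` calls in `_trace` reduce to a standard
acquire/try/finally shape; a standalone sketch of the same pattern (names here
are illustrative, not coverage.py's API)::

    import threading

    data: dict[str, set[int]] = {}
    lock = threading.Lock()

    def record(filename: str, lineno: int) -> None:
        # Hold the lock only while creating the per-file set.
        lock.acquire()
        try:
            per_file = data.setdefault(filename, set())
        finally:
            lock.release()
        per_file.add(lineno)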
diff --git a/coverage/regions.py b/coverage/regions.py
new file mode 100644
index 000000000..7954be69d
--- /dev/null
+++ b/coverage/regions.py
@@ -0,0 +1,126 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Find functions and classes in Python code."""
+
+from __future__ import annotations
+
+import ast
+import dataclasses
+
+from typing import cast
+
+from coverage.plugin import CodeRegion
+
+
+@dataclasses.dataclass
+class Context:
+ """The nested named context of a function or class."""
+ name: str
+ kind: str
+ lines: set[int]
+
+
+class RegionFinder:
+ """An ast visitor that will find and track regions of code.
+
+ Functions and classes are tracked by name. Results are in the .regions
+ attribute.
+
+ """
+ def __init__(self) -> None:
+ self.regions: list[CodeRegion] = []
+ self.context: list[Context] = []
+
+ def parse_source(self, source: str) -> None:
+ """Parse `source` and walk the ast to populate the .regions attribute."""
+ self.handle_node(ast.parse(source))
+
+ def fq_node_name(self) -> str:
+ """Get the current fully qualified name we're processing."""
+ return ".".join(c.name for c in self.context)
+
+ def handle_node(self, node: ast.AST) -> None:
+ """Recursively handle any node."""
+ if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
+ self.handle_FunctionDef(node)
+ elif isinstance(node, ast.ClassDef):
+ self.handle_ClassDef(node)
+ else:
+ self.handle_node_body(node)
+
+ def handle_node_body(self, node: ast.AST) -> None:
+ """Recursively handle the nodes in this node's body, if any."""
+ for body_node in getattr(node, "body", ()):
+ self.handle_node(body_node)
+
+ def handle_FunctionDef(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None:
+ """Called for `def` or `async def`."""
+ lines = set(range(node.body[0].lineno, cast(int, node.body[-1].end_lineno) + 1))
+ if self.context and self.context[-1].kind == "class":
+ # Function bodies are part of their enclosing class.
+ self.context[-1].lines |= lines
+ # Function bodies should be excluded from the nearest enclosing function.
+ for ancestor in reversed(self.context):
+ if ancestor.kind == "function":
+ ancestor.lines -= lines
+ break
+ self.context.append(Context(node.name, "function", lines))
+ self.regions.append(
+ CodeRegion(
+ kind="function",
+ name=self.fq_node_name(),
+ start=node.lineno,
+ lines=lines,
+ )
+ )
+ self.handle_node_body(node)
+ self.context.pop()
+
+ def handle_ClassDef(self, node: ast.ClassDef) -> None:
+ """Called for `class`."""
+ # The lines for a class are the lines in the methods of the class.
+ # We start empty, and count on handle_FunctionDef to add the lines it
+ # finds.
+ lines: set[int] = set()
+ self.context.append(Context(node.name, "class", lines))
+ self.regions.append(
+ CodeRegion(
+ kind="class",
+ name=self.fq_node_name(),
+ start=node.lineno,
+ lines=lines,
+ )
+ )
+ self.handle_node_body(node)
+ self.context.pop()
+ # Class bodies should be excluded from the enclosing classes.
+ for ancestor in reversed(self.context):
+ if ancestor.kind == "class":
+ ancestor.lines -= lines
+
+
+def code_regions(source: str) -> list[CodeRegion]:
+ """Find function and class regions in source code.
+
+ Analyzes the code in `source`, and returns a list of :class:`CodeRegion`
+ objects describing functions and classes as regions of the code::
+
+ [
+ CodeRegion(kind="function", name="func1", start=8, lines={10, 11, 12}),
+ CodeRegion(kind="function", name="MyClass.method", start=30, lines={34, 35, 36}),
+ CodeRegion(kind="class", name="MyClass", start=25, lines={34, 35, 36}),
+ ]
+
+ The line numbers will include comments and blank lines. Later processing
+ will need to ignore those lines as needed.
+
+ Nested functions and classes are excluded from their enclosing region. No
+ line should be reported as being part of more than one function, or more
+ than one class. Lines in methods are reported as being in a function and
+ in a class.
+
+ """
+ rf = RegionFinder()
+ rf.parse_source(source)
+ return rf.regions
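A small runnable check of `code_regions`, following the docstring above: the
method body lines count for both the function and its enclosing class::

    from coverage.regions import code_regions

    src = (
        "class C:\n"
        "    def m(self):\n"
        "        return 1\n"
    )
    for region in sorted(code_regions(src)):
        print(region.kind, region.name, region.start, sorted(region.lines))
    # class C 1 [3]
    # function C.m 2 [3]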
diff --git a/coverage/report.py b/coverage/report.py
index 09eed0a82..bf04f2a86 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -1,117 +1,282 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-"""Reporter foundation for coverage.py."""
+"""Summary reporting"""
from __future__ import annotations
import sys
-from typing import Callable, Iterable, Iterator, IO, Optional, Tuple, TYPE_CHECKING
+from typing import Any, IO, TYPE_CHECKING
+from collections.abc import Iterable
-from coverage.exceptions import NoDataError, NotPython
-from coverage.files import prep_patterns, GlobMatcher
-from coverage.misc import ensure_dir_for_file, file_be_gone
+from coverage.exceptions import ConfigError, NoDataError
+from coverage.misc import human_sorted_items
from coverage.plugin import FileReporter
-from coverage.results import Analysis
-from coverage.types import Protocol, TMorf
+from coverage.report_core import get_analysis_to_report
+from coverage.results import Analysis, Numbers
+from coverage.types import TMorf
if TYPE_CHECKING:
from coverage import Coverage
-class Reporter(Protocol):
- """What we expect of reporters."""
-
- report_type: str
-
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float:
- """Generate a report of `morfs`, written to `outfile`."""
-
-
-def render_report(
- output_path: str,
- reporter: Reporter,
- morfs: Optional[Iterable[TMorf]],
- msgfn: Callable[[str], None],
-) -> float:
- """Run a one-file report generator, managing the output file.
-
- This function ensures the output file is ready to be written to. Then writes
- the report to it. Then closes the file and cleans up.
-
- """
- file_to_close = None
- delete_file = False
-
- if output_path == "-":
- outfile = sys.stdout
- else:
- # Ensure that the output directory is created; done here because this
- # report pre-opens the output file. HtmlReporter does this on its own
- # because its task is more complex, being multiple files.
- ensure_dir_for_file(output_path)
- outfile = open(output_path, "w", encoding="utf-8")
- file_to_close = outfile
- delete_file = True
-
- try:
- ret = reporter.report(morfs, outfile=outfile)
- if file_to_close is not None:
- msgfn(f"Wrote {reporter.report_type} to {output_path}")
- delete_file = False
- return ret
- finally:
- if file_to_close is not None:
- file_to_close.close()
- if delete_file:
- file_be_gone(output_path) # pragma: part covered (doesn't return)
-
-
-def get_analysis_to_report(
- coverage: Coverage,
- morfs: Optional[Iterable[TMorf]],
-) -> Iterator[Tuple[FileReporter, Analysis]]:
- """Get the files to report on.
-
- For each morf in `morfs`, if it should be reported on (based on the omit
- and include configuration options), yield a pair, the `FileReporter` and
- `Analysis` for the morf.
-
- """
- file_reporters = coverage._get_file_reporters(morfs)
- config = coverage.config
-
- if config.report_include:
- matcher = GlobMatcher(prep_patterns(config.report_include), "report_include")
- file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
-
- if config.report_omit:
- matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit")
- file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
-
- if not file_reporters:
- raise NoDataError("No data to report.")
-
- for fr in sorted(file_reporters):
- try:
- analysis = coverage._analyze(fr)
- except NotPython:
- # Only report errors for .py files, and only if we didn't
- # explicitly suppress those errors.
- # NotPython is only raised by PythonFileReporter, which has a
- # should_be_python() method.
- if fr.should_be_python(): # type: ignore[attr-defined]
- if config.ignore_errors:
- msg = f"Couldn't parse Python file '{fr.filename}'"
- coverage._warn(msg, slug="couldnt-parse")
- else:
- raise
- except Exception as exc:
- if config.ignore_errors:
- msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
- coverage._warn(msg, slug="couldnt-parse")
+class SummaryReporter:
+ """A reporter for writing the summary report."""
+
+ def __init__(self, coverage: Coverage) -> None:
+ self.coverage = coverage
+ self.config = self.coverage.config
+ self.branches = coverage.get_data().has_arcs()
+ self.outfile: IO[str] | None = None
+ self.output_format = self.config.format or "text"
+ if self.output_format not in {"text", "markdown", "total"}:
+ raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
+ self.fr_analysis: list[tuple[FileReporter, Analysis]] = []
+ self.skipped_count = 0
+ self.empty_count = 0
+ self.total = Numbers(precision=self.config.precision)
+
+ def write(self, line: str) -> None:
+ """Write a line to the output, adding a newline."""
+ assert self.outfile is not None
+ self.outfile.write(line.rstrip())
+ self.outfile.write("\n")
+
+ def write_items(self, items: Iterable[str]) -> None:
+ """Write a list of strings, joined together."""
+ self.write("".join(items))
+
+ def _report_text(
+ self,
+ header: list[str],
+ lines_values: list[list[Any]],
+ total_line: list[Any],
+ end_lines: list[str],
+ ) -> None:
+ """Internal method that prints report data in text format.
+
+ `header` is a list with captions.
+ `lines_values` is a list of lists of sortable values.
+ `total_line` is a list with values of the total line.
+ `end_lines` is a list of ending lines with information about skipped files.
+
+ """
+ # Prepare the formatting strings, header, and column sorting.
+ max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
+ max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
+ max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
+ formats = dict(
+ Name="{:{name_len}}",
+ Stmts="{:>7}",
+ Miss="{:>7}",
+ Branch="{:>7}",
+ BrPart="{:>7}",
+ Cover="{:>{n}}",
+ Missing="{:>10}",
+ )
+ header_items = [
+ formats[item].format(item, name_len=max_name, n=max_n)
+ for item in header
+ ]
+ header_str = "".join(header_items)
+ rule = "-" * len(header_str)
+
+ # Write the header
+ self.write(header_str)
+ self.write(rule)
+
+ formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}")
+ for values in lines_values:
+ # build string with line values
+ line_items = [
+ formats[item].format(str(value),
+ name_len=max_name, n=max_n-1) for item, value in zip(header, values)
+ ]
+ self.write_items(line_items)
+
+ # Write a TOTAL line
+ if lines_values:
+ self.write(rule)
+
+ line_items = [
+ formats[item].format(str(value),
+ name_len=max_name, n=max_n-1) for item, value in zip(header, total_line)
+ ]
+ self.write_items(line_items)
+
+ for end_line in end_lines:
+ self.write(end_line)
+
+ def _report_markdown(
+ self,
+ header: list[str],
+ lines_values: list[list[Any]],
+ total_line: list[Any],
+ end_lines: list[str],
+ ) -> None:
+ """Internal method that prints report data in markdown format.
+
+ `header` is a list with captions.
+ `lines_values` is a sorted list of lists containing coverage information.
+ `total_line` is a list with values of the total line.
+ `end_lines` is a list of ending lines with information about skipped files.
+
+ """
+ # Prepare the formatting strings, header, and column sorting.
+ max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
+ max_name = max(max_name, len("**TOTAL**")) + 1
+ formats = dict(
+ Name="| {:{name_len}}|",
+ Stmts="{:>9} |",
+ Miss="{:>9} |",
+ Branch="{:>9} |",
+ BrPart="{:>9} |",
+ Cover="{:>{n}} |",
+ Missing="{:>10} |",
+ )
+ max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
+ header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
+ header_str = "".join(header_items)
+ rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] +
+ ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]],
+ )
+
+ # Write the header
+ self.write(header_str)
+ self.write(rule_str)
+
+ for values in lines_values:
+ # build string with line values
+ formats.update(dict(Cover="{:>{n}}% |"))
+ line_items = [
+ formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1)
+ for item, value in zip(header, values)
+ ]
+ self.write_items(line_items)
+
+ # Write the TOTAL line
+ formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
+ total_line_items: list[str] = []
+ for item, value in zip(header, total_line):
+ if value == "":
+ insert = value
+ elif item == "Cover":
+ insert = f" **{value}%**"
else:
- raise
+ insert = f" **{value}**"
+ total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
+ self.write_items(total_line_items)
+ for end_line in end_lines:
+ self.write(end_line)
+
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
+ """Writes a report summarizing coverage statistics per module.
+
+ `outfile` is a text-mode file object to write the summary to.
+
+ """
+ self.outfile = outfile or sys.stdout
+
+ self.coverage.get_data().set_query_contexts(self.config.report_contexts)
+ for fr, analysis in get_analysis_to_report(self.coverage, morfs):
+ self.report_one_file(fr, analysis)
+
+ if not self.total.n_files and not self.skipped_count:
+ raise NoDataError("No data to report.")
+
+ if self.output_format == "total":
+ self.write(self.total.pc_covered_str)
+ else:
+ self.tabular_report()
+
+ return self.total.pc_covered
+
+ def tabular_report(self) -> None:
+ """Writes tabular report formats."""
+ # Prepare the header line and column sorting.
+ header = ["Name", "Stmts", "Miss"]
+ if self.branches:
+ header += ["Branch", "BrPart"]
+ header += ["Cover"]
+ if self.config.show_missing:
+ header += ["Missing"]
+
+ column_order = dict(name=0, stmts=1, miss=2, cover=-1)
+ if self.branches:
+ column_order.update(dict(branch=3, brpart=4))
+
+ # `lines_values` is a list of lists of sortable values.
+ lines_values = []
+
+ for (fr, analysis) in self.fr_analysis:
+ nums = analysis.numbers
+
+ args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
+ if self.branches:
+ args += [nums.n_branches, nums.n_partial_branches]
+ args += [nums.pc_covered_str]
+ if self.config.show_missing:
+ args += [analysis.missing_formatted(branches=True)]
+ args += [nums.pc_covered]
+ lines_values.append(args)
+
+ # Line sorting.
+ sort_option = (self.config.sort or "name").lower()
+ reverse = False
+ if sort_option[0] == "-":
+ reverse = True
+ sort_option = sort_option[1:]
+ elif sort_option[0] == "+":
+ sort_option = sort_option[1:]
+ sort_idx = column_order.get(sort_option)
+ if sort_idx is None:
+ raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
+ if sort_option == "name":
+ lines_values = human_sorted_items(lines_values, reverse=reverse)
+ else:
+ lines_values.sort(
+ key=lambda line: (line[sort_idx], line[0]),
+ reverse=reverse,
+ )
+
+ # Calculate total if we had at least one file.
+ total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
+ if self.branches:
+ total_line += [self.total.n_branches, self.total.n_partial_branches]
+ total_line += [self.total.pc_covered_str]
+ if self.config.show_missing:
+ total_line += [""]
+
+ # Create other final lines.
+ end_lines = []
+ if self.config.skip_covered and self.skipped_count:
+ file_suffix = "s" if self.skipped_count > 1 else ""
+ end_lines.append(
+ f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.",
+ )
+ if self.config.skip_empty and self.empty_count:
+ file_suffix = "s" if self.empty_count > 1 else ""
+ end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")
+
+ if self.output_format == "markdown":
+ formatter = self._report_markdown
+ else:
+ formatter = self._report_text
+ formatter(header, lines_values, total_line, end_lines)
+
+ def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
+ """Report on just one file, the callback from report()."""
+ nums = analysis.numbers
+ self.total += nums
+
+ no_missing_lines = (nums.n_missing == 0)
+ no_missing_branches = (nums.n_partial_branches == 0)
+ if self.config.skip_covered and no_missing_lines and no_missing_branches:
+ # Don't report on 100% files.
+ self.skipped_count += 1
+ elif self.config.skip_empty and nums.n_statements == 0:
+ # Don't report on empty files.
+ self.empty_count += 1
else:
- yield (fr, analysis)
+ self.fr_analysis.append((fr, analysis))
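
As an aside, the sort convention implemented in `tabular_report` above (an optional leading `-` or `+` on the column name, resolved through `column_order`) can be exercised on its own. A minimal sketch, with hypothetical rows shaped like the `lines_values` entries built above::

    # Rows mimic [name, stmts, miss, cover_str, pc_covered]; the trailing
    # pc_covered float is what "cover" sorts on (column_order index -1).
    rows = [
        ["b.py", 10, 2, "80", 80.0],
        ["a.py", 10, 5, "50", 50.0],
    ]
    column_order = dict(name=0, stmts=1, miss=2, cover=-1)

    sort_option = "-cover"            # like --sort=-cover: descending coverage
    reverse = sort_option.startswith("-")
    sort_idx = column_order[sort_option.lstrip("+-")]
    rows.sort(key=lambda line: (line[sort_idx], line[0]), reverse=reverse)
    assert [r[0] for r in rows] == ["b.py", "a.py"]
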
diff --git a/coverage/report_core.py b/coverage/report_core.py
new file mode 100644
index 000000000..477034bae
--- /dev/null
+++ b/coverage/report_core.py
@@ -0,0 +1,120 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Reporter foundation for coverage.py."""
+
+from __future__ import annotations
+
+import sys
+
+from typing import (
+ Callable, IO, Protocol, TYPE_CHECKING,
+)
+from collections.abc import Iterable, Iterator
+
+from coverage.exceptions import NoDataError, NotPython
+from coverage.files import prep_patterns, GlobMatcher
+from coverage.misc import ensure_dir_for_file, file_be_gone
+from coverage.plugin import FileReporter
+from coverage.results import Analysis
+from coverage.types import TMorf
+
+if TYPE_CHECKING:
+ from coverage import Coverage
+
+
+class Reporter(Protocol):
+ """What we expect of reporters."""
+
+ report_type: str
+
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
+ """Generate a report of `morfs`, written to `outfile`."""
+
+
+def render_report(
+ output_path: str,
+ reporter: Reporter,
+ morfs: Iterable[TMorf] | None,
+ msgfn: Callable[[str], None],
+) -> float:
+ """Run a one-file report generator, managing the output file.
+
+ This function ensures the output file is ready to be written to, writes
+ the report to it, then closes the file and cleans up.
+
+ """
+ file_to_close = None
+ delete_file = False
+
+ if output_path == "-":
+ outfile = sys.stdout
+ else:
+ # Ensure that the output directory is created; done here because this
+ # report pre-opens the output file. HtmlReporter does this on its own
+ # because its task is more complex: it writes multiple files.
+ ensure_dir_for_file(output_path)
+ outfile = open(output_path, "w", encoding="utf-8")
+ file_to_close = outfile
+ delete_file = True
+
+ try:
+ ret = reporter.report(morfs, outfile=outfile)
+ if file_to_close is not None:
+ msgfn(f"Wrote {reporter.report_type} to {output_path}")
+ delete_file = False
+ return ret
+ finally:
+ if file_to_close is not None:
+ file_to_close.close()
+ if delete_file:
+ file_be_gone(output_path) # pragma: part covered (doesn't return)
+
+
+def get_analysis_to_report(
+ coverage: Coverage,
+ morfs: Iterable[TMorf] | None,
+) -> Iterator[tuple[FileReporter, Analysis]]:
+ """Get the files to report on.
+
+ For each morf in `morfs`, if it should be reported on (based on the omit
+ and include configuration options), yield a pair: the `FileReporter` and
+ `Analysis` for the morf.
+
+ """
+ fr_morfs = coverage._get_file_reporters(morfs)
+ config = coverage.config
+
+ if config.report_include:
+ matcher = GlobMatcher(prep_patterns(config.report_include), "report_include")
+ fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if matcher.match(fr.filename)]
+
+ if config.report_omit:
+ matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit")
+ fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if not matcher.match(fr.filename)]
+
+ if not fr_morfs:
+ raise NoDataError("No data to report.")
+
+ for fr, morf in sorted(fr_morfs):
+ try:
+ analysis = coverage._analyze(morf)
+ except NotPython:
+ # Only report errors for .py files, and only if we didn't
+ # explicitly suppress those errors.
+ # NotPython is only raised by PythonFileReporter, which has a
+ # should_be_python() method.
+ if fr.should_be_python(): # type: ignore[attr-defined]
+ if config.ignore_errors:
+ msg = f"Couldn't parse Python file '{fr.filename}'"
+ coverage._warn(msg, slug="couldnt-parse")
+ else:
+ raise
+ except Exception as exc:
+ if config.ignore_errors:
+ msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
+ coverage._warn(msg, slug="couldnt-parse")
+ else:
+ raise
+ else:
+ yield (fr, analysis)
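
The `Reporter` protocol and `render_report` above compose as follows. A hedged sketch: `TotalOnlyReporter` is invented for illustration and is not part of coverage.py::

    import sys
    from typing import IO

    class TotalOnlyReporter:
        """Hypothetical reporter satisfying the Reporter protocol above."""
        report_type = "total-only report"

        def report(self, morfs, outfile: IO[str]) -> float:
            outfile.write("87.5\n")   # a real reporter would compute this
            return 87.5

    # render_report opens output_path (or uses stdout for "-"), calls
    # report(), and deletes the partial file if the reporter raises:
    #   from coverage.report_core import render_report
    #   pct = render_report("total.txt", TotalOnlyReporter(), None, print)
    TotalOnlyReporter().report(None, sys.stdout)
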
diff --git a/coverage/results.py b/coverage/results.py
index 2731700ed..a9bde97c3 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -6,10 +6,11 @@
from __future__ import annotations
import collections
+import dataclasses
-from typing import Callable, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING
+from collections.abc import Container, Iterable
+from typing import TYPE_CHECKING
-from coverage.debug import AutoReprMixin
from coverage.exceptions import ConfigError
from coverage.misc import nice_pair
from coverage.types import TArc, TLineNo
@@ -19,45 +20,93 @@
from coverage.plugin import FileReporter
+def analysis_from_file_reporter(
+ data: CoverageData,
+ precision: int,
+ file_reporter: FileReporter,
+ filename: str,
+) -> Analysis:
+ """Create an Analysis from a FileReporter."""
+ has_arcs = data.has_arcs()
+ statements = file_reporter.lines()
+ excluded = file_reporter.excluded_lines()
+ executed = file_reporter.translate_lines(data.lines(filename) or [])
+
+ if has_arcs:
+ arc_possibilities_set = file_reporter.arcs()
+ arcs: Iterable[TArc] = data.arcs(filename) or []
+ arcs = file_reporter.translate_arcs(arcs)
+
+ # Reduce the set of arcs to the ones that could be branches.
+ dests = collections.defaultdict(set)
+ for fromno, tono in arc_possibilities_set:
+ dests[fromno].add(tono)
+ single_dests = {
+ fromno: list(tonos)[0]
+ for fromno, tonos in dests.items()
+ if len(tonos) == 1
+ }
+ new_arcs = set()
+ for fromno, tono in arcs:
+ if fromno != tono:
+ new_arcs.add((fromno, tono))
+ else:
+ if fromno in single_dests:
+ new_arcs.add((fromno, single_dests[fromno]))
+
+ arcs_executed_set = file_reporter.translate_arcs(new_arcs)
+ exit_counts = file_reporter.exit_counts()
+ no_branch = file_reporter.no_branch_lines()
+ else:
+ arc_possibilities_set = set()
+ arcs_executed_set = set()
+ exit_counts = {}
+ no_branch = set()
+
+ return Analysis(
+ precision=precision,
+ filename=filename,
+ has_arcs=has_arcs,
+ statements=statements,
+ excluded=excluded,
+ executed=executed,
+ arc_possibilities_set=arc_possibilities_set,
+ arcs_executed_set=arcs_executed_set,
+ exit_counts=exit_counts,
+ no_branch=no_branch,
+ )
+
+
+@dataclasses.dataclass
class Analysis:
"""The results of analyzing a FileReporter."""
- def __init__(
- self,
- data: CoverageData,
- precision: int,
- file_reporter: FileReporter,
- file_mapper: Callable[[str], str],
- ) -> None:
- self.data = data
- self.file_reporter = file_reporter
- self.filename = file_mapper(self.file_reporter.filename)
- self.statements = self.file_reporter.lines()
- self.excluded = self.file_reporter.excluded_lines()
-
- # Identify missing statements.
- executed: Iterable[TLineNo]
- executed = self.data.lines(self.filename) or []
- executed = self.file_reporter.translate_lines(executed)
- self.executed = executed
+ precision: int
+ filename: str
+ has_arcs: bool
+ statements: set[TLineNo]
+ excluded: set[TLineNo]
+ executed: set[TLineNo]
+ arc_possibilities_set: set[TArc]
+ arcs_executed_set: set[TArc]
+ exit_counts: dict[TLineNo, int]
+ no_branch: set[TLineNo]
+
+ def __post_init__(self) -> None:
+ self.arc_possibilities = sorted(self.arc_possibilities_set)
+ self.arcs_executed = sorted(self.arcs_executed_set)
self.missing = self.statements - self.executed
- if self.data.has_arcs():
- self._arc_possibilities = sorted(self.file_reporter.arcs())
- self.exit_counts = self.file_reporter.exit_counts()
- self.no_branch = self.file_reporter.no_branch_lines()
+ if self.has_arcs:
n_branches = self._total_branches()
mba = self.missing_branch_arcs()
n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing)
n_missing_branches = sum(len(v) for k,v in mba.items())
else:
- self._arc_possibilities = []
- self.exit_counts = {}
- self.no_branch = set()
n_branches = n_partial_branches = n_missing_branches = 0
self.numbers = Numbers(
- precision=precision,
+ precision=self.precision,
n_files=1,
n_statements=len(self.statements),
n_excluded=len(self.excluded),
@@ -67,6 +116,50 @@ def __init__(
n_missing_branches=n_missing_branches,
)
+ def narrow(self, lines: Container[TLineNo]) -> Analysis:
+ """Create a narrowed Analysis.
+
+ The current analysis is copied to make a new one that only considers
+ the lines in `lines`.
+ """
+
+ statements = {lno for lno in self.statements if lno in lines}
+ excluded = {lno for lno in self.excluded if lno in lines}
+ executed = {lno for lno in self.executed if lno in lines}
+
+ if self.has_arcs:
+ arc_possibilities_set = {
+ (a, b) for a, b in self.arc_possibilities_set
+ if a in lines or b in lines
+ }
+ arcs_executed_set = {
+ (a, b) for a, b in self.arcs_executed_set
+ if a in lines or b in lines
+ }
+ exit_counts = {
+ lno: num for lno, num in self.exit_counts.items()
+ if lno in lines
+ }
+ no_branch = {lno for lno in self.no_branch if lno in lines}
+ else:
+ arc_possibilities_set = set()
+ arcs_executed_set = set()
+ exit_counts = {}
+ no_branch = set()
+
+ return Analysis(
+ precision=self.precision,
+ filename=self.filename,
+ has_arcs=self.has_arcs,
+ statements=statements,
+ excluded=excluded,
+ executed=executed,
+ arc_possibilities_set=arc_possibilities_set,
+ arcs_executed_set=arcs_executed_set,
+ exit_counts=exit_counts,
+ no_branch=no_branch,
+ )
+
def missing_formatted(self, branches: bool = False) -> str:
"""The missing line numbers, formatted nicely.
@@ -75,58 +168,24 @@ def missing_formatted(self, branches: bool = False) -> str:
If `branches` is true, includes the missing branch arcs also.
"""
- if branches and self.has_arcs():
+ if branches and self.has_arcs:
arcs = self.missing_branch_arcs().items()
else:
arcs = None
return format_lines(self.statements, self.missing, arcs=arcs)
- def has_arcs(self) -> bool:
- """Were arcs measured in this result?"""
- return self.data.has_arcs()
-
- def arc_possibilities(self) -> List[TArc]:
- """Returns a sorted list of the arcs in the code."""
- return self._arc_possibilities
-
- def arcs_executed(self) -> List[TArc]:
- """Returns a sorted list of the arcs actually executed in the code."""
- executed: Iterable[TArc]
- executed = self.data.arcs(self.filename) or []
- executed = self.file_reporter.translate_arcs(executed)
- return sorted(executed)
-
- def arcs_missing(self) -> List[TArc]:
+ def arcs_missing(self) -> list[TArc]:
"""Returns a sorted list of the un-executed arcs in the code."""
- possible = self.arc_possibilities()
- executed = self.arcs_executed()
missing = (
- p for p in possible
- if p not in executed
+ p for p in self.arc_possibilities
+ if p not in self.arcs_executed_set
and p[0] not in self.no_branch
and p[1] not in self.excluded
)
return sorted(missing)
- def arcs_unpredicted(self) -> List[TArc]:
- """Returns a sorted list of the executed arcs missing from the code."""
- possible = self.arc_possibilities()
- executed = self.arcs_executed()
- # Exclude arcs here which connect a line to itself. They can occur
- # in executed data in some cases. This is where they can cause
- # trouble, and here is where it's the least burden to remove them.
- # Also, generators can somehow cause arcs from "enter" to "exit", so
- # make sure we have at least one positive value.
- unpredicted = (
- e for e in executed
- if e not in possible
- and e[0] != e[1]
- and (e[0] > 0 or e[1] > 0)
- )
- return sorted(unpredicted)
-
- def _branch_lines(self) -> List[TLineNo]:
+ def _branch_lines(self) -> list[TLineNo]:
"""Returns a list of line numbers that have more than one exit."""
return [l1 for l1,count in self.exit_counts.items() if count > 1]
@@ -134,7 +193,7 @@ def _total_branches(self) -> int:
"""How many total branches are there?"""
return sum(count for count in self.exit_counts.values() if count > 1)
- def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
+ def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
"""Return arcs that weren't executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
@@ -144,25 +203,30 @@ def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
branch_lines = set(self._branch_lines())
mba = collections.defaultdict(list)
for l1, l2 in missing:
+ assert l1 != l2, f"In {self.filename}, didn't expect {l1} == {l2}"
if l1 in branch_lines:
mba[l1].append(l2)
return mba
- def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]:
+ def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
"""Return arcs that were executed from branch lines.
+ Only include ones that we considered possible.
+
Returns {l1:[l2a,l2b,...], ...}
"""
- executed = self.arcs_executed()
branch_lines = set(self._branch_lines())
eba = collections.defaultdict(list)
- for l1, l2 in executed:
+ for l1, l2 in self.arcs_executed:
+ assert l1 != l2, f"Oops: Didn't think this could happen: {l1 = }, {l2 = }"
+ if (l1, l2) not in self.arc_possibilities_set:
+ continue
if l1 in branch_lines:
eba[l1].append(l2)
return eba
- def branch_stats(self) -> Dict[TLineNo, Tuple[int, int]]:
+ def branch_stats(self) -> dict[TLineNo, tuple[int, int]]:
"""Get stats about branches.
Returns a dict mapping line numbers to a tuple:
@@ -178,7 +242,8 @@ def branch_stats(self) -> Dict[TLineNo, Tuple[int, int]]:
return stats
-class Numbers(AutoReprMixin):
+@dataclasses.dataclass
+class Numbers:
"""The numerical results of measuring coverage.
This holds the basic statistics from `Analysis`, and is used to roll
@@ -186,36 +251,14 @@ class Numbers(AutoReprMixin):
"""
- def __init__(
- self,
- precision: int = 0,
- n_files: int = 0,
- n_statements: int = 0,
- n_excluded: int = 0,
- n_missing: int = 0,
- n_branches: int = 0,
- n_partial_branches: int = 0,
- n_missing_branches: int = 0,
- ) -> None:
- assert 0 <= precision < 10
- self._precision = precision
- self._near0 = 1.0 / 10**precision
- self._near100 = 100.0 - self._near0
- self.n_files = n_files
- self.n_statements = n_statements
- self.n_excluded = n_excluded
- self.n_missing = n_missing
- self.n_branches = n_branches
- self.n_partial_branches = n_partial_branches
- self.n_missing_branches = n_missing_branches
-
- def init_args(self) -> List[int]:
- """Return a list for __init__(*args) to recreate this object."""
- return [
- self._precision,
- self.n_files, self.n_statements, self.n_excluded, self.n_missing,
- self.n_branches, self.n_partial_branches, self.n_missing_branches,
- ]
+ precision: int = 0
+ n_files: int = 0
+ n_statements: int = 0
+ n_excluded: int = 0
+ n_missing: int = 0
+ n_branches: int = 0
+ n_partial_branches: int = 0
+ n_missing_branches: int = 0
@property
def n_executed(self) -> int:
@@ -246,52 +289,26 @@ def pc_covered_str(self) -> str:
result in either "0" or "100".
"""
- return self.display_covered(self.pc_covered)
-
- def display_covered(self, pc: float) -> str:
- """Return a displayable total percentage, as a string.
-
- Note that "0" is only returned when the value is truly zero, and "100"
- is only returned when the value is truly 100. Rounding can never
- result in either "0" or "100".
-
- """
- if 0 < pc < self._near0:
- pc = self._near0
- elif self._near100 < pc < 100:
- pc = self._near100
- else:
- pc = round(pc, self._precision)
- return "%.*f" % (self._precision, pc)
-
- def pc_str_width(self) -> int:
- """How many characters wide can pc_covered_str be?"""
- width = 3 # "100"
- if self._precision > 0:
- width += 1 + self._precision
- return width
+ return display_covered(self.pc_covered, self.precision)
@property
- def ratio_covered(self) -> Tuple[int, int]:
+ def ratio_covered(self) -> tuple[int, int]:
"""Return a numerator and denominator for the coverage ratio."""
numerator = self.n_executed + self.n_executed_branches
denominator = self.n_statements + self.n_branches
return numerator, denominator
def __add__(self, other: Numbers) -> Numbers:
- nums = Numbers(precision=self._precision)
- nums.n_files = self.n_files + other.n_files
- nums.n_statements = self.n_statements + other.n_statements
- nums.n_excluded = self.n_excluded + other.n_excluded
- nums.n_missing = self.n_missing + other.n_missing
- nums.n_branches = self.n_branches + other.n_branches
- nums.n_partial_branches = (
- self.n_partial_branches + other.n_partial_branches
+ return Numbers(
+ self.precision,
+ self.n_files + other.n_files,
+ self.n_statements + other.n_statements,
+ self.n_excluded + other.n_excluded,
+ self.n_missing + other.n_missing,
+ self.n_branches + other.n_branches,
+ self.n_partial_branches + other.n_partial_branches,
+ self.n_missing_branches + other.n_missing_branches,
)
- nums.n_missing_branches = (
- self.n_missing_branches + other.n_missing_branches
- )
- return nums
def __radd__(self, other: int) -> Numbers:
# Implementing 0+Numbers allows us to sum() a list of Numbers.
@@ -299,10 +316,28 @@ def __radd__(self, other: int) -> Numbers:
return self
+def display_covered(pc: float, precision: int) -> str:
+ """Return a displayable total percentage, as a string.
+
+ Note that "0" is only returned when the value is truly zero, and "100"
+ is only returned when the value is truly 100. Rounding can never
+ result in either "0" or "100".
+
+ """
+ near0 = 1.0 / 10 ** precision
+ if 0 < pc < near0:
+ pc = near0
+ elif (100.0 - near0) < pc < 100:
+ pc = 100.0 - near0
+ else:
+ pc = round(pc, precision)
+ return "%.*f" % (precision, pc)
+
+
def _line_ranges(
statements: Iterable[TLineNo],
lines: Iterable[TLineNo],
-) -> List[Tuple[TLineNo, TLineNo]]:
+) -> list[tuple[TLineNo, TLineNo]]:
"""Produce a list of ranges for `format_lines`."""
statements = sorted(statements)
lines = sorted(lines)
@@ -329,7 +364,7 @@ def _line_ranges(
def format_lines(
statements: Iterable[TLineNo],
lines: Iterable[TLineNo],
- arcs: Optional[Iterable[Tuple[TLineNo, List[TLineNo]]]] = None,
+ arcs: Iterable[tuple[TLineNo, list[TLineNo]]] | None = None,
) -> str:
"""Nicely format a list of line numbers.
@@ -357,7 +392,7 @@ def format_lines(
dest = (ex if ex > 0 else "exit")
line_items.append((line, f"{line}->{dest}"))
- ret = ', '.join(t[-1] for t in sorted(line_items))
+ ret = ", ".join(t[-1] for t in sorted(line_items))
return ret
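
The guarantee in the `display_covered` docstring (rounding never produces "0" or "100" for partial coverage) is easy to check; a small standalone copy of the same logic::

    def display_covered(pc: float, precision: int) -> str:
        # Clamp nearly-0 and nearly-100 values before rounding so that
        # only exact 0 and exact 100 display as "0" and "100".
        near0 = 1.0 / 10 ** precision
        if 0 < pc < near0:
            pc = near0
        elif (100.0 - near0) < pc < 100:
            pc = 100.0 - near0
        else:
            pc = round(pc, precision)
        return "%.*f" % (precision, pc)

    assert display_covered(99.999, 2) == "99.99"   # not rounded up to 100
    assert display_covered(0.004, 2) == "0.01"     # not rounded down to 0
    assert display_covered(100.0, 2) == "100.00"   # exact 100 stays 100
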
diff --git a/coverage/sqldata.py b/coverage/sqldata.py
index 42cf4501d..76b569285 100644
--- a/coverage/sqldata.py
+++ b/coverage/sqldata.py
@@ -6,32 +6,31 @@
from __future__ import annotations
import collections
-import contextlib
import datetime
import functools
import glob
import itertools
import os
import random
-import re
import socket
import sqlite3
+import string
import sys
import textwrap
import threading
import zlib
from typing import (
- cast, Any, Callable, Collection, Dict, Iterable, Iterator, List, Mapping,
- Optional, Sequence, Set, Tuple, TypeVar, Union,
+ cast, Any, Callable,
)
+from collections.abc import Collection, Mapping, Sequence
-from coverage.debug import NoDebugging, AutoReprMixin, clipped_repr
+from coverage.debug import NoDebugging, auto_repr
from coverage.exceptions import CoverageException, DataError
-from coverage.files import PathAliases
from coverage.misc import file_be_gone, isolate_module
from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits
-from coverage.types import FilePath, TArc, TDebugCtl, TLineNo, TWarnFn
+from coverage.sqlitedb import SqliteDb
+from coverage.types import AnyCallable, FilePath, TArc, TDebugCtl, TLineNo, TWarnFn
from coverage.version import __version__
os = isolate_module(os)
@@ -112,9 +111,7 @@
);
"""
-TMethod = TypeVar("TMethod", bound=Callable[..., Any])
-
-def _locked(method: TMethod) -> TMethod:
+def _locked(method: AnyCallable) -> AnyCallable:
"""A decorator for methods that should hold self._lock."""
@functools.wraps(method)
def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any:
@@ -124,10 +121,10 @@ def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any:
if self._debug.should("lock"):
self._debug.write(f"Locked {self._lock!r} for {method.__name__}")
return method(self, *args, **kwargs)
- return _wrapped # type: ignore[return-value]
+ return _wrapped
-class CoverageData(AutoReprMixin):
+class CoverageData:
"""Manages collected coverage data, including file storage.
This class is the public supported API to the data that coverage.py
@@ -214,11 +211,11 @@ class CoverageData(AutoReprMixin):
def __init__(
self,
- basename: Optional[FilePath] = None,
- suffix: Optional[Union[str, bool]] = None,
+ basename: FilePath | None = None,
+ suffix: str | bool | None = None,
no_disk: bool = False,
- warn: Optional[TWarnFn] = None,
- debug: Optional[TDebugCtl] = None,
+ warn: TWarnFn | None = None,
+ debug: TDebugCtl | None = None,
) -> None:
"""Create a :class:`CoverageData` object to hold coverage-measured data.
@@ -242,9 +239,9 @@ def __init__(
self._choose_filename()
# Maps filenames to row ids.
- self._file_map: Dict[str, int] = {}
+ self._file_map: dict[str, int] = {}
# Maps thread ids to SqliteDb objects.
- self._dbs: Dict[int, SqliteDb] = {}
+ self._dbs: dict[int, SqliteDb] = {}
self._pid = os.getpid()
# Synchronize the operations used during collection.
self._lock = threading.RLock()
@@ -255,9 +252,11 @@ def __init__(
self._has_lines = False
self._has_arcs = False
- self._current_context: Optional[str] = None
- self._current_context_id: Optional[int] = None
- self._query_context_ids: Optional[List[int]] = None
+ self._current_context: str | None = None
+ self._current_context_id: int | None = None
+ self._query_context_ids: list[int] | None = None
+
+ __repr__ = auto_repr
def _choose_filename(self) -> None:
"""Set self._filename based on inited attributes."""
@@ -298,16 +297,16 @@ def _read_db(self) -> None:
else:
raise DataError(
"Data file {!r} doesn't seem to be a coverage data file: {}".format(
- self._filename, exc
- )
+ self._filename, exc,
+ ),
) from exc
else:
schema_version = row[0]
if schema_version != SCHEMA_VERSION:
raise DataError(
"Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
- self._filename, schema_version, SCHEMA_VERSION
- )
+ self._filename, schema_version, SCHEMA_VERSION,
+ ),
)
row = db.execute_one("select value from meta where key = 'has_arcs'")
@@ -396,7 +395,7 @@ def loads(self, data: bytes) -> None:
self._debug.write(f"Loading data into data file {self._filename!r}")
if data[:1] != b"z":
raise DataError(
- f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)"
+ f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)",
)
script = zlib.decompress(data[1:]).decode("utf-8")
self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug)
@@ -405,7 +404,7 @@ def loads(self, data: bytes) -> None:
self._read_db()
self._have_used = True
- def _file_id(self, filename: str, add: bool = False) -> Optional[int]:
+ def _file_id(self, filename: str, add: bool = False) -> int | None:
"""Get the file id for `filename`.
If filename is not in the database yet, add it if `add` is True.
@@ -416,11 +415,11 @@ def _file_id(self, filename: str, add: bool = False) -> Optional[int]:
with self._connect() as con:
self._file_map[filename] = con.execute_for_rowid(
"insert or replace into file (path) values (?)",
- (filename,)
+ (filename,),
)
return self._file_map.get(filename)
- def _context_id(self, context: str) -> Optional[int]:
+ def _context_id(self, context: str) -> int | None:
"""Get the id for a context."""
assert context is not None
self._start_using()
@@ -432,7 +431,7 @@ def _context_id(self, context: str) -> Optional[int]:
return None
@_locked
- def set_context(self, context: Optional[str]) -> None:
+ def set_context(self, context: str | None) -> None:
"""Set the current context for future :meth:`add_lines` etc.
`context` is a str, the name of the context to use for the next data
@@ -442,7 +441,7 @@ def set_context(self, context: Optional[str]) -> None:
"""
if self._debug.should("dataop"):
- self._debug.write(f"Setting context: {context!r}")
+ self._debug.write(f"Setting coverage context: {context!r}")
self._current_context = context
self._current_context_id = None
@@ -456,7 +455,7 @@ def _set_context_id(self) -> None:
with self._connect() as con:
self._current_context_id = con.execute_for_rowid(
"insert into context (context) values (?)",
- (context,)
+ (context,),
)
def base_filename(self) -> str:
@@ -486,8 +485,11 @@ def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None:
"""
if self._debug.should("dataop"):
self._debug.write("Adding lines: %d files, %d lines total" % (
- len(line_data), sum(bool(len(lines)) for lines in line_data.values())
+ len(line_data), sum(len(lines) for lines in line_data.values()),
))
+ if self._debug.should("dataop2"):
+ for filename, linenos in sorted(line_data.items()):
+ self._debug.write(f" {filename}: {linenos}")
self._start_using()
self._choose_lines_or_arcs(lines=True)
if not line_data:
@@ -495,18 +497,18 @@ def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None:
with self._connect() as con:
self._set_context_id()
for filename, linenos in line_data.items():
- linemap = nums_to_numbits(linenos)
+ line_bits = nums_to_numbits(linenos)
file_id = self._file_id(filename, add=True)
query = "select numbits from line_bits where file_id = ? and context_id = ?"
with con.execute(query, (file_id, self._current_context_id)) as cur:
existing = list(cur)
if existing:
- linemap = numbits_union(linemap, existing[0][0])
+ line_bits = numbits_union(line_bits, existing[0][0])
con.execute_void(
"insert or replace into line_bits " +
" (file_id, context_id, numbits) values (?, ?, ?)",
- (file_id, self._current_context_id, linemap),
+ (file_id, self._current_context_id, line_bits),
)
@_locked
@@ -521,8 +523,11 @@ def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None:
"""
if self._debug.should("dataop"):
self._debug.write("Adding arcs: %d files, %d arcs total" % (
- len(arc_data), sum(len(arcs) for arcs in arc_data.values())
+ len(arc_data), sum(len(arcs) for arcs in arc_data.values()),
))
+ if self._debug.should("dataop2"):
+ for filename, arcs in sorted(arc_data.items()):
+ self._debug.write(f" {filename}: {arcs}")
self._start_using()
self._choose_lines_or_arcs(arcs=True)
if not arc_data:
@@ -558,7 +563,7 @@ def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None
with self._connect() as con:
con.execute_void(
"insert or ignore into meta (key, value) values (?, ?)",
- ("has_arcs", str(int(arcs)))
+ ("has_arcs", str(int(arcs))),
)
@_locked
@@ -582,12 +587,12 @@ def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None:
raise DataError(
"Conflicting file tracer name for '{}': {!r} vs {!r}".format(
filename, existing_plugin, plugin_name,
- )
+ ),
)
elif plugin_name:
con.execute_void(
"insert into tracer (file_id, tracer) values (?, ?)",
- (file_id, plugin_name)
+ (file_id, plugin_name),
)
def touch_file(self, filename: str, plugin_name: str = "") -> None:
@@ -598,7 +603,7 @@ def touch_file(self, filename: str, plugin_name: str = "") -> None:
"""
self.touch_files([filename], plugin_name)
- def touch_files(self, filenames: Collection[str], plugin_name: Optional[str] = None) -> None:
+ def touch_files(self, filenames: Collection[str], plugin_name: str | None = None) -> None:
"""Ensure that `filenames` appear in the data, empty if needed.
`plugin_name` is the name of the plugin responsible for these files.
@@ -641,12 +646,16 @@ def purge_files(self, filenames: Collection[str]) -> None:
continue
con.execute_void(sql, (file_id,))
- def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None) -> None:
- """Update this data with data from several other :class:`CoverageData` instances.
+ def update(
+ self,
+ other_data: CoverageData,
+ map_path: Callable[[str], str] | None = None,
+ ) -> None:
+ """Update this data with data from another :class:`CoverageData`.
- If `aliases` is provided, it's a `PathAliases` object that is used to
- re-map paths to match the local machine's. Note: `aliases` is None
- only when called directly from the test suite.
+ If `map_path` is provided, it's a function that re-maps paths to match
+ the local machine's. Note: `map_path` is None only when called
+ directly from the test suite.
"""
if self._debug.should("dataop"):
@@ -654,11 +663,11 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
getattr(other_data, "_filename", "???"),
))
if self._has_lines and other_data._has_arcs:
- raise DataError("Can't combine arc data with line data")
+ raise DataError("Can't combine branch coverage data with statement data")
if self._has_arcs and other_data._has_lines:
- raise DataError("Can't combine line data with arc data")
+ raise DataError("Can't combine statement coverage data with branch data")
- aliases = aliases or PathAliases()
+ map_path = map_path or (lambda p: p)
# Force the database we're writing to to exist before we start nesting contexts.
self._start_using()
@@ -668,7 +677,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
with other_data._connect() as con:
# Get files data.
with con.execute("select path from file") as cur:
- files = {path: aliases.map(path) for (path,) in cur}
+ files = {path: map_path(path) for (path,) in cur}
# Get contexts data.
with con.execute("select context from context") as cur:
@@ -679,7 +688,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
"select file.path, context.context, arc.fromno, arc.tono " +
"from arc " +
"inner join file on file.id = arc.file_id " +
- "inner join context on context.id = arc.context_id"
+ "inner join context on context.id = arc.context_id",
) as cur:
arcs = [
(files[path], context, fromno, tono)
@@ -691,9 +700,9 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
"select file.path, context.context, line_bits.numbits " +
"from line_bits " +
"inner join file on file.id = line_bits.file_id " +
- "inner join context on context.id = line_bits.context_id"
+ "inner join context on context.id = line_bits.context_id",
) as cur:
- lines: Dict[Tuple[str, str], bytes] = {}
+ lines: dict[tuple[str, str], bytes] = {}
for path, context, numbits in cur:
key = (files[path], context)
if key in lines:
@@ -704,7 +713,7 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
with con.execute(
"select file.path, tracer " +
"from tracer " +
- "inner join file on file.id = tracer.file_id"
+ "inner join file on file.id = tracer.file_id",
) as cur:
tracers = {files[path]: tracer for (path, tracer) in cur}
@@ -720,24 +729,24 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
this_tracers = {path: "" for path, in cur}
with con.execute(
"select file.path, tracer from tracer " +
- "inner join file on file.id = tracer.file_id"
+ "inner join file on file.id = tracer.file_id",
) as cur:
this_tracers.update({
- aliases.map(path): tracer
+ map_path(path): tracer
for path, tracer in cur
})
# Create all file and context rows in the DB.
con.executemany_void(
"insert or ignore into file (path) values (?)",
- ((file,) for file in files.values())
+ ((file,) for file in files.values()),
)
with con.execute("select id, path from file") as cur:
file_ids = {path: id for id, path in cur}
self._file_map.update(file_ids)
con.executemany_void(
"insert or ignore into context (context) values (?)",
- ((context,) for context in contexts)
+ ((context,) for context in contexts),
)
with con.execute("select id, context from context") as cur:
context_ids = {context: id for id, context in cur}
@@ -753,56 +762,54 @@ def update(self, other_data: CoverageData, aliases: Optional[PathAliases] = None
if this_tracer is not None and this_tracer != other_tracer:
raise DataError(
"Conflicting file tracer name for '{}': {!r} vs {!r}".format(
- path, this_tracer, other_tracer
- )
+ path, this_tracer, other_tracer,
+ ),
)
tracer_map[path] = other_tracer
# Prepare arc and line rows to be inserted by converting the file
# and context strings with integer ids. Then use the efficient
# `executemany()` to insert all rows at once.
- arc_rows = (
- (file_ids[file], context_ids[context], fromno, tono)
- for file, context, fromno, tono in arcs
- )
-
- # Get line data.
- with con.execute(
- "select file.path, context.context, line_bits.numbits " +
- "from line_bits " +
- "inner join file on file.id = line_bits.file_id " +
- "inner join context on context.id = line_bits.context_id"
- ) as cur:
- for path, context, numbits in cur:
- key = (aliases.map(path), context)
- if key in lines:
- numbits = numbits_union(lines[key], numbits)
- lines[key] = numbits
if arcs:
self._choose_lines_or_arcs(arcs=True)
+ arc_rows = (
+ (file_ids[file], context_ids[context], fromno, tono)
+ for file, context, fromno, tono in arcs
+ )
+
# Write the combined data.
con.executemany_void(
"insert or ignore into arc " +
"(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
- arc_rows
+ arc_rows,
)
if lines:
self._choose_lines_or_arcs(lines=True)
- con.execute_void("delete from line_bits")
+
+ for (file, context), numbits in lines.items():
+ with con.execute(
+ "select numbits from line_bits where file_id = ? and context_id = ?",
+ (file_ids[file], context_ids[context]),
+ ) as cur:
+ existing = list(cur)
+ if existing:
+ lines[(file, context)] = numbits_union(numbits, existing[0][0])
+
con.executemany_void(
- "insert into line_bits " +
+ "insert or replace into line_bits " +
"(file_id, context_id, numbits) values (?, ?, ?)",
[
(file_ids[file], context_ids[context], numbits)
for (file, context), numbits in lines.items()
- ]
+ ],
)
+
con.executemany_void(
"insert or ignore into tracer (file_id, tracer) values (?, ?)",
- ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
+ ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()),
)
if not self._no_disk:
@@ -857,7 +864,7 @@ def has_arcs(self) -> bool:
"""Does the database have arcs (True) or lines (False)."""
return bool(self._has_arcs)
- def measured_files(self) -> Set[str]:
+ def measured_files(self) -> set[str]:
"""A set of all files that have been measured.
Note that a file may be mentioned as measured even though no lines or
@@ -866,7 +873,7 @@ def measured_files(self) -> Set[str]:
"""
return set(self._file_map)
- def measured_contexts(self) -> Set[str]:
+ def measured_contexts(self) -> set[str]:
"""A set of all contexts that have been measured.
.. versionadded:: 5.0
@@ -878,7 +885,7 @@ def measured_contexts(self) -> Set[str]:
contexts = {row[0] for row in cur}
return contexts
- def file_tracer(self, filename: str) -> Optional[str]:
+ def file_tracer(self, filename: str) -> str | None:
"""Get the plugin name of the file tracer for a file.
Returns the name of the plugin that handles this file. If the file was
@@ -912,7 +919,7 @@ def set_query_context(self, context: str) -> None:
with con.execute("select id from context where context = ?", (context,)) as cur:
self._query_context_ids = [row[0] for row in cur.fetchall()]
- def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None:
+ def set_query_contexts(self, contexts: Sequence[str] | None) -> None:
"""Set a number of contexts for subsequent querying.
The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
@@ -933,7 +940,7 @@ def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None:
else:
self._query_context_ids = None
- def lines(self, filename: str) -> Optional[List[TLineNo]]:
+ def lines(self, filename: str) -> list[TLineNo] | None:
"""Get the list of lines executed for a source file.
If the file was not measured, returns None. A file might be measured,
@@ -968,7 +975,7 @@ def lines(self, filename: str) -> Optional[List[TLineNo]]:
nums.update(numbits_to_nums(row[0]))
return list(nums)
- def arcs(self, filename: str) -> Optional[List[TArc]]:
+ def arcs(self, filename: str) -> list[TArc] | None:
"""Get the list of arcs executed for a file.
If the file was not measured, returns None. A file might be measured,
@@ -1000,7 +1007,7 @@ def arcs(self, filename: str) -> Optional[List[TArc]]:
with con.execute(query, data) as cur:
return list(cur)
- def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]:
+ def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]:
"""Get the contexts for each line in a file.
Returns:
@@ -1052,7 +1059,7 @@ def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]:
return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()}
@classmethod
- def sys_info(cls) -> List[Tuple[str, Any]]:
+ def sys_info(cls) -> list[tuple[str, Any]]:
"""Our information for `Coverage.sys_info`.
Returns a list of (key, value) pairs.
@@ -1072,7 +1079,7 @@ def sys_info(cls) -> List[Tuple[str, Any]]:
]
-def filename_suffix(suffix: Union[str, bool, None]) -> Union[str, None]:
+def filename_suffix(suffix: str | bool | None) -> str | None:
"""Compute a filename suffix for a data file.
If `suffix` is a string or None, simply return it. If `suffix` is True,
@@ -1087,193 +1094,10 @@ def filename_suffix(suffix: Union[str, bool, None]) -> Union[str, None]:
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
- dice = random.Random(os.urandom(8)).randint(0, 999999)
- suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
+ die = random.Random(os.urandom(8))
+ letters = string.ascii_uppercase + string.ascii_lowercase
+ rolls = "".join(die.choice(letters) for _ in range(6))
+ suffix = f"{socket.gethostname()}.{os.getpid()}.X{rolls}x"
elif suffix is False:
suffix = None
return suffix
-
-
-class SqliteDb(AutoReprMixin):
- """A simple abstraction over a SQLite database.
-
- Use as a context manager, then you can use it like a
- :class:`python:sqlite3.Connection` object::
-
- with SqliteDb(filename, debug_control) as db:
- db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,))
-
- """
- def __init__(self, filename: str, debug: TDebugCtl) -> None:
- self.debug = debug
- self.filename = filename
- self.nest = 0
- self.con: Optional[sqlite3.Connection] = None
-
- def _connect(self) -> None:
- """Connect to the db and do universal initialization."""
- if self.con is not None:
- return
-
- # It can happen that Python switches threads while the tracer writes
- # data. The second thread will also try to write to the data,
- # effectively causing a nested context. However, given the idempotent
- # nature of the tracer operations, sharing a connection among threads
- # is not a problem.
- if self.debug.should("sql"):
- self.debug.write(f"Connecting to {self.filename!r}")
- try:
- self.con = sqlite3.connect(self.filename, check_same_thread=False)
- except sqlite3.Error as exc:
- raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc
-
- self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None)
-
- # This pragma makes writing faster. It disables rollbacks, but we never need them.
- # PyPy needs the .close() calls here, or sqlite gets twisted up:
- # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
- self.execute_void("pragma journal_mode=off")
- # This pragma makes writing faster.
- self.execute_void("pragma synchronous=off")
-
- def close(self) -> None:
- """If needed, close the connection."""
- if self.con is not None and self.filename != ":memory:":
- self.con.close()
- self.con = None
-
- def __enter__(self) -> SqliteDb:
- if self.nest == 0:
- self._connect()
- assert self.con is not None
- self.con.__enter__()
- self.nest += 1
- return self
-
- def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def]
- self.nest -= 1
- if self.nest == 0:
- try:
- assert self.con is not None
- self.con.__exit__(exc_type, exc_value, traceback)
- self.close()
- except Exception as exc:
- if self.debug.should("sql"):
- self.debug.write(f"EXCEPTION from __exit__: {exc}")
- raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc
-
- def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor:
- """Same as :meth:`python:sqlite3.Connection.execute`."""
- if self.debug.should("sql"):
- tail = f" with {parameters!r}" if parameters else ""
- self.debug.write(f"Executing {sql!r}{tail}")
- try:
- assert self.con is not None
- try:
- return self.con.execute(sql, parameters) # type: ignore[arg-type]
- except Exception:
- # In some cases, an error might happen that isn't really an
- # error. Try again immediately.
- # https://github.com/nedbat/coveragepy/issues/1010
- return self.con.execute(sql, parameters) # type: ignore[arg-type]
- except sqlite3.Error as exc:
- msg = str(exc)
- try:
- # `execute` is the first thing we do with the database, so try
- # hard to provide useful hints if something goes wrong now.
- with open(self.filename, "rb") as bad_file:
- cov4_sig = b"!coverage.py: This is a private format"
- if bad_file.read(len(cov4_sig)) == cov4_sig:
- msg = (
- "Looks like a coverage 4.x data file. " +
- "Are you mixing versions of coverage?"
- )
- except Exception: # pragma: cant happen
- pass
- if self.debug.should("sql"):
- self.debug.write(f"EXCEPTION from execute: {msg}")
- raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc
-
- @contextlib.contextmanager
- def execute(
- self,
- sql: str,
- parameters: Iterable[Any] = (),
- ) -> Iterator[sqlite3.Cursor]:
- """Context managed :meth:`python:sqlite3.Connection.execute`.
-
- Use with a ``with`` statement to auto-close the returned cursor.
- """
- cur = self._execute(sql, parameters)
- try:
- yield cur
- finally:
- cur.close()
-
- def execute_void(self, sql: str, parameters: Iterable[Any] = ()) -> None:
- """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor."""
- self._execute(sql, parameters).close()
-
- def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int:
- """Like execute, but returns the lastrowid."""
- with self.execute(sql, parameters) as cur:
- assert cur.lastrowid is not None
- rowid: int = cur.lastrowid
- if self.debug.should("sqldata"):
- self.debug.write(f"Row id result: {rowid!r}")
- return rowid
-
- def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> Optional[Tuple[Any, ...]]:
- """Execute a statement and return the one row that results.
-
- This is like execute(sql, parameters).fetchone(), except it is
- correct in reading the entire result set. This will raise an
- exception if more than one row results.
-
- Returns a row, or None if there were no rows.
- """
- with self.execute(sql, parameters) as cur:
- rows = list(cur)
- if len(rows) == 0:
- return None
- elif len(rows) == 1:
- return cast(Tuple[Any, ...], rows[0])
- else:
- raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")
-
- def _executemany(self, sql: str, data: List[Any]) -> sqlite3.Cursor:
- """Same as :meth:`python:sqlite3.Connection.executemany`."""
- if self.debug.should("sql"):
- final = ":" if self.debug.should("sqldata") else ""
- self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}")
- if self.debug.should("sqldata"):
- for i, row in enumerate(data):
- self.debug.write(f"{i:4d}: {row!r}")
- assert self.con is not None
- try:
- return self.con.executemany(sql, data)
- except Exception: # pragma: cant happen
- # In some cases, an error might happen that isn't really an
- # error. Try again immediately.
- # https://github.com/nedbat/coveragepy/issues/1010
- return self.con.executemany(sql, data)
-
- def executemany_void(self, sql: str, data: Iterable[Any]) -> None:
- """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor."""
- data = list(data)
- if data:
- self._executemany(sql, data).close()
-
- def executescript(self, script: str) -> None:
- """Same as :meth:`python:sqlite3.Connection.executescript`."""
- if self.debug.should("sql"):
- self.debug.write("Executing script with {} chars: {}".format(
- len(script), clipped_repr(script, 100),
- ))
- assert self.con is not None
- self.con.executescript(script).close()
-
- def dump(self) -> str:
- """Return a multi-line string, the SQL dump of the database."""
- assert self.con is not None
- return "\n".join(self.con.iterdump())
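
Note the `update` signature change above: instead of a `PathAliases` object, callers now pass any `str -> str` callable, typically the bound `map` method of a `PathAliases`. A hedged sketch of the calling convention (file names are illustrative)::

    from coverage.data import CoverageData
    from coverage.files import PathAliases

    combined = CoverageData("combined.coverage")
    other = CoverageData(".coverage.worker")    # illustrative data file
    other.read()

    aliases = PathAliases()
    aliases.add("/jenkins/build/*/src", "src")  # remote pattern -> local path

    # Previously: combined.update(other, aliases=aliases)
    combined.update(other, map_path=aliases.map)
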
diff --git a/coverage/sqlitedb.py b/coverage/sqlitedb.py
new file mode 100644
index 000000000..1bbe7e1ba
--- /dev/null
+++ b/coverage/sqlitedb.py
@@ -0,0 +1,231 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""SQLite abstraction for coverage.py"""
+
+from __future__ import annotations
+
+import contextlib
+import re
+import sqlite3
+
+from typing import cast, Any
+from collections.abc import Iterable, Iterator
+
+from coverage.debug import auto_repr, clipped_repr, exc_one_line
+from coverage.exceptions import DataError
+from coverage.types import TDebugCtl
+
+
+class SqliteDb:
+ """A simple abstraction over a SQLite database.
+
+ Use as a context manager; then you can use it like a
+ :class:`python:sqlite3.Connection` object::
+
+ with SqliteDb(filename, debug_control) as db:
+ with db.execute("select a, b from some_table") as cur:
+ for a, b in cur:
+ etc(a, b)
+
+ """
+ def __init__(self, filename: str, debug: TDebugCtl) -> None:
+ self.debug = debug
+ self.filename = filename
+ self.nest = 0
+ self.con: sqlite3.Connection | None = None
+
+ __repr__ = auto_repr
+
+ def _connect(self) -> None:
+ """Connect to the db and do universal initialization."""
+ if self.con is not None:
+ return
+
+ # It can happen that Python switches threads while the tracer writes
+ # data. The second thread will also try to write to the data,
+ # effectively causing a nested context. However, given the idempotent
+ # nature of the tracer operations, sharing a connection among threads
+ # is not a problem.
+ if self.debug.should("sql"):
+ self.debug.write(f"Connecting to {self.filename!r}")
+ try:
+ self.con = sqlite3.connect(self.filename, check_same_thread=False)
+ except sqlite3.Error as exc:
+ raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc
+
+ if self.debug.should("sql"):
+ self.debug.write(f"Connected to {self.filename!r} as {self.con!r}")
+
+ self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None)
+
+ # Turning off journal_mode can speed up writing. It can't always be
+ # disabled, so we have to be prepared for *-journal files elsewhere.
+ # In Python 3.12+, we can change the config to allow journal_mode=off.
+ if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"):
+ # Turn off defensive mode, so that journal_mode=off can succeed.
+ self.con.setconfig( # type: ignore[attr-defined, unused-ignore]
+ sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False,
+ )
+
+ # This pragma makes writing faster. It disables rollbacks, but we never need them.
+ self.execute_void("pragma journal_mode=off")
+
+ # This pragma makes writing faster. It can fail in unusual situations
+ # (https://github.com/nedbat/coveragepy/issues/1646), so use fail_ok=True
+ # to keep things going.
+ self.execute_void("pragma synchronous=off", fail_ok=True)
+
+ def close(self) -> None:
+ """If needed, close the connection."""
+ if self.con is not None and self.filename != ":memory:":
+ if self.debug.should("sql"):
+ self.debug.write(f"Closing {self.con!r} on {self.filename!r}")
+ self.con.close()
+ self.con = None
+
+ def __enter__(self) -> SqliteDb:
+ if self.nest == 0:
+ self._connect()
+ assert self.con is not None
+ self.con.__enter__()
+ self.nest += 1
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def]
+ self.nest -= 1
+ if self.nest == 0:
+ try:
+ assert self.con is not None
+ self.con.__exit__(exc_type, exc_value, traceback)
+ self.close()
+ except Exception as exc:
+ if self.debug.should("sql"):
+ self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}")
+ raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc
+
+ def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor:
+ """Same as :meth:`python:sqlite3.Connection.execute`."""
+ if self.debug.should("sql"):
+ tail = f" with {parameters!r}" if parameters else ""
+ self.debug.write(f"Executing {sql!r}{tail}")
+ try:
+ assert self.con is not None
+ try:
+ return self.con.execute(sql, parameters) # type: ignore[arg-type]
+ except Exception:
+ # In some cases, an error might happen that isn't really an
+ # error. Try again immediately.
+ # https://github.com/nedbat/coveragepy/issues/1010
+ return self.con.execute(sql, parameters) # type: ignore[arg-type]
+ except sqlite3.Error as exc:
+ msg = str(exc)
+ if self.filename != ":memory:":
+ try:
+ # `execute` is the first thing we do with the database, so try
+ # hard to provide useful hints if something goes wrong now.
+ with open(self.filename, "rb") as bad_file:
+ cov4_sig = b"!coverage.py: This is a private format"
+ if bad_file.read(len(cov4_sig)) == cov4_sig:
+ msg = (
+ "Looks like a coverage 4.x data file. " +
+ "Are you mixing versions of coverage?"
+ )
+ except Exception:
+ pass
+ if self.debug.should("sql"):
+ self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}")
+ raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc
+
+ @contextlib.contextmanager
+ def execute(
+ self,
+ sql: str,
+ parameters: Iterable[Any] = (),
+ ) -> Iterator[sqlite3.Cursor]:
+ """Context managed :meth:`python:sqlite3.Connection.execute`.
+
+ Use with a ``with`` statement to auto-close the returned cursor.
+ """
+ cur = self._execute(sql, parameters)
+ try:
+ yield cur
+ finally:
+ cur.close()
+
+ def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None:
+ """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor.
+
+ If `fail_ok` is True, then SQLite errors are ignored.
+ """
+ try:
+ # PyPy needs the .close() calls here, or sqlite gets twisted up:
+ # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
+ self._execute(sql, parameters).close()
+ except DataError:
+ if not fail_ok:
+ raise
+
+ def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int:
+ """Like execute, but returns the lastrowid."""
+ with self.execute(sql, parameters) as cur:
+ assert cur.lastrowid is not None
+ rowid: int = cur.lastrowid
+ if self.debug.should("sqldata"):
+ self.debug.write(f"Row id result: {rowid!r}")
+ return rowid
+
+ def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None:
+ """Execute a statement and return the one row that results.
+
+ This is like execute(sql, parameters).fetchone(), except it is
+ correct in reading the entire result set. This will raise an
+ exception if more than one row results.
+
+ Returns a row, or None if there were no rows.
+ """
+ with self.execute(sql, parameters) as cur:
+ rows = list(cur)
+ if len(rows) == 0:
+ return None
+ elif len(rows) == 1:
+ return cast(tuple[Any, ...], rows[0])
+ else:
+ raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")
+
+ def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor:
+ """Same as :meth:`python:sqlite3.Connection.executemany`."""
+ if self.debug.should("sql"):
+ final = ":" if self.debug.should("sqldata") else ""
+ self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}")
+ if self.debug.should("sqldata"):
+ for i, row in enumerate(data):
+ self.debug.write(f"{i:4d}: {row!r}")
+ assert self.con is not None
+ try:
+ return self.con.executemany(sql, data)
+ except Exception:
+ # In some cases, an error might happen that isn't really an
+ # error. Try again immediately.
+ # https://github.com/nedbat/coveragepy/issues/1010
+ return self.con.executemany(sql, data)
+
+ def executemany_void(self, sql: str, data: Iterable[Any]) -> None:
+ """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor."""
+ data = list(data)
+ if data:
+ self._executemany(sql, data).close()
+
+ def executescript(self, script: str) -> None:
+ """Same as :meth:`python:sqlite3.Connection.executescript`."""
+ if self.debug.should("sql"):
+ self.debug.write("Executing script with {} chars: {}".format(
+ len(script), clipped_repr(script, 100),
+ ))
+ assert self.con is not None
+ self.con.executescript(script).close()
+
+ def dump(self) -> str:
+ """Return a multi-line string, the SQL dump of the database."""
+ assert self.con is not None
+ return "\n".join(self.con.iterdump())
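
For completeness, the helpers on the extracted `SqliteDb` class compose like this; a small sketch against an in-memory database, assuming `NoDebugging` still satisfies `TDebugCtl`::

    from coverage.debug import NoDebugging
    from coverage.sqlitedb import SqliteDb

    with SqliteDb(":memory:", NoDebugging()) as db:
        db.execute_void("create table kv (key text, value text)")
        rowid = db.execute_for_rowid(
            "insert into kv (key, value) values (?, ?)", ("version", "7"),
        )
        row = db.execute_one("select value from kv where key = ?", ("version",))
        assert rowid == 1 and row == ("7",)
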
diff --git a/coverage/summary.py b/coverage/summary.py
deleted file mode 100644
index c4c7fd1de..000000000
--- a/coverage/summary.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Summary reporting"""
-
-from __future__ import annotations
-
-import sys
-
-from typing import Any, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING
-
-from coverage.exceptions import ConfigError, NoDataError
-from coverage.misc import human_sorted_items
-from coverage.plugin import FileReporter
-from coverage.report import get_analysis_to_report
-from coverage.results import Analysis, Numbers
-from coverage.types import TMorf
-
-if TYPE_CHECKING:
- from coverage import Coverage
-
-
-class SummaryReporter:
- """A reporter for writing the summary report."""
-
- def __init__(self, coverage: Coverage) -> None:
- self.coverage = coverage
- self.config = self.coverage.config
- self.branches = coverage.get_data().has_arcs()
- self.outfile: Optional[IO[str]] = None
- self.output_format = self.config.format or "text"
- if self.output_format not in {"text", "markdown", "total"}:
- raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
- self.fr_analysis: List[Tuple[FileReporter, Analysis]] = []
- self.skipped_count = 0
- self.empty_count = 0
- self.total = Numbers(precision=self.config.precision)
-
- def write(self, line: str) -> None:
- """Write a line to the output, adding a newline."""
- assert self.outfile is not None
- self.outfile.write(line.rstrip())
- self.outfile.write("\n")
-
- def write_items(self, items: Iterable[str]) -> None:
- """Write a list of strings, joined together."""
- self.write("".join(items))
-
- def _report_text(
- self,
- header: List[str],
- lines_values: List[List[Any]],
- total_line: List[Any],
- end_lines: List[str],
- ) -> None:
- """Internal method that prints report data in text format.
-
- `header` is a list with captions.
- `lines_values` is list of lists of sortable values.
- `total_line` is a list with values of the total line.
- `end_lines` is a list of ending lines with information about skipped files.
-
- """
- # Prepare the formatting strings, header, and column sorting.
- max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
- max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
- max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
- formats = dict(
- Name="{:{name_len}}",
- Stmts="{:>7}",
- Miss="{:>7}",
- Branch="{:>7}",
- BrPart="{:>7}",
- Cover="{:>{n}}",
- Missing="{:>10}",
- )
- header_items = [
- formats[item].format(item, name_len=max_name, n=max_n)
- for item in header
- ]
- header_str = "".join(header_items)
- rule = "-" * len(header_str)
-
- # Write the header
- self.write(header_str)
- self.write(rule)
-
- formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}")
- for values in lines_values:
- # build string with line values
- line_items = [
- formats[item].format(str(value),
- name_len=max_name, n=max_n-1) for item, value in zip(header, values)
- ]
- self.write_items(line_items)
-
- # Write a TOTAL line
- if lines_values:
- self.write(rule)
-
- line_items = [
- formats[item].format(str(value),
- name_len=max_name, n=max_n-1) for item, value in zip(header, total_line)
- ]
- self.write_items(line_items)
-
- for end_line in end_lines:
- self.write(end_line)
-
- def _report_markdown(
- self,
- header: List[str],
- lines_values: List[List[Any]],
- total_line: List[Any],
- end_lines: List[str],
- ) -> None:
- """Internal method that prints report data in markdown format.
-
- `header` is a list with captions.
- `lines_values` is a sorted list of lists containing coverage information.
- `total_line` is a list with values of the total line.
- `end_lines` is a list of ending lines with information about skipped files.
-
- """
- # Prepare the formatting strings, header, and column sorting.
- max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
- max_name = max(max_name, len("**TOTAL**")) + 1
- formats = dict(
- Name="| {:{name_len}}|",
- Stmts="{:>9} |",
- Miss="{:>9} |",
- Branch="{:>9} |",
- BrPart="{:>9} |",
- Cover="{:>{n}} |",
- Missing="{:>10} |",
- )
- max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
- header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
- header_str = "".join(header_items)
- rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, '-')] +
- ["-: |".rjust(len(item)-1, '-') for item in header_items[1:]]
- )
-
- # Write the header
- self.write(header_str)
- self.write(rule_str)
-
- for values in lines_values:
- # build string with line values
- formats.update(dict(Cover="{:>{n}}% |"))
- line_items = [
- formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1)
- for item, value in zip(header, values)
- ]
- self.write_items(line_items)
-
- # Write the TOTAL line
- formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
- total_line_items: List[str] = []
- for item, value in zip(header, total_line):
- if value == "":
- insert = value
- elif item == "Cover":
- insert = f" **{value}%**"
- else:
- insert = f" **{value}**"
- total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
- self.write_items(total_line_items)
- for end_line in end_lines:
- self.write(end_line)
-
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
- """Writes a report summarizing coverage statistics per module.
-
- `outfile` is a text-mode file object to write the summary to.
-
- """
- self.outfile = outfile or sys.stdout
-
- self.coverage.get_data().set_query_contexts(self.config.report_contexts)
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.report_one_file(fr, analysis)
-
- if not self.total.n_files and not self.skipped_count:
- raise NoDataError("No data to report.")
-
- if self.output_format == "total":
- self.write(self.total.pc_covered_str)
- else:
- self.tabular_report()
-
- return self.total.pc_covered
-
- def tabular_report(self) -> None:
- """Writes tabular report formats."""
- # Prepare the header line and column sorting.
- header = ["Name", "Stmts", "Miss"]
- if self.branches:
- header += ["Branch", "BrPart"]
- header += ["Cover"]
- if self.config.show_missing:
- header += ["Missing"]
-
- column_order = dict(name=0, stmts=1, miss=2, cover=-1)
- if self.branches:
- column_order.update(dict(branch=3, brpart=4))
-
- # `lines_values` is list of lists of sortable values.
- lines_values = []
-
- for (fr, analysis) in self.fr_analysis:
- nums = analysis.numbers
-
- args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
- if self.branches:
- args += [nums.n_branches, nums.n_partial_branches]
- args += [nums.pc_covered_str]
- if self.config.show_missing:
- args += [analysis.missing_formatted(branches=True)]
- args += [nums.pc_covered]
- lines_values.append(args)
-
- # Line sorting.
- sort_option = (self.config.sort or "name").lower()
- reverse = False
- if sort_option[0] == '-':
- reverse = True
- sort_option = sort_option[1:]
- elif sort_option[0] == '+':
- sort_option = sort_option[1:]
- sort_idx = column_order.get(sort_option)
- if sort_idx is None:
- raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
- if sort_option == "name":
- lines_values = human_sorted_items(lines_values, reverse=reverse)
- else:
- lines_values.sort(
- key=lambda line: (line[sort_idx], line[0]), # type: ignore[index]
- reverse=reverse,
- )
-
- # Calculate total if we had at least one file.
- total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
- if self.branches:
- total_line += [self.total.n_branches, self.total.n_partial_branches]
- total_line += [self.total.pc_covered_str]
- if self.config.show_missing:
- total_line += [""]
-
- # Create other final lines.
- end_lines = []
- if self.config.skip_covered and self.skipped_count:
- file_suffix = 's' if self.skipped_count>1 else ''
- end_lines.append(
- f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage."
- )
- if self.config.skip_empty and self.empty_count:
- file_suffix = 's' if self.empty_count > 1 else ''
- end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")
-
- if self.output_format == "markdown":
- formatter = self._report_markdown
- else:
- formatter = self._report_text
- formatter(header, lines_values, total_line, end_lines)
-
- def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
- """Report on just one file, the callback from report()."""
- nums = analysis.numbers
- self.total += nums
-
- no_missing_lines = (nums.n_missing == 0)
- no_missing_branches = (nums.n_partial_branches == 0)
- if self.config.skip_covered and no_missing_lines and no_missing_branches:
- # Don't report on 100% files.
- self.skipped_count += 1
- elif self.config.skip_empty and nums.n_statements == 0:
- # Don't report on empty files.
- self.empty_count += 1
- else:
- self.fr_analysis.append((fr, analysis))
diff --git a/coverage/sysmon.py b/coverage/sysmon.py
new file mode 100644
index 000000000..2809aa087
--- /dev/null
+++ b/coverage/sysmon.py
@@ -0,0 +1,438 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Callback functions and support for sys.monitoring data collection."""
+
+# TODO: https://github.com/python/cpython/issues/111963#issuecomment-2386584080
+
+from __future__ import annotations
+
+import functools
+import inspect
+import os
+import os.path
+import sys
+import threading
+import traceback
+
+from dataclasses import dataclass
+from types import CodeType, FrameType
+from typing import (
+ Any,
+ Callable,
+ TYPE_CHECKING,
+ cast,
+)
+
+from coverage.debug import short_filename, short_stack
+from coverage.misc import isolate_module
+from coverage.types import (
+ AnyCallable,
+ TArc,
+ TFileDisposition,
+ TLineNo,
+ TShouldStartContextFn,
+ TShouldTraceFn,
+ TTraceData,
+ TTraceFileData,
+ Tracer,
+ TWarnFn,
+)
+
+os = isolate_module(os)
+
+# pylint: disable=unused-argument
+
+LOG = False
+
+# This module will be imported in all versions of Python, but only used in 3.12+.
+# It will be type-checked for 3.12, but not for earlier versions.
+sys_monitoring = getattr(sys, "monitoring", None)
+
+if TYPE_CHECKING:
+ assert sys_monitoring is not None
+ # I want to say this but it's not allowed:
+ # MonitorReturn = Literal[sys.monitoring.DISABLE] | None
+ MonitorReturn = Any
+
+
+if LOG: # pragma: debugging
+
+ class LoggingWrapper:
+ """Wrap a namespace to log all its functions."""
+
+ def __init__(self, wrapped: Any, namespace: str) -> None:
+ self.wrapped = wrapped
+ self.namespace = namespace
+
+ def __getattr__(self, name: str) -> Callable[..., Any]:
+ def _wrapped(*args: Any, **kwargs: Any) -> Any:
+ log(f"{self.namespace}.{name}{args}{kwargs}")
+ return getattr(self.wrapped, name)(*args, **kwargs)
+
+ return _wrapped
+
+ sys_monitoring = LoggingWrapper(sys_monitoring, "sys.monitoring")
+ assert sys_monitoring is not None
+
+ short_stack = functools.partial(
+ short_stack, full=True, short_filenames=True, frame_ids=True,
+ )
+ seen_threads: set[int] = set()
+
+ def log(msg: str) -> None:
+ """Write a message to our detailed debugging log(s)."""
+ # Thread ids are reused across processes?
+ # Make a shorter number more likely to be unique.
+ pid = os.getpid()
+ tid = cast(int, threading.current_thread().ident)
+ tslug = f"{(pid * tid) % 9_999_991:07d}"
+ if tid not in seen_threads:
+ seen_threads.add(tid)
+ log(f"New thread {tid} {tslug}:\n{short_stack()}")
+ # log_seq = int(os.getenv("PANSEQ", "0"))
+ # root = f"/tmp/pan.{log_seq:03d}"
+ for filename in [
+ "/tmp/foo.out",
+ # f"{root}.out",
+ # f"{root}-{pid}.out",
+ # f"{root}-{pid}-{tslug}.out",
+ ]:
+ with open(filename, "a") as f:
+ print(f"{pid}:{tslug}: {msg}", file=f, flush=True)
+
+ def arg_repr(arg: Any) -> str:
+ """Make a customized repr for logged values."""
+ if isinstance(arg, CodeType):
+ return (
+ f""
+ )
+ return repr(arg)
+
+ def panopticon(*names: str | None) -> AnyCallable:
+ """Decorate a function to log its calls."""
+
+ def _decorator(method: AnyCallable) -> AnyCallable:
+ @functools.wraps(method)
+ def _wrapped(self: Any, *args: Any) -> Any:
+ try:
+ # log(f"{method.__name__}() stack:\n{short_stack()}")
+ args_reprs = []
+ for name, arg in zip(names, args):
+ if name is None:
+ continue
+ args_reprs.append(f"{name}={arg_repr(arg)}")
+ log(f"{id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
+ ret = method(self, *args)
+ # log(f" end {id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
+ return ret
+ except Exception as exc:
+ log(f"!!{exc.__class__.__name__}: {exc}")
+ log("".join(traceback.format_exception(exc))) # pylint: disable=[no-value-for-parameter]
+ try:
+ assert sys_monitoring is not None
+ sys_monitoring.set_events(sys.monitoring.COVERAGE_ID, 0)
+ except ValueError:
+ # We might have already shut off monitoring.
+ log("oops, shutting off events with disabled tool id")
+ raise
+
+ return _wrapped
+
+ return _decorator
+
+else:
+
+ def log(msg: str) -> None:
+ """Write a message to our detailed debugging log(s), but not really."""
+
+ def panopticon(*names: str | None) -> AnyCallable:
+ """Decorate a function to log its calls, but not really."""
+
+ def _decorator(meth: AnyCallable) -> AnyCallable:
+ return meth
+
+ return _decorator
+
+
+@dataclass
+class CodeInfo:
+ """The information we want about each code object."""
+
+ tracing: bool
+ file_data: TTraceFileData | None
+ # TODO: what is byte_to_line for?
+ byte_to_line: dict[int, int] | None
+
+
+def bytes_to_lines(code: CodeType) -> dict[int, int]:
+ """Make a dict mapping byte code offsets to line numbers."""
+ b2l = {}
+ for bstart, bend, lineno in code.co_lines():
+ if lineno is not None:
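+ # Bytecode instructions are two bytes each, so offsets step by 2.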
+ for boffset in range(bstart, bend, 2):
+ b2l[boffset] = lineno
+ return b2l
+
+
+class SysMonitor(Tracer):
+ """Python implementation of the raw data tracer for PEP669 implementations."""
+
+ # One of these will be used across threads. Be careful.
+
+ def __init__(self, tool_id: int) -> None:
+ # Attributes set from the collector:
+ self.data: TTraceData
+ self.trace_arcs = False
+ self.should_trace: TShouldTraceFn
+ self.should_trace_cache: dict[str, TFileDisposition | None]
+ # TODO: should_start_context and switch_context are unused!
+ # Change tests/testenv.py:DYN_CONTEXTS when this is updated.
+ self.should_start_context: TShouldStartContextFn | None = None
+ self.switch_context: Callable[[str | None], None] | None = None
+ self.lock_data: Callable[[], None]
+ self.unlock_data: Callable[[], None]
+ # TODO: warn is unused.
+ self.warn: TWarnFn
+
+ self.myid = tool_id
+
+ # Map id(code_object) -> CodeInfo
+ self.code_infos: dict[int, CodeInfo] = {}
+ # A list of code_objects, just to keep them alive so that ids are
+ # useful as identity.
+ self.code_objects: list[CodeType] = []
+ self.last_lines: dict[FrameType, int] = {}
+ # Map id(code_object) -> code_object
+ self.local_event_codes: dict[int, CodeType] = {}
+ self.sysmon_on = False
+ self.lock = threading.Lock()
+
+ self.stats = {
+ "starts": 0,
+ }
+
+ self.stopped = False
+ self._activity = False
+
+ def __repr__(self) -> str:
+ points = sum(len(v) for v in self.data.values())
+ files = len(self.data)
+ return f""
+
+ @panopticon()
+ def start(self) -> None:
+ """Start this Tracer."""
+ self.stopped = False
+
+ assert sys_monitoring is not None
+ sys_monitoring.use_tool_id(self.myid, "coverage.py")
+ register = functools.partial(sys_monitoring.register_callback, self.myid)
+ events = sys_monitoring.events
+ if self.trace_arcs:
+ sys_monitoring.set_events(
+ self.myid,
+ events.PY_START | events.PY_UNWIND,
+ )
+ register(events.PY_START, self.sysmon_py_start)
+ register(events.PY_RESUME, self.sysmon_py_resume_arcs)
+ register(events.PY_RETURN, self.sysmon_py_return_arcs)
+ register(events.PY_UNWIND, self.sysmon_py_unwind_arcs)
+ register(events.LINE, self.sysmon_line_arcs)
+ else:
+ sys_monitoring.set_events(self.myid, events.PY_START)
+ register(events.PY_START, self.sysmon_py_start)
+ register(events.LINE, self.sysmon_line_lines)
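+ # Re-arm any per-location events that previously returned DISABLE.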
+ sys_monitoring.restart_events()
+ self.sysmon_on = True
+
+ @panopticon()
+ def stop(self) -> None:
+ """Stop this Tracer."""
+ if not self.sysmon_on:
+ # In forking situations, we might try to stop when we are not
+ # started. Do nothing in that case.
+ return
+ assert sys_monitoring is not None
+ sys_monitoring.set_events(self.myid, 0)
+ with self.lock:
+ self.sysmon_on = False
+ for code in self.local_event_codes.values():
+ sys_monitoring.set_local_events(self.myid, code, 0)
+ self.local_event_codes = {}
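+ # Release the tool id so another tracer can claim it later.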
+ sys_monitoring.free_tool_id(self.myid)
+
+ @panopticon()
+ def post_fork(self) -> None:
+ """The process has forked, clean up as needed."""
+ self.stop()
+
+ def activity(self) -> bool:
+ """Has there been any activity?"""
+ return self._activity
+
+ def reset_activity(self) -> None:
+ """Reset the activity() flag."""
+ self._activity = False
+
+ def get_stats(self) -> dict[str, int] | None:
+ """Return a dictionary of statistics, or None."""
+ return None
+
+ # The number of frames in callers_frame takes @panopticon into account.
+ if LOG:
+
+ def callers_frame(self) -> FrameType:
+ """Get the frame of the Python code we're monitoring."""
+ return (
+ inspect.currentframe().f_back.f_back.f_back # type: ignore[union-attr,return-value]
+ )
+
+ else:
+
+ def callers_frame(self) -> FrameType:
+ """Get the frame of the Python code we're monitoring."""
+ return inspect.currentframe().f_back.f_back # type: ignore[union-attr,return-value]
+
+ @panopticon("code", "@")
+ def sysmon_py_start(self, code: CodeType, instruction_offset: int) -> MonitorReturn:
+ """Handle sys.monitoring.events.PY_START events."""
+ # Entering a new frame. Decide if we should trace in this file.
+ self._activity = True
+ self.stats["starts"] += 1
+
+ code_info = self.code_infos.get(id(code))
+ tracing_code: bool | None = None
+ file_data: TTraceFileData | None = None
+ if code_info is not None:
+ tracing_code = code_info.tracing
+ file_data = code_info.file_data
+
+ if tracing_code is None:
+ filename = code.co_filename
+ disp = self.should_trace_cache.get(filename)
+ if disp is None:
+ frame = inspect.currentframe().f_back # type: ignore[union-attr]
+ if LOG:
+ # @panopticon adds a frame.
+ frame = frame.f_back # type: ignore[union-attr]
+ disp = self.should_trace(filename, frame) # type: ignore[arg-type]
+ self.should_trace_cache[filename] = disp
+
+ tracing_code = disp.trace
+ if tracing_code:
+ tracename = disp.source_filename
+ assert tracename is not None
+ self.lock_data()
+ try:
+ if tracename not in self.data:
+ self.data[tracename] = set()
+ finally:
+ self.unlock_data()
+ file_data = self.data[tracename]
+ b2l = bytes_to_lines(code)
+ else:
+ file_data = None
+ b2l = None
+
+ self.code_infos[id(code)] = CodeInfo(
+ tracing=tracing_code,
+ file_data=file_data,
+ byte_to_line=b2l,
+ )
+ self.code_objects.append(code)
+
+ if tracing_code:
+ events = sys.monitoring.events
+ with self.lock:
+ if self.sysmon_on:
+ assert sys_monitoring is not None
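+ # Ask for fine-grained local events on just this code object.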
+ sys_monitoring.set_local_events(
+ self.myid,
+ code,
+ events.PY_RETURN
+ #
+ | events.PY_RESUME
+ # | events.PY_YIELD
+ | events.LINE,
+ # | events.BRANCH
+ # | events.JUMP
+ )
+ self.local_event_codes[id(code)] = code
+
+ if tracing_code and self.trace_arcs:
+ frame = self.callers_frame()
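+ # Negative line numbers are the pseudo-lines for function entry and exit.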
+ self.last_lines[frame] = -code.co_firstlineno
+ return None
+ else:
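+ # Not tracing this code object: stop sending PY_START events for it.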
+ return sys.monitoring.DISABLE
+
+ @panopticon("code", "@")
+ def sysmon_py_resume_arcs(
+ self, code: CodeType, instruction_offset: int,
+ ) -> MonitorReturn:
+ """Handle sys.monitoring.events.PY_RESUME events for branch coverage."""
+ frame = self.callers_frame()
+ self.last_lines[frame] = frame.f_lineno
+
+ @panopticon("code", "@", None)
+ def sysmon_py_return_arcs(
+ self, code: CodeType, instruction_offset: int, retval: object,
+ ) -> MonitorReturn:
+ """Handle sys.monitoring.events.PY_RETURN events for branch coverage."""
+ frame = self.callers_frame()
+ code_info = self.code_infos.get(id(code))
+ if code_info is not None and code_info.file_data is not None:
+ last_line = self.last_lines.get(frame)
+ if last_line is not None:
+ arc = (last_line, -code.co_firstlineno)
+ # log(f"adding {arc=}")
+ cast(set[TArc], code_info.file_data).add(arc)
+
+ # Leaving this function, no need for the frame any more.
+ self.last_lines.pop(frame, None)
+
+ @panopticon("code", "@", "exc")
+ def sysmon_py_unwind_arcs(
+ self, code: CodeType, instruction_offset: int, exception: BaseException,
+ ) -> MonitorReturn:
+ """Handle sys.monitoring.events.PY_UNWIND events for branch coverage."""
+ frame = self.callers_frame()
+ # Leaving this function.
+ last_line = self.last_lines.pop(frame, None)
+ if isinstance(exception, GeneratorExit):
+ # We don't want to count generator exits as arcs.
+ return
+ code_info = self.code_infos.get(id(code))
+ if code_info is not None and code_info.file_data is not None:
+ if last_line is not None:
+ arc = (last_line, -code.co_firstlineno)
+ # log(f"adding {arc=}")
+ cast(set[TArc], code_info.file_data).add(arc)
+
+ @panopticon("code", "line")
+ def sysmon_line_lines(self, code: CodeType, line_number: int) -> MonitorReturn:
+ """Handle sys.monitoring.events.LINE events for line coverage."""
+ code_info = self.code_infos[id(code)]
+ if code_info.file_data is not None:
+ cast(set[TLineNo], code_info.file_data).add(line_number)
+ # log(f"adding {line_number=}")
+ return sys.monitoring.DISABLE
+
+ @panopticon("code", "line")
+ def sysmon_line_arcs(self, code: CodeType, line_number: int) -> MonitorReturn:
+ """Handle sys.monitoring.events.LINE events for branch coverage."""
+ code_info = self.code_infos[id(code)]
+ ret = None
+ if code_info.file_data is not None:
+ frame = self.callers_frame()
+ last_line = self.last_lines.get(frame)
+ if last_line is not None:
+ arc = (last_line, line_number)
+ cast(set[TArc], code_info.file_data).add(arc)
+ # log(f"adding {arc=}")
+ self.last_lines[frame] = line_number
+ return ret
diff --git a/coverage/templite.py b/coverage/templite.py
index 897a58f95..4689f9573 100644
--- a/coverage/templite.py
+++ b/coverage/templite.py
@@ -15,7 +15,7 @@
import re
from typing import (
- Any, Callable, Dict, List, NoReturn, Optional, Set, Union, cast,
+ Any, Callable, NoReturn, cast,
)
@@ -33,7 +33,7 @@ class CodeBuilder:
"""Build source code conveniently."""
def __init__(self, indent: int = 0) -> None:
- self.code: List[Union[str, CodeBuilder]] = []
+ self.code: list[str | CodeBuilder] = []
self.indent_level = indent
def __str__(self) -> str:
@@ -63,14 +63,14 @@ def dedent(self) -> None:
"""Decrease the current indent for following lines."""
self.indent_level -= self.INDENT_STEP
- def get_globals(self) -> Dict[str, Any]:
+ def get_globals(self) -> dict[str, Any]:
"""Execute the code, and return a dict of globals it defines."""
# A check that the caller really finished all the blocks they started.
assert self.indent_level == 0
# Get the Python source as a single string.
python_source = str(self)
# Execute the source, defining globals, and return them.
- global_namespace: Dict[str, Any] = {}
+ global_namespace: dict[str, Any] = {}
exec(python_source, global_namespace)
return global_namespace
@@ -109,15 +109,15 @@ class Templite:
You are interested in {{topic}}.
{% endif %}
''',
- {'upper': str.upper},
+ {"upper": str.upper},
)
text = templite.render({
- 'name': "Ned",
- 'topics': ['Python', 'Geometry', 'Juggling'],
+ "name": "Ned",
+ "topics": ["Python", "Geometry", "Juggling"],
})
"""
- def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
+ def __init__(self, text: str, *contexts: dict[str, Any]) -> None:
"""Construct a Templite with the given `text`.
`contexts` are dictionaries of values to use for future renderings.
@@ -128,8 +128,8 @@ def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
for context in contexts:
self.context.update(context)
- self.all_vars: Set[str] = set()
- self.loop_vars: Set[str] = set()
+ self.all_vars: set[str] = set()
+ self.loop_vars: set[str] = set()
# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
@@ -143,7 +143,7 @@ def __init__(self, text: str, *contexts: Dict[str, Any]) -> None:
code.add_line("extend_result = result.extend")
code.add_line("to_str = str")
- buffered: List[str] = []
+ buffered: list[str] = []
def flush_output() -> None:
"""Force `buffered` to the code builder."""
@@ -161,49 +161,46 @@ def flush_output() -> None:
squash = in_joined = False
for token in tokens:
- if token.startswith('{'):
+ if token.startswith("{"):
start, end = 2, -2
- squash = (token[-3] == '-')
+ squash = (token[-3] == "-")
if squash:
end = -3
- if token.startswith('{#'):
+ if token.startswith("{#"):
# Comment: ignore it and move on.
continue
- elif token.startswith('{{'):
+ elif token.startswith("{{"):
# An expression to evaluate.
expr = self._expr_code(token[start:end].strip())
buffered.append("to_str(%s)" % expr)
else:
- # token.startswith('{%')
+ # token.startswith("{%")
# Action tag: split into words and parse further.
flush_output()
words = token[start:end].strip().split()
- if words[0] == 'if':
+ if words[0] == "if":
# An if statement: evaluate the expression to determine if.
if len(words) != 2:
self._syntax_error("Don't understand if", token)
- ops_stack.append('if')
+ ops_stack.append("if")
code.add_line("if %s:" % self._expr_code(words[1]))
code.indent()
- elif words[0] == 'for':
+ elif words[0] == "for":
# A loop: iterate over expression result.
- if len(words) != 4 or words[2] != 'in':
+ if len(words) != 4 or words[2] != "in":
self._syntax_error("Don't understand for", token)
- ops_stack.append('for')
+ ops_stack.append("for")
self._variable(words[1], self.loop_vars)
code.add_line(
- "for c_{} in {}:".format(
- words[1],
- self._expr_code(words[3])
- )
+ f"for c_{words[1]} in {self._expr_code(words[3])}:",
)
code.indent()
- elif words[0] == 'joined':
- ops_stack.append('joined')
+ elif words[0] == "joined":
+ ops_stack.append("joined")
in_joined = True
- elif words[0].startswith('end'):
+ elif words[0].startswith("end"):
# Endsomething. Pop the ops stack.
if len(words) != 1:
self._syntax_error("Don't understand end", token)
@@ -213,7 +210,7 @@ def flush_output() -> None:
start_what = ops_stack.pop()
if start_what != end_what:
self._syntax_error("Mismatched end tag", end_what)
- if end_what == 'joined':
+ if end_what == "joined":
in_joined = False
else:
code.dedent()
@@ -236,14 +233,14 @@ def flush_output() -> None:
for var_name in self.all_vars - self.loop_vars:
vars_code.add_line(f"c_{var_name} = context[{var_name!r}]")
- code.add_line('return "".join(result)')
+ code.add_line("return ''.join(result)")
code.dedent()
self._render_function = cast(
Callable[
- [Dict[str, Any], Callable[..., Any]],
- str
+ [dict[str, Any], Callable[..., Any]],
+ str,
],
- code.get_globals()['render_function'],
+ code.get_globals()["render_function"],
)
def _expr_code(self, expr: str) -> str:
@@ -268,7 +265,7 @@ def _syntax_error(self, msg: str, thing: Any) -> NoReturn:
"""Raise a syntax error using `msg`, and showing `thing`."""
raise TempliteSyntaxError(f"{msg}: {thing!r}")
- def _variable(self, name: str, vars_set: Set[str]) -> None:
+ def _variable(self, name: str, vars_set: set[str]) -> None:
"""Track that `name` is used as a variable.
Adds the name to `vars_set`, a set of variable names.
@@ -280,7 +277,7 @@ def _variable(self, name: str, vars_set: Set[str]) -> None:
self._syntax_error("Not a valid name", name)
vars_set.add(name)
- def render(self, context: Optional[Dict[str, Any]] = None) -> str:
+ def render(self, context: dict[str, Any] | None = None) -> str:
"""Render this template by applying it to `context`.
`context` is a dictionary of values to use in this rendering.
@@ -302,7 +299,7 @@ def _do_dots(self, value: Any, *dots: str) -> Any:
value = value[dot]
except (TypeError, KeyError) as exc:
raise TempliteValueError(
- f"Couldn't evaluate {value!r}.{dot}"
+ f"Couldn't evaluate {value!r}.{dot}",
) from exc
if callable(value):
value = value()
diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py
index 139cb2c1b..0110ac18c 100644
--- a/coverage/tomlconfig.py
+++ b/coverage/tomlconfig.py
@@ -8,13 +8,15 @@
import os
import re
-from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar
+from typing import Any, Callable, TypeVar
+from collections.abc import Iterable
from coverage import env
from coverage.exceptions import ConfigError
-from coverage.misc import import_third_party, substitute_variables
+from coverage.misc import import_third_party, isolate_module, substitute_variables
from coverage.types import TConfigSectionOut, TConfigValueOut
+os = isolate_module(os)
if env.PYVERSION >= (3, 11, 0, "alpha", 7):
import tomllib # pylint: disable=import-error
@@ -40,9 +42,9 @@ class TomlConfigParser:
def __init__(self, our_file: bool) -> None:
self.our_file = our_file
- self.data: Dict[str, Any] = {}
+ self.data: dict[str, Any] = {}
- def read(self, filenames: Iterable[str]) -> List[str]:
+ def read(self, filenames: Iterable[str]) -> list[str]:
# RawConfigParser takes a filename or list of filenames, but we only
# ever call this with a single filename.
assert isinstance(filenames, (bytes, str, os.PathLike))
@@ -67,7 +69,7 @@ def read(self, filenames: Iterable[str]) -> List[str]:
raise ConfigError(msg.format(filename))
return []
- def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSectionOut]]:
+ def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]:
"""Get a section from the data.
Arguments:
@@ -94,7 +96,7 @@ def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSec
return None, None
return real_section, data
- def _get(self, section: str, option: str) -> Tuple[str, TConfigValueOut]:
+ def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]:
"""Like .get, but returns the real section name and the value."""
name, data = self._get_section(section)
if data is None:
@@ -123,7 +125,7 @@ def has_option(self, section: str, option: str) -> bool:
return False
return option in data
- def real_section(self, section: str) -> Optional[str]:
+ def real_section(self, section: str) -> str | None:
name, _ = self._get_section(section)
return name
@@ -131,7 +133,7 @@ def has_section(self, section: str) -> bool:
name, _ = self._get_section(section)
return bool(name)
- def options(self, section: str) -> List[str]:
+ def options(self, section: str) -> list[str]:
_, data = self._get_section(section)
if data is None:
raise ConfigError(f"No section: {section!r}")
@@ -150,8 +152,8 @@ def _check_type(
section: str,
option: str,
value: Any,
- type_: Type[TWant],
- converter: Optional[Callable[[Any], TWant]],
+ type_: type[TWant],
+ converter: Callable[[Any], TWant] | None,
type_desc: str,
) -> TWant:
"""Check that `value` has the type we want, converting if needed.
@@ -165,10 +167,10 @@ def _check_type(
return converter(value)
except Exception as e:
raise ValueError(
- f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}"
+ f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}",
) from e
raise ValueError(
- f"Option [{section}]{option} is not {type_desc}: {value!r}"
+ f"Option [{section}]{option} is not {type_desc}: {value!r}",
)
def getboolean(self, section: str, option: str) -> bool:
@@ -176,18 +178,18 @@ def getboolean(self, section: str, option: str) -> bool:
bool_strings = {"true": True, "false": False}
return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean")
- def _get_list(self, section: str, option: str) -> Tuple[str, List[str]]:
+ def _get_list(self, section: str, option: str) -> tuple[str, list[str]]:
"""Get a list of strings, substituting environment variables in the elements."""
name, values = self._get(section, option)
values = self._check_type(name, option, values, list, None, "a list")
values = [substitute_variables(value, os.environ) for value in values]
return name, values
- def getlist(self, section: str, option: str) -> List[str]:
+ def getlist(self, section: str, option: str) -> list[str]:
_, values = self._get_list(section, option)
return values
- def getregexlist(self, section: str, option: str) -> List[str]:
+ def getregexlist(self, section: str, option: str) -> list[str]:
name, values = self._get_list(section, option)
for value in values:
value = value.strip()
diff --git a/coverage/tracer.pyi b/coverage/tracer.pyi
index d1281767b..d850493ed 100644
--- a/coverage/tracer.pyi
+++ b/coverage/tracer.pyi
@@ -1,11 +1,14 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+"""Typing information for the constructs from our .c files."""
+
from typing import Any, Dict
-from coverage.types import TFileDisposition, TTraceData, TTraceFn, TTracer
+from coverage.types import TFileDisposition, TTraceData, TTraceFn, Tracer
class CFileDisposition(TFileDisposition):
+ """CFileDisposition is in ctracer/filedisp.c"""
canonical_filename: Any
file_tracer: Any
has_dynamic_filename: Any
@@ -15,7 +18,8 @@ class CFileDisposition(TFileDisposition):
trace: Any
def __init__(self) -> None: ...
-class CTracer(TTracer):
+class CTracer(Tracer):
+ """CTracer is in ctracer/tracer.c"""
check_include: Any
concur_id_func: Any
data: TTraceData
@@ -25,6 +29,8 @@ class CTracer(TTracer):
should_trace: Any
should_trace_cache: Any
switch_context: Any
+ lock_data: Any
+ unlock_data: Any
trace_arcs: Any
warn: Any
def __init__(self) -> None: ...
diff --git a/coverage/types.py b/coverage/types.py
index 828ab20bb..bcf8396d6 100644
--- a/coverage/types.py
+++ b/coverage/types.py
@@ -10,21 +10,18 @@
import os
import pathlib
+from collections.abc import Iterable, Mapping
from types import FrameType, ModuleType
from typing import (
- Any, Callable, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Type, Union,
- TYPE_CHECKING,
+ Any, Callable, Optional, Protocol,
+ Union, TYPE_CHECKING,
)
if TYPE_CHECKING:
- # Protocol is new in 3.8. PYVERSIONS
- from typing import Protocol
-
from coverage.plugin import FileTracer
-else:
- class Protocol: # pylint: disable=missing-class-docstring
- pass
+
+AnyCallable = Callable[..., Any]
## File paths
@@ -36,7 +33,7 @@ class Protocol: # pylint: disable=missing-class-docstring
FilePath = Union[str, os.PathLike]
# For testing FilePath arguments
FilePathClasses = [str, pathlib.Path]
-FilePathType = Union[Type[str], Type[pathlib.Path]]
+FilePathType = Union[type[str], type[pathlib.Path]]
## Python tracing
@@ -47,8 +44,8 @@ def __call__(
frame: FrameType,
event: str,
arg: Any,
- lineno: Optional[TLineNo] = None # Our own twist, see collector.py
- ) -> Optional[TTraceFn]:
+ lineno: TLineNo | None = None, # Our own twist, see collector.py
+ ) -> TTraceFn | None:
...
## Coverage.py tracing
@@ -56,17 +53,17 @@ def __call__(
# Line numbers are pervasive enough that they deserve their own type.
TLineNo = int
-TArc = Tuple[TLineNo, TLineNo]
+TArc = tuple[TLineNo, TLineNo]
class TFileDisposition(Protocol):
"""A simple value type for recording what to do with a file."""
original_filename: str
canonical_filename: str
- source_filename: Optional[str]
+ source_filename: str | None
trace: bool
reason: str
- file_tracer: Optional[FileTracer]
+ file_tracer: FileTracer | None
has_dynamic_filename: bool
@@ -78,26 +75,33 @@ class TFileDisposition(Protocol):
# - If measuring arcs in the C tracer, the values are sets of packed arcs (two
# line numbers combined into one integer).
-TTraceFileData = Union[Set[TLineNo], Set[TArc], Set[int]]
+TTraceFileData = Union[set[TLineNo], set[TArc], set[int]]
-TTraceData = Dict[str, TTraceFileData]
+TTraceData = dict[str, TTraceFileData]
-class TTracer(Protocol):
- """Either CTracer or PyTracer."""
+# Functions passed into collectors.
+TShouldTraceFn = Callable[[str, FrameType], TFileDisposition]
+TCheckIncludeFn = Callable[[str, FrameType], bool]
+TShouldStartContextFn = Callable[[FrameType], Union[str, None]]
+
+class Tracer(Protocol):
+ """Anything that can report on Python execution."""
data: TTraceData
trace_arcs: bool
- should_trace: Callable[[str, FrameType], TFileDisposition]
- should_trace_cache: Mapping[str, Optional[TFileDisposition]]
- should_start_context: Optional[Callable[[FrameType], Optional[str]]]
- switch_context: Optional[Callable[[Optional[str]], None]]
+ should_trace: TShouldTraceFn
+ should_trace_cache: Mapping[str, TFileDisposition | None]
+ should_start_context: TShouldStartContextFn | None
+ switch_context: Callable[[str | None], None] | None
+ lock_data: Callable[[], None]
+ unlock_data: Callable[[], None]
warn: TWarnFn
def __init__(self) -> None:
...
- def start(self) -> TTraceFn:
- """Start this tracer, returning a trace function."""
+ def start(self) -> TTraceFn | None:
+ """Start this tracer, return a trace function if based on sys.settrace."""
def stop(self) -> None:
"""Stop this tracer."""
@@ -108,9 +112,10 @@ def activity(self) -> bool:
def reset_activity(self) -> None:
"""Reset the activity() flag."""
- def get_stats(self) -> Optional[Dict[str, int]]:
+ def get_stats(self) -> dict[str, int] | None:
"""Return a dictionary of statistics, or None."""
+
## Coverage
# Many places use kwargs as Coverage kwargs.
@@ -121,7 +126,7 @@ def get_stats(self) -> Optional[Dict[str, int]]:
# One value read from a config file.
TConfigValueIn = Optional[Union[bool, int, float, str, Iterable[str]]]
-TConfigValueOut = Optional[Union[bool, int, float, str, List[str]]]
+TConfigValueOut = Optional[Union[bool, int, float, str, list[str]]]
# An entire config section, mapping option names to values.
TConfigSectionIn = Mapping[str, TConfigValueIn]
TConfigSectionOut = Mapping[str, TConfigValueOut]
@@ -129,7 +134,7 @@ def get_stats(self) -> Optional[Dict[str, int]]:
class TConfigurable(Protocol):
"""Something that can proxy to the coverage configuration settings."""
- def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
+ def get_option(self, option_name: str) -> TConfigValueOut | None:
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -140,7 +145,7 @@ def get_option(self, option_name: str) -> Optional[TConfigValueOut]:
"""
- def set_option(self, option_name: str, value: Union[TConfigValueIn, TConfigSectionIn]) -> None:
+ def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
@@ -162,7 +167,8 @@ def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
TMorf = Union[ModuleType, str]
-TSourceTokenLines = Iterable[List[Tuple[str, str]]]
+TSourceTokenLines = Iterable[list[tuple[str, str]]]
+
## Plugins
@@ -176,7 +182,7 @@ class TPlugin(Protocol):
class TWarnFn(Protocol):
"""A callable warn() function."""
- def __call__(self, msg: str, slug: Optional[str] = None, once: bool = False) -> None:
+ def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None:
...
diff --git a/coverage/version.py b/coverage/version.py
index c9e8d7f7e..a76371e77 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -8,7 +8,7 @@
# version_info: same semantics as sys.version_info.
# _dev: the .devN suffix if any.
-version_info = (7, 2, 2, "final", 0)
+version_info = (7, 6, 10, "final", 0)
_dev = 0
@@ -21,10 +21,10 @@ def _make_version(
dev: int = 0,
) -> str:
"""Create a readable version string from version_info tuple components."""
- assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
+ assert releaselevel in ["alpha", "beta", "candidate", "final"]
version = "%d.%d.%d" % (major, minor, micro)
- if releaselevel != 'final':
- short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
+ if releaselevel != "final":
+ short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel]
version += f"{short}{serial}"
if dev != 0:
version += f".dev{dev}"
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index fd2e9f81b..487c2659c 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -12,12 +12,13 @@
import xml.dom.minidom
from dataclasses import dataclass
-from typing import Any, Dict, IO, Iterable, Optional, TYPE_CHECKING, cast
+from typing import Any, IO, TYPE_CHECKING
+from collections.abc import Iterable
from coverage import __version__, files
from coverage.misc import isolate_module, human_sorted, human_sorted_items
from coverage.plugin import FileReporter
-from coverage.report import get_analysis_to_report
+from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis
from coverage.types import TMorf
from coverage.version import __url__
@@ -28,7 +29,7 @@
os = isolate_module(os)
-DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'
+DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd"
def rate(hit: int, num: int) -> str:
@@ -42,7 +43,7 @@ def rate(hit: int, num: int) -> str:
@dataclass
class PackageData:
"""Data we keep about each "package" (in Java terms)."""
- elements: Dict[str, xml.dom.minidom.Element]
+ elements: dict[str, xml.dom.minidom.Element]
hits: int
lines: int
br_hits: int
@@ -67,13 +68,15 @@ def __init__(self, coverage: Coverage) -> None:
if self.config.source:
for src in self.config.source:
if os.path.exists(src):
- if not self.config.relative_files:
+ if self.config.relative_files:
+ src = src.rstrip(r"\/")
+ else:
src = files.canonical_filename(src)
self.source_paths.add(src)
- self.packages: Dict[str, PackageData] = {}
+ self.packages: dict[str, PackageData] = {}
self.xml_out: xml.dom.minidom.Document
- def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] = None) -> float:
+ def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
"""Generate a Cobertura-compatible XML report for `morfs`.
`morfs` is a list of modules or file names.
@@ -95,7 +98,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] =
xcoverage.setAttribute("version", __version__)
xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
xcoverage.appendChild(self.xml_out.createComment(
- f" Generated by coverage.py: {__url__} "
+ f" Generated by coverage.py: {__url__} ",
))
xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} "))
@@ -127,7 +130,7 @@ def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]] =
appendChild(xpackage, xclasses)
for _, class_elt in human_sorted_items(pkg_data.elements.items()):
appendChild(xclasses, class_elt)
- xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
+ xpackage.setAttribute("name", pkg_name.replace(os.sep, "."))
xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines))
if has_arcs:
branch_rate = rate(pkg_data.br_hits, pkg_data.branches)
@@ -172,7 +175,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
if analysis.numbers.n_statements == 0:
return
- # Create the 'lines' and 'package' XML elements, which
+ # Create the "lines" and "package" XML elements, which
# are populated later. Note that a package == a directory.
filename = fr.filename.replace("\\", "/")
for source_path in self.source_paths:
@@ -205,7 +208,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
branch_stats = analysis.branch_stats()
missing_branch_arcs = analysis.missing_branch_arcs()
- # For each statement, create an XML 'line' element.
+ # For each statement, create an XML "line" element.
for line in sorted(analysis.statements):
xline = self.xml_out.createElement("line")
xline.setAttribute("number", str(line))
@@ -220,7 +223,7 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
xline.setAttribute("branch", "true")
xline.setAttribute(
"condition-coverage",
- "%d%% (%d/%d)" % (100*taken//total, taken, total)
+ "%d%% (%d/%d)" % (100*taken//total, taken, total),
)
if line in missing_branch_arcs:
annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
@@ -255,4 +258,4 @@ def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None
def serialize_xml(dom: xml.dom.minidom.Document) -> str:
"""Serialize a minidom node to XML."""
- return cast(str, dom.toprettyxml())
+ return dom.toprettyxml()
diff --git a/doc/_static/coverage.css b/doc/_static/coverage.css
index a85fe4312..43d2ae12e 100644
--- a/doc/_static/coverage.css
+++ b/doc/_static/coverage.css
@@ -26,8 +26,6 @@ img.tideliftlogo {
margin-bottom: 12px;
}
-/* Tabs */
-
.ui.menu {
font-family: Helvetica;
min-height: 0;
@@ -41,10 +39,6 @@ img.tideliftlogo {
padding: 0;
}
-.sphinx-tabs {
- margin-bottom: 1em;
-}
-
.sig {
font-family: Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;
}
@@ -71,3 +65,53 @@ img.tideliftlogo {
color: #404040;
background: #efc;
}
+
+/* I'm not sure why I had to make this so specific to get it to take effect... */
+div.rst-content div.document div.wy-table-responsive table.docutils.align-default tbody tr td {
+ vertical-align: top !important;
+}
+
+/* And this doesn't work, and I guess I just have to live with it. */
+div.rst-content div.document div.wy-table-responsive table.docutils.align-default tbody tr td .line-block {
+ margin-bottom: 0 !important;
+}
+
+/* sphinx-code-tabs */
+
+/* Some selectors here are extra-specific (.container) because this file comes
+ * before code-tabs.css, so we need the specificity to override it.
+ */
+
+div.tabs.container > ul.tabbar > li.tabbutton {
+ color: #666;
+ background-color: #ddd;
+ border-color: #aaa;
+}
+
+div.tabs.container > ul.tabbar > li.tabbutton:hover {
+ background-color: #eee;
+}
+
+div.tabs.container > ul.tabbar > li.tabbutton.selected {
+ color: black;
+ background-color: #fff;
+ border-color: #aaa;
+ border-bottom-color: #fff;
+}
+
+div.tabs.container > ul.tabbar > li.tabbutton.selected:hover {
+ background-color: #fff;
+}
+
+div.tabs.container {
+ margin-bottom: 1em;
+}
+
+div.tab.selected {
+ border: 1px solid #ccc;
+ border-radius: 0 .5em .5em .5em;
+}
+
+div.tab.codetab.selected {
+ border: none;
+}
diff --git a/doc/api_coverage.rst b/doc/api_coverage.rst
index 7eb9bd370..b98c0b63a 100644
--- a/doc/api_coverage.rst
+++ b/doc/api_coverage.rst
@@ -6,10 +6,7 @@
The Coverage class
------------------
-.. module:: coverage
- :noindex:
-
-.. autoclass:: Coverage
+.. autoclass:: coverage.Coverage
:members:
:exclude-members: sys_info
:special-members: __init__
diff --git a/doc/api_coveragedata.rst b/doc/api_coveragedata.rst
index 155e4fdab..0a78a74d5 100644
--- a/doc/api_coveragedata.rst
+++ b/doc/api_coveragedata.rst
@@ -8,9 +8,6 @@ The CoverageData class
.. versionadded:: 4.0
-.. module:: coverage
- :noindex:
-
-.. autoclass:: CoverageData
+.. autoclass:: coverage.CoverageData
:members:
:special-members: __init__
diff --git a/doc/api_exceptions.rst b/doc/api_exceptions.rst
index e018a9703..634e065f9 100644
--- a/doc/api_exceptions.rst
+++ b/doc/api_exceptions.rst
@@ -6,11 +6,6 @@
Coverage exceptions
-------------------
-.. module:: coverage.exceptions
-
-.. autoclass:: CoverageException
-
.. automodule:: coverage.exceptions
- :noindex:
:members:
- :exclude-members: CoverageException
+ :member-order: bysource
diff --git a/doc/api_module.rst b/doc/api_module.rst
index c3da90fba..d69e243f3 100644
--- a/doc/api_module.rst
+++ b/doc/api_module.rst
@@ -26,7 +26,7 @@ available by name.
A string with the version of coverage.py, for example, ``"5.0b2"``.
-.. autoclass:: CoverageException
+.. autoexception:: coverage.CoverageException
Starting coverage.py automatically
@@ -35,4 +35,4 @@ Starting coverage.py automatically
This function is used to start coverage measurement automatically when Python
starts. See :ref:`subprocess` for details.
-.. autofunction:: process_startup
+.. autofunction:: coverage.process_startup
diff --git a/doc/api_plugin.rst b/doc/api_plugin.rst
index 00acecbca..1d4593654 100644
--- a/doc/api_plugin.rst
+++ b/doc/api_plugin.rst
@@ -9,26 +9,30 @@ Plug-in classes
.. automodule:: coverage.plugin
-.. module:: coverage
- :noindex:
-
The CoveragePlugin class
------------------------
-.. autoclass:: CoveragePlugin
+.. autoclass:: coverage.CoveragePlugin
:members:
:member-order: bysource
The FileTracer class
--------------------
-.. autoclass:: FileTracer
+.. autoclass:: coverage.FileTracer
:members:
:member-order: bysource
The FileReporter class
----------------------
-.. autoclass:: FileReporter
+.. autoclass:: coverage.FileReporter
+ :members:
+ :member-order: bysource
+
+The CodeRegion class
+--------------------
+
+.. autoclass:: coverage.CodeRegion
:members:
:member-order: bysource
diff --git a/doc/branch.rst b/doc/branch.rst
index f500287f8..a1a4e9d69 100644
--- a/doc/branch.rst
+++ b/doc/branch.rst
@@ -116,3 +116,16 @@ Here the while loop will never complete because the break will always be taken
at some point. Coverage.py can't work that out on its own, but the "no branch"
pragma indicates that the branch is known to be partial, and the line is not
flagged.
+
+Generator expressions
+=====================
+
+Generator expressions may also report partial branch coverage. Consider the
+following example::
+
+ value = next(i for i in range(1))
+
+While we might expect this line of code to be reported as covered, the
+generator does not iterate until ``StopIteration`` is raised, the indication
+that the loop is complete. This is another case where adding
+``# pragma: no branch`` may be desirable.
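+
+For example, the pragma goes on the line with the generator expression::
+
+ value = next(i for i in range(1)) # pragma: no branch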
diff --git a/doc/changes.rst b/doc/changes.rst
index 54a3c81be..813ff637a 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -6,7 +6,7 @@
.. The recent changes from the top-level file:
.. include:: ../CHANGES.rst
- :end-before: scriv-end-here
+ :end-before: endchangesinclude
.. Older changes here:
@@ -547,7 +547,7 @@ Version 5.0a2 — 2018-09-03
may need ``parallel=true`` where you didn't before.
- The old data format is still available (for now) by setting the environment
- variable COVERAGE_STORAGE=json. Please tell me if you think you need to
+ variable ``COVERAGE_STORAGE=json``. Please tell me if you think you need to
keep the JSON format.
- The database schema is guaranteed to change in the future, to support new
@@ -845,10 +845,10 @@ Version 4.3.2 — 2017-01-16
would cause a "No data to report" error, as reported in `issue 549`_. This is
now fixed; thanks, Loïc Dachary.
-- If-statements can be optimized away during compilation, for example, `if 0:`
- or `if __debug__:`. Coverage.py had problems properly understanding these
- statements which existed in the source, but not in the compiled bytecode.
- This problem, reported in `issue 522`_, is now fixed.
+- If-statements can be optimized away during compilation, for example,
+ ``if 0:`` or ``if __debug__:``. Coverage.py had problems properly
+ understanding these statements which existed in the source, but not in the
+ compiled bytecode. This problem, reported in `issue 522`_, is now fixed.
- If you specified ``--source`` as a directory, then coverage.py would look for
importable Python files in that directory, and could identify ones that had
@@ -1060,12 +1060,12 @@ Work from the PyCon 2016 Sprints!
- The ``concurrency`` option can now take multiple values, to support programs
using multiprocessing and another library such as eventlet. This is only
possible in the configuration file, not from the command line. The
- configuration file is the only way for sub-processes to all run with the same
+ configuration file is the only way for subprocesses to all run with the same
options. Fixes `issue 484`_. Thanks to Josh Williams for prototyping.
- Using a ``concurrency`` setting of ``multiprocessing`` now implies
``--parallel`` so that the main program is measured similarly to the
- sub-processes.
+ subprocesses.
- When using `automatic subprocess measurement`_, running coverage commands
would create spurious data files. This is now fixed, thanks to diagnosis and
@@ -1521,7 +1521,7 @@ Version 4.0a6 — 2015-06-21
persisted in pursuing this despite Ned's pessimism. Fixes `issue 308`_ and
`issue 324`_.
-- The COVERAGE_DEBUG environment variable can be used to set the
+- The ``COVERAGE_DEBUG`` environment variable can be used to set the
``[run] debug`` configuration option to control what internal operations are
logged.
@@ -2590,8 +2590,8 @@ Version 3.0b3 — 2009-05-16
interface still uses automatic saving.
-Version 3.0b — 2009-04-30
--------------------------
+Version 3.0b2 — 2009-04-30
+--------------------------
HTML reporting, and continued refactoring.
diff --git a/doc/cmd.rst b/doc/cmd.rst
index 0704e940a..fa6565678 100644
--- a/doc/cmd.rst
+++ b/doc/cmd.rst
@@ -1,34 +1,12 @@
.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-.. This file is meant to be processed with cog to insert the latest command
+.. This file is processed with cog to insert the latest command
help into the docs. If it's out of date, the quality checks will fail.
Running "make prebuild" will bring it up to date.
.. [[[cog
- # optparse wraps help to the COLUMNS value. Set it here to be sure it's
- # consistent regardless of the environment. Has to be set before we
- # import cmdline.py, which creates the optparse objects.
- import os
- os.environ["COLUMNS"] = "80"
-
- import contextlib
- import io
- import re
- import textwrap
- from coverage.cmdline import CoverageScript
-
- def show_help(cmd):
- with contextlib.redirect_stdout(io.StringIO()) as stdout:
- CoverageScript().command_line([cmd, "--help"])
- help = stdout.getvalue()
- help = help.replace("__main__.py", "coverage")
- help = re.sub(r"(?m)^Full doc.*$", "", help)
- help = help.rstrip()
-
- print(".. code::\n")
- print(f" $ coverage {cmd} --help")
- print(textwrap.indent(help, " "))
+ from cog_helpers import show_configs, show_help
.. ]]]
.. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e)
@@ -43,8 +21,8 @@ Command line usage
When you install coverage.py, a command-line script called ``coverage`` is
placed on your path. To help with multi-version installs, it will also create
a ``coverage3`` alias, and a ``coverage-X.Y`` alias, depending on the version
-of Python you're using. For example, when installing on Python 3.7, you will
-be able to use ``coverage``, ``coverage3``, or ``coverage-3.7`` on the command
+of Python you're using. For example, when installing on Python 3.10, you will
+be able to use ``coverage``, ``coverage3``, or ``coverage-3.10`` on the command
line.
Coverage.py has a number of commands:
@@ -158,15 +136,14 @@ There are many options:
--source=SRC1,SRC2,...
A list of directories or importable names of code to
measure.
- --timid Use a simpler but slower trace method. Try this if you
- get seemingly impossible results!
+ --timid Use the slower Python trace function core.
--debug=OPTS Debug options, separated by commas. [env:
COVERAGE_DEBUG]
-h, --help Get help on this command.
--rcfile=RCFILE Specify configuration file. By default '.coveragerc',
'setup.cfg', 'tox.ini', and 'pyproject.toml' are
tried. [env: COVERAGE_RCFILE]
-.. [[[end]]] (checksum: 05d15818e42e6f989c42894fb2b3c753)
+.. [[[end]]] (checksum: b1a0fffe2768fc142f1d97ae556b621d)
If you want :ref:`branch coverage <branch>` measurement, use the ``--branch``
flag. Otherwise only statement coverage is measured.
@@ -199,15 +176,15 @@ You can combine multiple values for ``--concurrency``, separated with commas.
You can specify ``thread`` and also one of ``eventlet``, ``gevent``, or
``greenlet``.
-If you are using ``--concurrency=multiprocessing``, you must set other options
-in the configuration file. Options on the command line will not be passed to
-the processes that multiprocessing creates. Best practice is to use the
-configuration file for all options.
+If you are using ``--concurrency=multiprocessing``, you must set your other
+options in the configuration file. Options on the command line will not be
+passed to the processes that multiprocessing creates. Best practice is to use
+the configuration file for all options.
.. _multiprocessing: https://docs.python.org/3/library/multiprocessing.html
.. _greenlet: https://greenlet.readthedocs.io/
-.. _gevent: http://www.gevent.org/
-.. _eventlet: http://eventlet.net/
+.. _gevent: https://www.gevent.org/
+.. _eventlet: https://eventlet.readthedocs.io/
If you are measuring coverage in a multi-process program, or across a number of
machines, you'll want the ``--parallel-mode`` switch to keep the data separate
@@ -225,6 +202,11 @@ If your coverage results seem to be overlooking code that you know has been
executed, try running coverage.py again with the ``--timid`` flag. This uses a
simpler but slower trace method, and might be needed in rare cases.
+In Python 3.12 and above, you can try an experimental core based on the new
+:mod:`sys.monitoring <python:sys.monitoring>` module by defining a
+``COVERAGE_CORE=sysmon`` environment variable. This should be faster, though
+plugins and dynamic contexts are not yet supported with it.
+
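+For example, using a placeholder program name::
+
+ $ COVERAGE_CORE=sysmon coverage run my_program.py
+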
Coverage.py sets an environment variable, ``COVERAGE_RUN`` to indicate that
your code is running under coverage measurement. The value is not relevant,
and may change in the future.
@@ -291,17 +273,49 @@ Conflicting dynamic contexts (dynamic-conflict)
:meth:`.Coverage.switch_context` function to change the context. Only one of
these mechanisms should be in use at a time.
+sys.monitoring isn't available, using default core (no-sysmon)
+ You requested to use the sys.monitoring measurement core, but are running on
+ Python 3.11 or lower where it isn't available. A default core will be used
+ instead.
+
Individual warnings can be disabled with the :ref:`disable_warnings
` configuration setting. To silence "No data was
-collected," add this to your .coveragerc file::
+collected," add this to your configuration file:
- [run]
- disable_warnings = no-data-collected
+.. [[[cog
+ show_configs(
+ ini=r"""
+ [run]
+ disable_warnings = no-data-collected
+ """,
+ toml=r"""
+ [tool.coverage.run]
+ disable_warnings = ["no-data-collected"]
+ """,
+ )
+.. ]]]
+
+.. tabs::
+
+ .. code-tab:: ini
+ :caption: .coveragerc
-or pyproject.toml::
+ [run]
+ disable_warnings = no-data-collected
- [tool.coverage.run]
- disable_warnings = ['no-data-collected']
+ .. code-tab:: toml
+ :caption: pyproject.toml
+
+ [tool.coverage.run]
+ disable_warnings = ["no-data-collected"]
+
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+
+ [coverage:run]
+ disable_warnings = no-data-collected
+
+.. [[[end]]] (checksum: 489285bcfa173b69a286f03fe13e4554)
.. _cmd_datafile:
@@ -310,8 +324,8 @@ Data file
.........
Coverage.py collects execution data in a file called ".coverage". If need be,
-you can set a new file name with the COVERAGE_FILE environment variable. This
-can include a path to another directory.
+you can set a new file name with the ``COVERAGE_FILE`` environment variable.
+This can include a path to another directory.
By default, each run of your program starts with an empty data set. If you need
to run your program multiple times to get complete data (for example, because
@@ -624,9 +638,10 @@ Here's a `sample report`__.
__ https://nedbatchelder.com/files/sample_coverage_html/index.html
-Lines are highlighted green for executed, red for missing, and gray for
-excluded. The counts at the top of the file are buttons to turn on and off
-the highlighting.
+Lines are highlighted: green for executed, red for missing, and gray for
+excluded. If you've used branch coverage, partial branches are yellow. The
+colored counts at the top of the file are buttons to turn on and off the
+highlighting.
A number of keyboard shortcuts are available for navigating the report.
Click the keyboard icon in the upper right to see the complete list.
@@ -900,8 +915,7 @@ Text annotation: ``coverage annotate``
.. note::
The **annotate** command has been obsoleted by more modern reporting tools,
- including the **html** command. **annotate** will be removed in a future
- version.
+ including the **html** command.
The **annotate** command produces a text annotation of your source code. With
a ``-d`` argument specifying an output directory, each Python file becomes a
@@ -1016,7 +1030,10 @@ of operation to log:
* ``dataio``: log when reading or writing any data file.
-* ``dataop``: log when data is added to the CoverageData object.
+* ``dataop``: log a summary of data being added to CoverageData objects.
+
+* ``dataop2``: when used with ``debug=dataop``, log the actual data being added
+ to CoverageData objects.
* ``lock``: log operations acquiring locks in the data layer.
@@ -1037,6 +1054,8 @@ of operation to log:
* ``pybehave``: show the values of `internal flags
<https://github.com/nedbat/coveragepy/blob/master/coverage/env.py>`_
describing the behavior of the current version of Python.
+* ``pytest``: indicate the name of the current pytest test when it changes.
+
* ``self``: annotate each debug message with the object printing the message.
* ``sql``: log the SQL statements used for recording data.
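+
+For example, a sketch combining two of the options above on the command
+line::
+
+    $ coverage run --debug=dataop,dataop2 -m pytest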
diff --git a/doc/cog_helpers.py b/doc/cog_helpers.py
new file mode 100644
index 000000000..d30030875
--- /dev/null
+++ b/doc/cog_helpers.py
@@ -0,0 +1,97 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""
+Functions for use with cog in the documentation.
+"""
+
+# For help text in doc/cmd.rst:
+# optparse wraps help to the COLUMNS value. Set it here to be sure it's
+# consistent regardless of the environment. Has to be set before we
+# import cmdline.py, which creates the optparse objects.
+
+# pylint: disable=wrong-import-position
+import os
+os.environ["COLUMNS"] = "80"
+
+import contextlib
+import io
+import re
+import textwrap
+
+import cog # pylint: disable=import-error
+
+from coverage.cmdline import CoverageScript
+from coverage.config import read_coverage_config
+
+
+def show_help(cmd):
+ """
+ Insert the help output from a command.
+ """
+ with contextlib.redirect_stdout(io.StringIO()) as stdout:
+ CoverageScript().command_line([cmd, "--help"])
+ help_text = stdout.getvalue()
+ help_text = help_text.replace("__main__.py", "coverage")
+ help_text = re.sub(r"(?m)^Full doc.*$", "", help_text)
+ help_text = help_text.rstrip()
+
+ print(".. code::\n")
+ print(f" $ coverage {cmd} --help")
+ print(textwrap.indent(help_text, " "))
+
+
+def _read_config(text, fname):
+ """
+ Prep and read configuration text.
+
+ Returns the prepared text, and a dict of the settings.
+ """
+ # Text will be triple-quoted with an initial ignored newline.
+ assert text[0] == "\n"
+ text = textwrap.dedent(text[1:])
+
+ os.makedirs("tmp", exist_ok=True)
+ with open(f"tmp/{fname}", "w") as f:
+ f.write(text)
+
+ config = read_coverage_config(f"tmp/{fname}", warn=cog.error)
+
+ values = {}
+ for name, val in vars(config).items():
+ if name.startswith("_"):
+ continue
+ if "config_file" in name:
+ continue
+ values[name] = val
+ return text, values
+
+
+def show_configs(ini, toml):
+ """
+ Show configuration text in a tabbed box.
+
+ `ini` is the ini-file syntax, `toml` is the equivalent TOML syntax.
+ The equivalence is checked for accuracy, and the process fails if there's
+ a mismatch.
+
+ A three-tabbed box will be produced.
+ """
+ ini, ini_vals = _read_config(ini, "covrc")
+ toml, toml_vals = _read_config(toml, "covrc.toml")
+ for key, val in ini_vals.items():
+ if val != toml_vals[key]:
+ cog.error(f"Mismatch! {key}:\nini: {val!r}\ntoml: {toml_vals[key]!r}")
+
+ ini2 = re.sub(r"(?m)^\[", "[coverage:", ini)
+ print()
+ print(".. tabs::\n")
+ for name, syntax, text in [
+ (".coveragerc", "ini", ini),
+ ("pyproject.toml", "toml", toml),
+ ("setup.cfg or tox.ini", "ini", ini2),
+ ]:
+ print(f" .. code-tab:: {syntax}")
+ print(f" :caption: {name}")
+ print()
+ print(textwrap.indent(text, " " * 8))
diff --git a/doc/conf.py b/doc/conf.py
index 59907127a..9756049e0 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -26,7 +26,7 @@
#sys.path.append(os.path.abspath('.'))
# on_rtd is whether we are on readthedocs.org
-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+on_rtd = os.getenv('READTHEDOCS') == 'True'
# -- General configuration -----------------------------------------------------
@@ -36,20 +36,20 @@
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
- 'sphinxcontrib.spelling',
'sphinx.ext.intersphinx',
'sphinxcontrib.restbuilder',
'sphinx.ext.napoleon',
- #'sphinx_tabs.tabs',
+ 'sphinx_code_tabs',
+ 'sphinx_rtd_theme',
]
autodoc_typehints = "description"
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = []
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = {'.rst': 'restructuredtext'}
# The encoding of source files.
#source_encoding = 'utf-8'
@@ -65,16 +65,16 @@
# built documents.
# @@@ editable
-copyright = "2009–2023, Ned Batchelder" # pylint: disable=redefined-builtin
+copyright = "2009–2024, Ned Batchelder" # pylint: disable=redefined-builtin
# The short X.Y.Z version.
-version = "7.2.2"
+version = "7.6.10"
# The full version, including alpha/beta/rc tags.
-release = "7.2.2"
+release = "7.6.10"
# The date of release, in "monthname day, year" format.
-release_date = "March 16, 2023"
+release_date = "December 26, 2024"
# @@@ end
-rst_epilog = """
+rst_epilog = f"""
.. |release_date| replace:: {release_date}
.. |coverage-equals-release| replace:: coverage=={release}
.. |doc-url| replace:: https://coverage.readthedocs.io/en/{release}
@@ -82,7 +82,7 @@
-""".format(release=release, release_date=release_date)
+"""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -126,10 +126,9 @@
}
nitpick_ignore = [
- ("py:class", "frame"),
- ("py:class", "module"),
("py:class", "DefaultValue"),
("py:class", "FilePath"),
+ ("py:class", "types.FrameType"),
("py:class", "TWarnFn"),
("py:class", "TDebugCtl"),
]
@@ -142,15 +141,7 @@
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
-#html_theme = 'default'
-
-if not on_rtd: # only import and set the theme if we're building docs locally
- import sphinx_rtd_theme
- html_theme = 'sphinx_rtd_theme'
- html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
-
-# otherwise, readthedocs.org uses their theme by default, so no need to specify it
-
+html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -160,7 +151,7 @@
#html_add_permalinks = ""
# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = ['_templates']
+#html_theme_path = ['_templates']
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
@@ -220,6 +211,9 @@
# -- Spelling ---
if any("spell" in arg for arg in sys.argv):
+ # sphinxcontrib.spelling needs the native "enchant" library, which often is
+ # missing, so only use the extension if we are specifically spell-checking.
+ extensions += ['sphinxcontrib.spelling']
names_file = tempfile.NamedTemporaryFile(mode='w', prefix="coverage_names_", suffix=".txt")
with open("../CONTRIBUTORS.txt") as contributors:
names = set(re.split(r"[^\w']", contributors.read()))
diff --git a/doc/config.rst b/doc/config.rst
index 152b3af48..87cbdd108 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -1,6 +1,16 @@
.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+.. This file is processed with cog to create the tabbed multi-syntax
+ configuration examples. If those are wrong, the quality checks will fail.
+ Running "make prebuild" checks them and produces the output.
+
+.. [[[cog
+ from cog_helpers import show_configs
+.. ]]]
+.. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e)
+
+
.. _config:
=======================
@@ -15,9 +25,9 @@ specification of options that are otherwise only available in the
:ref:`API <api>`.
Configuration files also make it easier to get coverage testing of spawned
-sub-processes. See :ref:`subprocess` for more details.
+subprocesses. See :ref:`subprocess` for more details.
-The default name for configuration files is ``.coveragerc``, in the same
+The default name for the configuration file is ``.coveragerc``, in the same
directory coverage.py is being run in. Most of the settings in the
configuration file are tied to your source code and how it should be measured,
so it should be stored with your source, and checked into source control,
@@ -27,23 +37,26 @@ A different location for the configuration file can be specified with the
``--rcfile=FILE`` command line option or with the ``COVERAGE_RCFILE``
environment variable.
-Coverage.py will read settings from other usual configuration files if no other
-configuration file is used. It will automatically read from "setup.cfg" or
-"tox.ini" if they exist. In this case, the section names have "coverage:"
-prefixed, so the ``[run]`` options described below will be found in the
-``[coverage:run]`` section of the file.
+If ``.coveragerc`` doesn't exist and another file hasn't been specified, then
+coverage.py will look for settings in other common configuration files, in this
+order: setup.cfg, tox.ini, or pyproject.toml. The first file found with
+coverage.py settings will be used and other files won't be consulted.
Coverage.py will read from "pyproject.toml" if TOML support is available,
either because you are running on Python 3.11 or later, or because you
installed with the ``toml`` extra (``pip install coverage[toml]``).
-Configuration must be within the ``[tool.coverage]`` section, for example,
-``[tool.coverage.run]``. Environment variable expansion in values is
-available, but only within quoted strings, even for non-string values.
Syntax
------
+The specific syntax of a configuration file depends on what type it is.
+All configuration files are assumed to be in INI format, unless the file
+extension is .toml, in which case the file is read as TOML.
+
+INI Syntax
+..........
+
A coverage.py configuration file is in classic .ini file format: sections are
introduced by a ``[section]`` header, and contain ``name = value`` entries.
Lines beginning with ``#`` or ``;`` are ignored as comments.
@@ -54,6 +67,26 @@ values on multiple lines.
Boolean values can be specified as ``on``, ``off``, ``true``, ``false``, ``1``,
or ``0`` and are case-insensitive.
+In setup.cfg or tox.ini, the section names have "coverage:" prefixed, so the
+``[run]`` options described below will be found in the ``[coverage:run]``
+section of the file.
+
+TOML Syntax
+...........
+
+`TOML syntax`_ uses explicit lists with brackets, and strings with quotes.
+Booleans are ``true`` or ``false``.
+
+Configuration must be within the ``[tool.coverage]`` section, for example,
+``[tool.coverage.run]``. Environment variable expansion in values is
+available, but only within quoted strings, even for non-string values.
+
+.. _TOML syntax: https://toml.io
+
+
+Environment variables
+.....................
+
Environment variables can be substituted in by using dollar signs: ``$WORD``
or ``${WORD}`` will be replaced with the value of ``WORD`` in the environment.
A dollar sign can be inserted with ``$$``. Special forms can be used to
@@ -68,40 +101,168 @@ control what happens if the variable isn't defined in the environment:
- Otherwise, missing environment variables will result in empty strings with no
error.
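+
+For example, a sketch using the default-value form described above
+(``COV_DIR`` is a hypothetical variable; ``htmlcov`` is used if it isn't
+set)::
+
+    [html]
+    directory = ${COV_DIR-htmlcov}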
-Many sections and settings correspond roughly to commands and options in
-the :ref:`command-line interface `.
-Here's a sample configuration file::
+Sample file
+...........
+
+Here's a sample configuration file, in each syntax:
+
+.. [[[cog
+ show_configs(
+ ini=r"""
+ [run]
+ branch = True
+
+ [report]
+ ; Regexes for lines to exclude from consideration
+ exclude_also =
+ ; Don't complain about missing debug-only code:
+ def __repr__
+ if self\.debug
+
+ ; Don't complain if tests don't hit defensive assertion code:
+ raise AssertionError
+ raise NotImplementedError
+
+ ; Don't complain if non-runnable code isn't run:
+ if 0:
+ if __name__ == .__main__.:
+
+ ; Don't complain about abstract methods, they aren't run:
+ @(abc\.)?abstractmethod
+
+ ignore_errors = True
+
+ [html]
+ directory = coverage_html_report
+ """,
+ toml=r"""
+ [tool.coverage.run]
+ branch = true
+
+ [tool.coverage.report]
+ # Regexes for lines to exclude from consideration
+ exclude_also = [
+ # Don't complain about missing debug-only code:
+ "def __repr__",
+ "if self\\.debug",
+
+ # Don't complain if tests don't hit defensive assertion code:
+ "raise AssertionError",
+ "raise NotImplementedError",
+
+ # Don't complain if non-runnable code isn't run:
+ "if 0:",
+ "if __name__ == .__main__.:",
+
+ # Don't complain about abstract methods, they aren't run:
+ "@(abc\\.)?abstractmethod",
+ ]
+
+ ignore_errors = true
+
+ [tool.coverage.html]
+ directory = "coverage_html_report"
+ """,
+ )
+.. ]]]
+
+.. tabs::
+
+ .. code-tab:: ini
+ :caption: .coveragerc
+
+ [run]
+ branch = True
+
+ [report]
+ ; Regexes for lines to exclude from consideration
+ exclude_also =
+ ; Don't complain about missing debug-only code:
+ def __repr__
+ if self\.debug
+
+ ; Don't complain if tests don't hit defensive assertion code:
+ raise AssertionError
+ raise NotImplementedError
+
+ ; Don't complain if non-runnable code isn't run:
+ if 0:
+ if __name__ == .__main__.:
+
+ ; Don't complain about abstract methods, they aren't run:
+ @(abc\.)?abstractmethod
+
+ ignore_errors = True
- # .coveragerc to control coverage.py
- [run]
- branch = True
+ [html]
+ directory = coverage_html_report
- [report]
- # Regexes for lines to exclude from consideration
- exclude_lines =
- # Have to re-enable the standard pragma
- pragma: no cover
+ .. code-tab:: toml
+ :caption: pyproject.toml
- # Don't complain about missing debug-only code:
- def __repr__
- if self\.debug
+ [tool.coverage.run]
+ branch = true
- # Don't complain if tests don't hit defensive assertion code:
- raise AssertionError
- raise NotImplementedError
+ [tool.coverage.report]
+ # Regexes for lines to exclude from consideration
+ exclude_also = [
+ # Don't complain about missing debug-only code:
+ "def __repr__",
+ "if self\\.debug",
- # Don't complain if non-runnable code isn't run:
- if 0:
- if __name__ == .__main__.:
+ # Don't complain if tests don't hit defensive assertion code:
+ "raise AssertionError",
+ "raise NotImplementedError",
- # Don't complain about abstract methods, they aren't run:
- @(abc\.)?abstractmethod
+ # Don't complain if non-runnable code isn't run:
+ "if 0:",
+ "if __name__ == .__main__.:",
- ignore_errors = True
+ # Don't complain about abstract methods, they aren't run:
+ "@(abc\\.)?abstractmethod",
+ ]
- [html]
- directory = coverage_html_report
+ ignore_errors = true
+
+ [tool.coverage.html]
+ directory = "coverage_html_report"
+
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+
+ [coverage:run]
+ branch = True
+
+ [coverage:report]
+ ; Regexes for lines to exclude from consideration
+ exclude_also =
+ ; Don't complain about missing debug-only code:
+ def __repr__
+ if self\.debug
+
+ ; Don't complain if tests don't hit defensive assertion code:
+ raise AssertionError
+ raise NotImplementedError
+
+ ; Don't complain if non-runnable code isn't run:
+ if 0:
+ if __name__ == .__main__.:
+
+ ; Don't complain about abstract methods, they aren't run:
+ @(abc\.)?abstractmethod
+
+ ignore_errors = True
+
+ [coverage:html]
+ directory = coverage_html_report
+
+.. [[[end]]] (checksum: 1d4d59eb69af44aacb77c9ebad869b65)
+
+
+The specific configuration settings are described below. Many sections and
+settings correspond roughly to commands and options in the :ref:`command-line
+interface <cmd>`.
.. _config_run:
@@ -146,8 +307,8 @@ produce very wrong results.
.. _multiprocessing: https://docs.python.org/3/library/multiprocessing.html
.. _greenlet: https://greenlet.readthedocs.io/
-.. _gevent: http://www.gevent.org/
-.. _eventlet: http://eventlet.net/
+.. _gevent: https://www.gevent.org/
+.. _eventlet: https://eventlet.readthedocs.io/
See :ref:`subprocess` for details of multi-process measurement.
@@ -282,11 +443,12 @@ need to know the source origin.
(boolean, default False) if true, register a SIGTERM signal handler to capture
data when the process ends due to a SIGTERM signal. This includes
-:meth:`Process.terminate `, and other
+:meth:`Process.terminate <multiprocessing.Process.terminate>` and other
ways to terminate a process. This can help when collecting data in unusual
situations, but can also introduce problems (see `issue 1310`_).
-Only on Linux and Mac.
+The signal handler is only registered on Linux and Mac. On Windows, this
+setting has no effect.
.. _issue 1310: https://github.com/nedbat/coveragepy/issues/1310
@@ -319,9 +481,9 @@ ambiguities between packages and directories.
[run] timid
...........
-(boolean, default False) Use a simpler but slower trace method. This uses
-PyTracer instead of CTracer, and is only needed in very unusual circumstances.
-Try this if you get seemingly impossible results.
+(boolean, default False) Use a simpler but slower trace method. This uses the
+PyTracer trace function core instead of CTracer, and is only needed in very
+unusual circumstances.
.. _config_paths:
@@ -330,13 +492,60 @@ Try this if you get seemingly impossible results.
-------
The entries in this section are lists of file paths that should be considered
-equivalent when combining data from different machines::
+equivalent when combining data from different machines:
+
+.. [[[cog
+ show_configs(
+ ini=r"""
+ [paths]
+ source =
+ src/
+ /jenkins/build/*/src
+ c:\myproj\src
+ """,
+ toml=r"""
+ [tool.coverage.paths]
+ source = [
+ "src/",
+ "/jenkins/build/*/src",
+ "c:\\myproj\\src",
+ ]
+ """,
+ )
+.. ]]]
+
+.. tabs::
+
+ .. code-tab:: ini
+ :caption: .coveragerc
+
+ [paths]
+ source =
+ src/
+ /jenkins/build/*/src
+ c:\myproj\src
+
+ .. code-tab:: toml
+ :caption: pyproject.toml
+
+ [tool.coverage.paths]
+ source = [
+ "src/",
+ "/jenkins/build/*/src",
+ "c:\\myproj\\src",
+ ]
+
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+
+ [coverage:paths]
+ source =
+ src/
+ /jenkins/build/*/src
+ c:\myproj\src
+
+.. [[[end]]] (checksum: a074a5f121a23135dcb6733bca3e20bd)
- [paths]
- source =
- src/
- /jenkins/build/*/src
- c:\myproj\src
The names of the entries ("source" in this example) are ignored; you may
choose any name that you like. The value is a list of strings. When combining data
@@ -375,6 +584,21 @@ See :ref:`cmd_combine_remapping` and :ref:`source_glob` for more information.
Settings common to many kinds of reporting.
+.. _config_report_exclude_also:
+
+[report] exclude_also
+.....................
+
+(multi-string) A list of regular expressions. This setting is similar to
+:ref:`config_report_exclude_lines`: it specifies patterns for lines to exclude
+from reporting. This setting is preferred, because it will preserve the
+default exclude pattern ``pragma: no cover`` instead of overwriting it.
+
+See :ref:`config_report_exclude_lines` for further details.
+
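+For example, a sketch in ini syntax::
+
+    [report]
+    exclude_also =
+        def __repr__
+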
+.. versionadded:: 7.2.0
+
+
.. _config_report_exclude_lines:
[report] exclude_lines
@@ -384,7 +608,9 @@ Settings common to many kinds of reporting.
containing a match for one of these regexes is excluded from being reported as
missing. More details are in :ref:`excluding`. If you use this option, you
are replacing all the exclude regexes, so you'll need to also supply the
-"pragma: no cover" regex if you still want to use it.
+"pragma: no cover" regex if you still want to use it. The
+:ref:`config_report_exclude_also` setting can be used to specify patterns
+without overwriting the default set.
You can exclude lines introducing blocks, and the entire block is excluded. If
you exclude a ``def`` line or decorator line, the entire function is excluded.
@@ -394,18 +620,9 @@ only have to match a portion of the line. For example, if you write ``...``,
you'll exclude any line with three or more of any character. If you write
``pass``, you'll also exclude the line ``my_pass="foo"``, and so on.
-
-.. _config_report_exclude_also:
-
-[report] exclude_also
-.....................
-
-(multi-string) A list of regular expressions. This setting is the same as
-:ref:`config_report_exclude_lines`: it adds patterns for lines to exclude from
-reporting. This setting will preserve the default exclude patterns instead of
-overwriting them.
-
-.. versionadded:: 7.2.0
+All of the regexes here and in :ref:`config_report_exclude_also` are combined
+into one regex for processing, so you cannot use global flags like ``(?s)`` in
+your regexes. Use the scoped flag form instead: ``(?s:...)``.
.. _config_report_fail_under:
@@ -420,6 +637,19 @@ use of the decimal places. A setting of 100 will fail any value under 100,
regardless of the number of decimal places of precision.
+.. _config_report_format:
+
+[report] format
+...............
+
+(string, default "text") The format to use for the textual report. The default
+is "text", which produces a simple textual table. You can use "markdown" to
+produce a Markdown table, or "total" to output only the total coverage
+percentage.
+
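+For example, to produce a Markdown table on stdout::
+
+    $ coverage report --format=markdown
+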
+.. versionadded:: 7.0
+
+
.. _config_report_ignore_errors:
[report] ignore_errors
@@ -663,7 +893,22 @@ Settings particular to LCOV reporting (see :ref:`cmd_lcov`).
.. versionadded:: 6.3
+.. _config_lcov_output:
+
[lcov] output
.............
(string, default "coverage.lcov") Where to write the LCOV file.
+
+.. _config_lcov_line_checksums:
+
+[lcov] line_checksums
+.....................
+
+(boolean, default false) Whether to write per-line checksums as part of the
+lcov file. Because these checksums cover only lines with actual code on
+them, and do not verify the ordering of lines, they provide only a weak
+assurance that the source code available to analysis tools (e.g. ``genhtml``)
+matches the code that was used to generate the coverage data.
+
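+For example, a sketch in ini syntax::
+
+    [lcov]
+    line_checksums = true
+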
+.. versionadded:: 7.6.2
diff --git a/doc/contexts.rst b/doc/contexts.rst
index fbf940405..75080f0cb 100644
--- a/doc/contexts.rst
+++ b/doc/contexts.rst
@@ -1,6 +1,16 @@
.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+.. This file is processed with cog to create the tabbed multi-syntax
+ configuration examples. If those are wrong, the quality checks will fail.
+ Running "make prebuild" checks them and produces the output.
+
+.. [[[cog
+ from cog_helpers import show_configs
+.. ]]]
+.. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e)
+
+
.. _contexts:
====================
@@ -63,10 +73,42 @@ There are three ways to enable dynamic contexts:
.. highlight:: ini
The ``[run] dynamic_context`` setting has only one option now. Set it to
-``test_function`` to start a new dynamic context for every test function::
+``test_function`` to start a new dynamic context for every test function:
+
+.. [[[cog
+ show_configs(
+ ini=r"""
+ [run]
+ dynamic_context = test_function
+ """,
+ toml=r"""
+ [tool.coverage.run]
+ dynamic_context = "test_function"
+ """,
+ )
+.. ]]]
+
+.. tabs::
+
+ .. code-tab:: ini
+ :caption: .coveragerc
+
+ [run]
+ dynamic_context = test_function
+
+ .. code-tab:: toml
+ :caption: pyproject.toml
+
+ [tool.coverage.run]
+ dynamic_context = "test_function"
+
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+
+ [coverage:run]
+ dynamic_context = test_function
- [run]
- dynamic_context = test_function
+.. [[[end]]] (checksum: 7594c36231f0ef52b554aad8c835ccf4)
Each test function you run will be considered a separate dynamic context, and
coverage data will be segregated for each. A test function is any function
diff --git a/doc/contributing.rst b/doc/contributing.rst
index e9d2c3a40..368029081 100644
--- a/doc/contributing.rst
+++ b/doc/contributing.rst
@@ -1,6 +1,8 @@
.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+.. Command samples here were made with a 100-column terminal.
+
.. _contributing:
===========================
@@ -9,9 +11,10 @@ Contributing to coverage.py
.. highlight:: console
-I welcome contributions to coverage.py. Over the years, dozens of people have
-provided patches of various sizes to add features or fix bugs. This page
-should have all the information you need to make a contribution.
+I welcome contributions to coverage.py. Over the years, hundreds of people
+have provided contributions of various sizes to add features, fix bugs, or just
+help diagnose thorny issues. This page should have all the information you
+need to make a contribution.
One source of history or ideas are the `bug reports`_ against coverage.py.
There you can find ideas for requested features, or the remains of rejected
@@ -29,107 +32,180 @@ previous work in the area. Things are not always as straightforward as they
seem, and having the benefit of lessons learned by those before you can save
you frustration.
+We have a `#coverage channel in the Python Discord <discord_>`_ that can be a
+good place to explore ideas, get help, or help people with coverage.py.
+`Join us <discord_>`_!
+
+.. _discord: https://discord.com/channels/267624335836053506/1253355750684753950
Getting the code
----------------
+.. PYVERSIONS (mention of lowest version in the "create virtualenv" step).
+
The coverage.py code is hosted on a GitHub repository at
https://github.com/nedbat/coveragepy. To get a working environment, follow
these steps:
-.. minimum of PYVERSIONS:
+#. `Fork the repo`_ into your own GitHub account. The coverage.py code will
+ then be copied into a GitHub repository at
+ ``https://github.com/GITHUB_USER/coveragepy`` where GITHUB_USER is your
+ GitHub username.
-#. Create a Python 3.7 virtualenv to work in, and activate it.
+#. (Optional) Create a virtualenv to work in, and activate it. There
+ are a number of ways to do this. Use the method you are comfortable with.
+ Ideally, use Python 3.9 (the lowest version coverage.py supports).
#. Clone the repository::
- $ git clone https://github.com/nedbat/coveragepy
+ $ git clone https://github.com/GITHUB_USER/coveragepy
$ cd coveragepy
-#. Install the requirements::
+#. Install the requirements with either of these commands::
+ $ make install
$ python3 -m pip install -r requirements/dev.pip
- If this fails due to incorrect or missing hashes, use
- ``dev.in`` instead::
-
- $ python3 -m pip install -r requirements/dev.in
-
-#. Install a number of versions of Python. Coverage.py supports a range
- of Python versions. The more you can test with, the more easily your code
- can be used as-is. If you only have one version, that's OK too, but may
- mean more work integrating your contribution.
+ Note: You may need to upgrade pip to install the requirements.
Running the tests
-----------------
+.. To get the test output:
+ # Resize terminal width to 95
+ % make sterile
+
+.. with COVERAGE_ONE_CORE=
+
The tests are written mostly as standard unittest-style tests, and are run with
pytest running under `tox`_::
- $ tox
- py37 create: /Users/nedbat/coverage/trunk/.tox/py37
- py37 installdeps: -rrequirements/pip.pip, -rrequirements/pytest.pip, eventlet==0.25.1, greenlet==0.4.15
- py37 develop-inst: /Users/nedbat/coverage/trunk
- py37 installed: apipkg==1.5,appdirs==1.4.4,attrs==20.3.0,backports.functools-lru-cache==1.6.4,-e git+git@github.com:nedbat/coveragepy.git@36ef0e03c0439159c2245d38de70734fa08cddb4#egg=coverage,decorator==5.0.7,distlib==0.3.1,dnspython==2.1.0,eventlet==0.25.1,execnet==1.8.0,filelock==3.0.12,flaky==3.7.0,future==0.18.2,greenlet==0.4.15,hypothesis==6.10.1,importlib-metadata==4.0.1,iniconfig==1.1.1,monotonic==1.6,packaging==20.9,pluggy==0.13.1,py==1.10.0,PyContracts @ git+https://github.com/slorg1/contracts@c5a6da27d4dc9985f68e574d20d86000880919c3,pyparsing==2.4.7,pytest==6.2.3,pytest-forked==1.3.0,pytest-xdist==2.2.1,qualname==0.1.0,six==1.15.0,sortedcontainers==2.3.0,toml==0.10.2,typing-extensions==3.10.0.0,virtualenv==20.4.4,zipp==3.4.1
- py37 run-test-pre: PYTHONHASHSEED='376882681'
- py37 run-test: commands[0] | python setup.py --quiet clean develop
- py37 run-test: commands[1] | python igor.py zip_mods remove_extension
- py37 run-test: commands[2] | python igor.py test_with_tracer py
- === CPython 3.7.10 with Python tracer (.tox/py37/bin/python) ===
+ $ python3 -m tox -e py38
+ py38: wheel-0.43.0-py3-none-any.whl already present in /Users/ned/Library/Application Support/virtualenv/wheel/3.8/embed/3/wheel.json
+ py38: pip-24.0-py3-none-any.whl already present in /Users/ned/Library/Application Support/virtualenv/wheel/3.8/embed/3/pip.json
+ py38: setuptools-69.2.0-py3-none-any.whl already present in /Users/ned/Library/Application Support/virtualenv/wheel/3.8/embed/3/setuptools.json
+ py38: install_deps> python -m pip install -U -r requirements/pip.pip -r requirements/pytest.pip -r requirements/light-threads.pip
+ .pkg: install_requires> python -I -m pip install setuptools
+ .pkg: _optional_hooks> python /usr/local/virtualenvs/coverage/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ .pkg: get_requires_for_build_editable> python /usr/local/virtualenvs/coverage/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ .pkg: install_requires_for_build_editable> python -I -m pip install wheel
+ .pkg: build_editable> python /usr/local/virtualenvs/coverage/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta
+ py38: install_package_deps> python -m pip install -U 'tomli; python_full_version <= "3.11.0a6"'
+ py38: install_package> python -m pip install -U --force-reinstall --no-deps .tox/.tmp/package/1/coverage-7.4.5a0.dev1-0.editable-cp38-cp38-macosx_14_0_arm64.whl
+ py38: commands[0]> python igor.py zip_mods
+ py38: commands[1]> python setup.py --quiet build_ext --inplace
+ ld: warning: duplicate -rpath '/usr/local/pyenv/pyenv/versions/3.8.18/lib' ignored
+ ld: warning: duplicate -rpath '/opt/homebrew/lib' ignored
+ py38: commands[2]> python -m pip install -q -e .
+ py38: commands[3]> python igor.py test_with_core ctrace
+ === CPython 3.8.18 with C tracer (.tox/py38/bin/python) ===
bringing up nodes...
- ........................................................................................................................................................... [ 15%]
- ........................................................................................................................................................... [ 31%]
- ...........................................................................................................................................s............... [ 47%]
- ...........................................s...................................................................................sss.sssssssssssssssssss..... [ 63%]
- ........................................................................................................................................................s.. [ 79%]
- ......................................s..................................s................................................................................. [ 95%]
- ........................................ss...... [100%]
- 949 passed, 29 skipped in 40.56s
- py37 run-test: commands[3] | python setup.py --quiet build_ext --inplace
- py37 run-test: commands[4] | python igor.py test_with_tracer c
- === CPython 3.7.10 with C tracer (.tox/py37/bin/python) ===
+ ....................................................................................... [ 6%]
+ .....................................................x...x............s......s.s....s.. [ 12%]
+ ....................................................................................... [ 18%]
+ ....................................................................................... [ 25%]
+ ....................................................................................... [ 31%]
+ ....................................................................................... [ 37%]
+ ....................................................................................... [ 44%]
+ ....................................................................................... [ 50%]
+ ....................................................................................... [ 56%]
+ ........................s...........s.................................................. [ 63%]
+ ...........................................................................s........... [ 69%]
+ .................................s............s.s.................s.................... [ 75%]
+ ...........................................s........................................s.. [ 81%]
+ ................................s...................................................... [ 88%]
+ ....................................................................................... [ 94%]
+ ............................................................s................... [100%]
+ 1368 passed, 15 skipped, 2 xfailed in 13.10s
+ py38: commands[4]> python igor.py remove_extension
+ py38: commands[5]> python igor.py test_with_core pytrace
+ === CPython 3.8.18 with Python tracer (.tox/py38/bin/python) ===
bringing up nodes...
- ........................................................................................................................................................... [ 15%]
- ........................................................................................................................................................... [ 31%]
- ......................................................................s.................................................................................... [ 47%]
- ........................................................................................................................................................... [ 63%]
- ..........................s................................................s............................................................................... [ 79%]
- .................................................................................s......................................................................... [ 95%]
- ......................................s......... [100%]
- 973 passed, 5 skipped in 41.36s
- ____________________________________________________________________________ summary _____________________________________________________________________________
- py37: commands succeeded
- congratulations :)
-
-Tox runs the complete test suite twice for each version of Python you have
-installed. The first run uses the Python implementation of the trace function,
-the second uses the C implementation.
+ ....................................................................................... [ 6%]
+ ....................x..x.............................................s.ss...s.......... [ 12%]
+ ..........................................................................s.ss.s..s.... [ 18%]
+ s........s........s..s...s............................................................. [ 25%]
+ ................s...................................................................... [ 31%]
+ ...................s......ss..........................ssss...........................s. [ 37%]
+ ....................................................................................... [ 43%]
+ ....................................................................................... [ 50%]
+ .................................................................s..................... [ 56%]
+ ........s..s.........sss.s............................................................. [ 62%]
+ ...................................................................ss.................. [ 69%]
+ ..............................................ss...........s.s......................... [ 75%]
+ ................................ssssss................................................. [ 81%]
+ ......s...ss........ss................................................................. [ 88%]
+ .............................................s......................................... [ 94%]
+ .......................................................................ss....... [100%]
+ 1333 passed, 50 skipped, 2 xfailed in 11.17s
+ py38: OK (37.60=setup[9.10]+cmd[0.11,0.49,2.83,13.59,0.11,11.39] seconds)
+ congratulations :) (37.91 seconds)
+
+Tox runs the complete test suite a few times for each version of Python you
+have installed. The first run uses the C implementation of the trace function,
+the second uses the Python implementation. If `sys.monitoring`_ is available,
+the suite will be run again with that core.
To limit tox to just a few versions of Python, use the ``-e`` switch::
- $ tox -e py37,py39
+ $ python3 -m tox -e py38,py39
-To run just a few tests, you can use `pytest test selectors`_::
+On the tox command line, options after ``--`` are passed to pytest. To run
+just a few tests, you can use `pytest test selectors`_::
- $ tox tests/test_misc.py
- $ tox tests/test_misc.py::HasherTest
- $ tox tests/test_misc.py::HasherTest::test_string_hashing
+ $ python3 -m tox -- tests/test_misc.py
+ $ python3 -m tox -- tests/test_misc.py::HasherTest
+ $ python3 -m tox -- tests/test_misc.py::HasherTest::test_string_hashing
-These command run the tests in one file, one class, and just one test,
-respectively.
+.. with COVERAGE_ONE_CORE=1
-You can also affect the test runs with environment variables. Define any of
-these as 1 to use them:
+These commands run the tests in one file, one class, and just one test,
+respectively. The pytest ``-k`` option selects tests based on a word in their
+name, which can be very convenient for ad-hoc test selection. Of course you
+can combine tox and pytest options::
-- ``COVERAGE_NO_PYTRACER=1`` disables the Python tracer if you only want to
- run the CTracer tests.
+ $ python3 -m tox -q -e py310 -- -n 0 -vv -k hash
+ ================================== test session starts ===================================
+ platform darwin -- Python 3.10.13, pytest-8.1.1, pluggy-1.4.0 -- /Users/ned/coverage/trunk/.tox/py310/bin/python
+ cachedir: .tox/py310/.pytest_cache
+ hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase(PosixPath('/Users/ned/coverage/trunk/.hypothesis/examples'))
+ rootdir: /Users/ned/coverage/trunk
+ configfile: pyproject.toml
+ plugins: flaky-3.8.1, xdist-3.5.0, hypothesis-6.99.6
+ collected 1385 items / 1375 deselected / 10 selected
+ run-last-failure: no previously failed tests, not deselecting items.
-- ``COVERAGE_NO_CTRACER=1`` disables the C tracer if you only want to run the
- PyTracer tests.
+ tests/test_data.py::CoverageDataTest::test_add_to_hash_with_lines PASSED [ 10%]
+ tests/test_data.py::CoverageDataTest::test_add_to_hash_with_arcs PASSED [ 20%]
+ tests/test_data.py::CoverageDataTest::test_add_to_lines_hash_with_missing_file PASSED [ 30%]
+ tests/test_data.py::CoverageDataTest::test_add_to_arcs_hash_with_missing_file PASSED [ 40%]
+ tests/test_execfile.py::RunPycFileTest::test_running_hashed_pyc PASSED [ 50%]
+ tests/test_misc.py::HasherTest::test_string_hashing PASSED [ 60%]
+ tests/test_misc.py::HasherTest::test_bytes_hashing PASSED [ 70%]
+ tests/test_misc.py::HasherTest::test_unicode_hashing PASSED [ 80%]
+ tests/test_misc.py::HasherTest::test_dict_hashing PASSED [ 90%]
+ tests/test_misc.py::HasherTest::test_dict_collision PASSED [100%]
-- ``COVERAGE_ONE_TRACER=1`` will use only one tracer for each Python version.
- This will use the C tracer if it is available, or the Python tracer if not.
+ ========================== 10 passed, 1375 deselected in 0.60s ===========================
+ Skipping tests with Python tracer: Only one core: not running pytrace
+ py310: OK (6.41 seconds)
+ congratulations :) (6.72 seconds)
+
+
+You can also affect the test runs with environment variables:
+
+- ``COVERAGE_ONE_CORE=1`` will use only one tracing core for each Python
+ version. This isn't about CPU cores, it's about the central code that tracks
+ execution. This will use the preferred core for the Python version and
+ implementation being tested.
+
+- ``COVERAGE_TEST_CORES=...`` defines the cores to run tests on. Three cores
+  are available; specify them as a comma-separated string (see the example
+  after this list):
+
+ - ``ctrace`` is a sys.settrace function implemented in C.
+ - ``pytrace`` is a sys.settrace function implemented in Python.
+ - ``sysmon`` is a `sys.monitoring`_ implementation.
- ``COVERAGE_AST_DUMP=1`` will dump the AST tree as it is being used during
code parsing.
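+
+For example, a sketch selecting two cores for one Python version (assuming a
+py312 environment is configured)::
+
+    $ COVERAGE_TEST_CORES=ctrace,sysmon python3 -m tox -e py312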
@@ -137,11 +213,9 @@ these as 1 to use them:
There are other environment variables that affect tests. I use `set_env.py`_
as a simple terminal interface to see and set them.
-Of course, run all the tests on every version of Python you have, before
+Of course, run all the tests on every version of Python you have before
submitting a change.
-.. _pytest test selectors: https://doc.pytest.org/en/stable/usage.html#specifying-which-tests-to-run
-
Lint, etc
---------
@@ -156,7 +230,8 @@ some warnings. Please try to keep it that way, but don't let pylint warnings
keep you from sending patches. I can clean them up.
Lines should be kept to a 100-character maximum length. I recommend an
-`editorconfig.org`_ plugin for your editor of choice.
+`editorconfig.org`_ plugin for your editor of choice, which will also help with
+indentation, line endings, and so on.
Other style questions are best answered by looking at the existing code.
Formatting of docstrings, comments, long lines, and so on, should match the
@@ -187,7 +262,7 @@ files. These are created by running ``make upgrade``.
.. minimum of PYVERSIONS:
-It's important to use Python 3.7 to run ``make upgrade`` so that the pinned
+It's important to use Python 3.9 to run ``make upgrade`` so that the pinned
versions will work on all of the Python versions currently supported by
coverage.py.
@@ -220,7 +295,10 @@ All contributions are expected to include tests for new functionality and
fixes. If you need help writing tests, please ask.
+.. _fork the repo: https://docs.github.com/en/get-started/quickstart/fork-a-repo
.. _editorconfig.org: http://editorconfig.org
.. _tox: https://tox.readthedocs.io/
.. _black: https://pypi.org/project/black/
.. _set_env.py: https://nedbatchelder.com/blog/201907/set_envpy.html
+.. _pytest test selectors: https://doc.pytest.org/en/stable/usage.html#specifying-which-tests-to-run
+.. _sys.monitoring: https://docs.python.org/3/library/sys.monitoring.html
diff --git a/doc/dict.txt b/doc/dict.txt
index 63544dcde..41d8c94f4 100644
--- a/doc/dict.txt
+++ b/doc/dict.txt
@@ -1,18 +1,36 @@
+API
+BOM
+BTW
+CPython
+CTracer
+Cobertura
+Consolas
+Cython
+DOCTYPE
+DOM
+HTML
+Jinja
+Mako
+OK
+PYTHONPATH
+TODO
+Tidelift
+URL
+UTF
+XML
activestate
-api
apache
-API
+api
args
argv
ascii
+async
basename
basenames
bitbucket
-BOM
bom
boolean
booleans
-BTW
btw
builtin
builtins
@@ -27,7 +45,6 @@ canonicalizes
chdir'd
clickable
cmdline
-Cobertura
codecs
colorsys
combinable
@@ -38,17 +55,16 @@ configurability
configurability's
configurer
configurers
-Consolas
cov
coveragepy
coveragerc
covhtml
-CPython
css
-CTracer
-Cython
+dataio
datetime
deallocating
+debounce
+decodable
dedent
defaultdict
deserialize
@@ -62,8 +78,6 @@ docstring
docstrings
doctest
doctests
-DOCTYPE
-DOM
encodable
encodings
endfor
@@ -75,6 +89,7 @@ exec'ing
execfile
executability
executable's
+execv
expr
extensibility
favicon
@@ -96,10 +111,10 @@ github
gitignore
globals
greenlet
+hintedness
hotkey
hotkeys
html
-HTML
htmlcov
http
https
@@ -111,15 +126,13 @@ ints
invariants
iterable
iterables
-Jinja
-jquery
jQuery
+jquery
json
jython
kwargs
lcov
localStorage
-Mako
manylinux
matcher
matchers
@@ -136,8 +149,10 @@ monospaced
morf
morfs
multi
+multiproc
mumbo
mycode
+mypy
namespace
namespaces
nano
@@ -145,13 +160,14 @@ nbsp
ned
nedbat
nedbatchelder
+newb
+nocover
nosetests
nullary
num
numbits
numpy
ok
-OK
opcode
opcodes
optparse
@@ -161,13 +177,15 @@ overridable
parallelizing
parsable
parsers
+pathlib
pathnames
plugin
plugins
pragma
-pragmas
pragma'd
+pragmas
pre
+premain
prepended
prepending
programmability
@@ -175,17 +193,19 @@ programmatically
py
py's
pyc
+pyenv
pyexpat
+pylib
pylint
pyproject
pypy
pytest
pythonpath
-PYTHONPATH
pyw
rcfile
readme
readthedocs
+realpath
recordable
refactored
refactoring
@@ -194,9 +214,11 @@ regex
regexes
reimplemented
renderer
+rootname
runnable
runtime
scrollbar
+septatrix
serializable
settrace
setuptools
@@ -217,12 +239,10 @@ symlink
symlinks
syntaxes
sys
-templite
templating
+templite
testability
-Tidelift
todo
-TODO
tokenization
tokenize
tokenized
@@ -247,9 +267,8 @@ unparsable
unrunnable
unsubscriptable
untokenizable
+usecache
username
-URL
-UTF
utf
vendored
versionadded
@@ -258,7 +277,7 @@ wikipedia
wildcard
wildcards
www
+xdist
xml
-XML
xrange
xyzzy
diff --git a/doc/excluding.rst b/doc/excluding.rst
index 4651e6bba..a3481fb5f 100644
--- a/doc/excluding.rst
+++ b/doc/excluding.rst
@@ -1,6 +1,16 @@
.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+.. This file is processed with cog to create the tabbed multi-syntax
+ configuration examples. If those are wrong, the quality checks will fail.
+ Running "make prebuild" checks them and produces the output.
+
+.. [[[cog
+ from cog_helpers import show_configs
+.. ]]]
+.. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e)
+
+
.. _excluding:
===============================
@@ -24,10 +34,10 @@ code, the "if debug" clause is excluded from reporting::
log_message(msg, a)
b = my_function2()
-Any line with a comment of "pragma: no cover" is excluded. If that line
-introduces a clause, for example, an if clause, or a function or class
-definition, then the entire clause is also excluded. Here the __repr__
-function is not reported as missing::
+By default, any line with a comment of ``pragma: no cover`` is excluded. If
+that line introduces a clause, for example, an ``if`` clause, or a function or
+class definition, then the entire clause is also excluded. Here the
+``__repr__`` function is not reported as missing::
class MyObject(object):
def __init__(self):
@@ -63,13 +73,17 @@ line, so it isn't considered a branch at all.
Advanced exclusion
------------------
-Coverage.py identifies exclusions by matching lines against a list of regular
-expressions. Using :ref:`configuration files ` or the coverage
+Coverage.py identifies exclusions by matching source code against a list of
+regular expressions. Using :ref:`configuration files <config>` or the coverage
:ref:`API <api>`, you can add to that list. This is useful if you have
often-used constructs to exclude that can be matched with a regex. You can
exclude them all at once without littering your code with exclusion pragmas.
-If the matched line introduces a block, the entire block is excluded from
+Before coverage.py 7.6.0, the regexes were matched against single lines of your
+source code. Now they can be multi-line regexes that find matches across
+lines. See :ref:`multi_line_exclude`.
+
+If a matched line introduces a block, the entire block is excluded from
reporting. Matching a ``def`` line or decorator line will exclude an entire
function.
@@ -77,33 +91,143 @@ function.
For example, you might decide that __repr__ functions are usually only used in
debugging code, and are uninteresting to test themselves. You could exclude
-all of them by adding a regex to the exclusion list::
-
- [report]
- exclude_lines =
- def __repr__
-
-For example, here's a list of exclusions I've used::
-
- [report]
- exclude_lines =
- pragma: no cover
- def __repr__
- if self.debug:
- if settings.DEBUG
- raise AssertionError
- raise NotImplementedError
- if 0:
- if __name__ == .__main__.:
- if TYPE_CHECKING:
- class .*\bProtocol\):
- @(abc\.)?abstractmethod
-
-Note that when using the ``exclude_lines`` option in a configuration file, you
-are taking control of the entire list of regexes, so you need to re-specify the
-default "pragma: no cover" match if you still want it to apply. The
-``exclude_also`` option can be used instead to preserve the default
-exclusions while adding new ones.
+all of them by adding a regex to the exclusion list:
+
+.. [[[cog
+ show_configs(
+ ini=r"""
+ [report]
+ exclude_also =
+ def __repr__
+ """,
+ toml=r"""
+ [tool.coverage.report]
+ exclude_also = [
+ "def __repr__",
+ ]
+ """,
+ )
+.. ]]]
+
+.. tabs::
+
+ .. code-tab:: ini
+ :caption: .coveragerc
+
+ [report]
+ exclude_also =
+ def __repr__
+
+ .. code-tab:: toml
+ :caption: pyproject.toml
+
+ [tool.coverage.report]
+ exclude_also = [
+ "def __repr__",
+ ]
+
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+
+ [coverage:report]
+ exclude_also =
+ def __repr__
+
+.. [[[end]]] (checksum: e3194120285bcbac38a92b109edaa20c)
+
+For example, here's a list of exclusions I've used:
+
+.. [[[cog
+ show_configs(
+ ini=r"""
+ [report]
+ exclude_also =
+ def __repr__
+ if self.debug:
+ if settings.DEBUG
+ raise AssertionError
+ raise NotImplementedError
+ if 0:
+ if __name__ == .__main__.:
+ if TYPE_CHECKING:
+ class .*\bProtocol\):
+ @(abc\.)?abstractmethod
+ """,
+ toml=r"""
+ [tool.coverage.report]
+ exclude_also = [
+ "def __repr__",
+ "if self.debug:",
+ "if settings.DEBUG",
+ "raise AssertionError",
+ "raise NotImplementedError",
+ "if 0:",
+ "if __name__ == .__main__.:",
+ "if TYPE_CHECKING:",
+ "class .*\\bProtocol\\):",
+ "@(abc\\.)?abstractmethod",
+ ]
+ """,
+ )
+.. ]]]
+
+.. tabs::
+
+ .. code-tab:: ini
+ :caption: .coveragerc
+
+ [report]
+ exclude_also =
+ def __repr__
+ if self.debug:
+ if settings.DEBUG
+ raise AssertionError
+ raise NotImplementedError
+ if 0:
+ if __name__ == .__main__.:
+ if TYPE_CHECKING:
+ class .*\bProtocol\):
+ @(abc\.)?abstractmethod
+
+ .. code-tab:: toml
+ :caption: pyproject.toml
+
+ [tool.coverage.report]
+ exclude_also = [
+ "def __repr__",
+ "if self.debug:",
+ "if settings.DEBUG",
+ "raise AssertionError",
+ "raise NotImplementedError",
+ "if 0:",
+ "if __name__ == .__main__.:",
+ "if TYPE_CHECKING:",
+ "class .*\\bProtocol\\):",
+ "@(abc\\.)?abstractmethod",
+ ]
+
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+
+ [coverage:report]
+ exclude_also =
+ def __repr__
+ if self.debug:
+ if settings.DEBUG
+ raise AssertionError
+ raise NotImplementedError
+ if 0:
+ if __name__ == .__main__.:
+ if TYPE_CHECKING:
+ class .*\bProtocol\):
+ @(abc\.)?abstractmethod
+
+.. [[[end]]] (checksum: 91f09828a1e6d0e92543e14a8ea3ba39)
+
+The :ref:`config_report_exclude_also` option adds regexes to the built-in
+default list so that you can add your own exclusions. The older
+:ref:`config_report_exclude_lines` option completely overwrites the list of
+regexes.
The regexes only have to match part of a line. Be careful not to over-match. A
value of ``...`` will match any line with three or more characters in it.
@@ -112,6 +236,111 @@ A similar pragma, "no branch", can be used to tailor branch coverage
measurement. See :ref:`branch` for details.
+.. _multi_line_exclude:
+
+Multi-line exclusion regexes
+----------------------------
+
+.. versionadded:: 7.6.0
+
+Exclusion regexes can match multi-line regions. All of the lines in a matched
+region will be excluded. If part of the region introduces a block, the entire
+block is excluded even if part of it is outside the matched region.
+
+When writing regexes to match multiple lines, remember that ``"."`` won't match
+a newline character, but ``"\n"`` or ``"(?s:.)"`` will. The regexes in these
+settings are combined, so you cannot use global flags like ``(?s)`` in
+your regexes. Use the scoped flag form instead: ``(?s:...)``.
+
+Here are some examples:
+
+.. [[[cog
+ show_configs(
+ ini=r"""
+ [report]
+ exclude_also =
+ ; 1. Exclude an except clause of a specific form:
+ except ValueError:\n\s*assume\(False\)
+ ; 2. Comments to turn coverage on and off:
+ no cover: start(?s:.)*?no cover: stop
+ ; 3. A pragma comment that excludes an entire file:
+ \A(?s:.*# pragma: exclude file.*)\Z
+ """,
+ toml=r"""
+ [tool.coverage.report]
+ exclude_also = [
+ # 1. Exclude an except clause of a specific form:
+ "except ValueError:\\n\\s*assume\\(False\\)",
+ # 2. Comments to turn coverage on and off:
+ "no cover: start(?s:.)*?no cover: stop",
+ # 3. A pragma comment that excludes an entire file:
+ "\\A(?s:.*# pragma: exclude file.*)\\Z",
+ ]
+ """,
+ )
+.. ]]]
+
+.. tabs::
+
+ .. code-tab:: ini
+ :caption: .coveragerc
+
+ [report]
+ exclude_also =
+ ; 1. Exclude an except clause of a specific form:
+ except ValueError:\n\s*assume\(False\)
+ ; 2. Comments to turn coverage on and off:
+ no cover: start(?s:.)*?no cover: stop
+ ; 3. A pragma comment that excludes an entire file:
+ \A(?s:.*# pragma: exclude file.*)\Z
+
+ .. code-tab:: toml
+ :caption: pyproject.toml
+
+ [tool.coverage.report]
+ exclude_also = [
+ # 1. Exclude an except clause of a specific form:
+ "except ValueError:\\n\\s*assume\\(False\\)",
+ # 2. Comments to turn coverage on and off:
+ "no cover: start(?s:.)*?no cover: stop",
+ # 3. A pragma comment that excludes an entire file:
+ "\\A(?s:.*# pragma: exclude file.*)\\Z",
+ ]
+
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+
+ [coverage:report]
+ exclude_also =
+ ; 1. Exclude an except clause of a specific form:
+ except ValueError:\n\s*assume\(False\)
+ ; 2. Comments to turn coverage on and off:
+ no cover: start(?s:.)*?no cover: stop
+ ; 3. A pragma comment that excludes an entire file:
+ \A(?s:.*# pragma: exclude file.*)\Z
+
+.. [[[end]]] (checksum: ee3ef14b5a5d73f987b924df623a4927)
+
+The first regex matches a specific except line followed by a specific function
+call. Both lines must be present for the exclusion to take effect. Note that
+the regex uses ``"\n\s*"`` to match the newline and the indentation of the
+second line. Without these, the regex won't match.
+
+The second regex creates a pair of comments that can be used to exclude
+statements between them. All lines between ``# no cover: start`` and ``# no
+cover: stop`` will be excluded. The regex doesn't start with ``#`` because
+that's a comment in a .coveragerc file. Be careful with wildcards: we've used
+the non-greedy ``*?`` to match the fewest possible characters between the
+comments. If you used the greedy ``*`` instead, the star would match as many
+as possible, and you could accidentally exclude large swaths of code.
+
+The third regex matches the entire text of a file containing the comment ``#
+pragma: exclude file``. This lets you exclude files from coverage measurement
+with an internal comment instead of naming them in a settings file. This regex
+uses the ``"(?s:...)"`` regex flag to let a dot match any character including a
+newline.
+
+
Excluding source files
----------------------
diff --git a/doc/faq.rst b/doc/faq.rst
index 8252eeb98..bd2dc5bba 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -23,7 +23,7 @@ environment variable.
This will write a line for each file considered, indicating whether it is
traced or not, and if not, why not. Be careful though: the output might be
swallowed by your test runner. If so, a ``COVERAGE_DEBUG_FILE=/tmp/cov.out``
-environemnt variable can direct the output to a file insttead to ensure you see
+environment variable can direct the output to a file instead to ensure you see
everything.
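+
+For example, a sketch of capturing the trace output to a file::
+
+    $ COVERAGE_DEBUG=trace COVERAGE_DEBUG_FILE=/tmp/cov.out coverage run -m pytest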
@@ -121,7 +121,7 @@ Make sure you are using the C trace function. Coverage.py provides two
implementations of the trace function. The C implementation runs much faster.
To see what you are running, use ``coverage debug sys``. The output contains
details of the environment, including a line that says either
-``CTrace: available`` or ``CTracer: unavailable``. If it says unavailable,
+``CTracer: available`` or ``CTracer: unavailable``. If it says unavailable,
then you are using the slow Python implementation.
Try re-installing coverage.py to see what happened and if you get the CTracer
@@ -136,39 +136,21 @@ It's good, but `it isn't perfect`__.
__ https://nedbatchelder.com/blog/200710/flaws_in_coverage_measurement.html
-.. Other resources
- ---------------
-
- There are a number of projects that help integrate coverage.py into other
- systems:
-
- - `trialcoverage`_ is a plug-in for Twisted trial.
-
- .. _trialcoverage: https://pypi.org/project/trialcoverage/
-
- - `pytest-cov`_
-
- .. _pytest-cov: https://pypi.org/project/pytest-cov/
-
- - `django-coverage`_ for use with Django.
-
- .. _django-coverage: https://pypi.org/project/django-coverage/
-
-
Q: Where can I get more help with coverage.py?
..............................................
You can discuss coverage.py or get help using it on the `Python discussion
-forums`_. If you ping me (``@nedbat``), there's a higher chance I'll see the
-post.
+forums`_ or in the `Python Discord`_. If you ping me (``@nedbat``), there's a
+higher chance I'll see the post.
.. _Python discussion forums: https://discuss.python.org/
+.. _Python Discord: https://discord.com/channels/267624335836053506/1253355750684753950
Bug reports are gladly accepted at the `GitHub issue tracker`_.
.. _GitHub issue tracker: https://github.com/nedbat/coveragepy/issues
-`I can be reached`__ in a number of ways, I'm happy to answer questions about
+`I can be reached`__ in a number of ways. I'm happy to answer questions about
using coverage.py.
__ https://nedbatchelder.com/site/aboutned.html
diff --git a/doc/howitworks.rst b/doc/howitworks.rst
index f1599dc70..454a7da9f 100644
--- a/doc/howitworks.rst
+++ b/doc/howitworks.rst
@@ -110,7 +110,7 @@ branches.
Reporting
---------
-Once we have the set of executed lines and missing lines, reporting is just a
+Once we have the set of executed lines and missing lines, reporting is a
matter of formatting that information in a useful way. Each reporting method
(text, HTML, JSON, annotated source, XML) has a different output format, but
the process is the same: write out the information in the particular format,
diff --git a/doc/index.rst b/doc/index.rst
index e5ac5a0ae..f835c3b4e 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -18,8 +18,8 @@ supported on:
.. PYVERSIONS
-* Python versions 3.7 through 3.12.0a6.
-* PyPy3 7.3.11.
+* Python 3.9 through 3.14 alpha 2, including free-threading.
+* PyPy3 versions 3.9 and 3.10.
.. ifconfig:: prerelease
@@ -64,7 +64,7 @@ Getting started is easy:
normally run your test suite, you can use your test runner under coverage.
.. tip::
- If your test runner command starts with "python", just replace the initial
+ If your test runner command starts with "python", replace the initial
"python" with "coverage run".
``python something.py`` becomes ``coverage run something.py``
@@ -73,7 +73,9 @@ Getting started is easy:
Other instructions for specific test runners:
- - **pytest**
+ .. tabs::
+
+ .. tab:: pytest
If you usually use::
@@ -86,32 +88,37 @@ Getting started is easy:
Many people choose to use the `pytest-cov`_ plugin, but for most
purposes, it is unnecessary.
- - **unittest**
+ .. tab:: unittest
Change "python" to "coverage run", so this::
- $ python -m unittest discover
+ $ python3 -m unittest discover
becomes::
$ coverage run -m unittest discover
- .. - **nosetest**
- ..
- .. *Nose has been unmaintained for a long time. You should seriously
- .. consider adopting a different test runner.*
- ..
- .. Change this::
- ..
- .. $ nosetests arg1 arg2
- ..
- .. to this::
- ..
- .. $ coverage run -m nose arg1 arg2
+ .. tab:: nosetest
+
+ .. note:: Nose has been `unmaintained since at least 2015 <nose state_>`_.
+ *You should seriously consider using a different test runner.*
+
+ Change this::
+
+ $ nosetests arg1 arg2
+
+ to::
+
+ $ coverage run -m nose arg1 arg2
+
+ Coverage doesn't distinguish between tests and the code being tested.
+ We `recommend that you include your tests in coverage measurement <include tests_>`_.
To limit coverage measurement to code in the current directory, and also
find files that weren't executed at all, add the ``--source=.`` argument to
- your coverage command line.
+ your coverage command line. You can also :ref:`specify source files to
+ measure <source>` or :ref:`exclude code from measurement <excluding>`.
#. Use ``coverage report`` to report on the results::
@@ -141,6 +148,9 @@ Getting started is easy:
.. _report like this: https://nedbatchelder.com/files/sample_coverage_html/index.html
.. _report like this one: https://nedbatchelder.com/files/sample_coverage_html_beta/index.html
+.. _nose state: https://github.com/nose-devs/nose/commit/0f40fa995384afad77e191636c89eb7d5b8870ca
+.. _include tests: https://nedbatchelder.com/blog/202008/you_should_include_your_tests_in_coverage.html
+
Capabilities
@@ -230,8 +240,10 @@ More information
api
howitworks
plugins
+ other
contributing
trouble
faq
Change history
+ migrating
sleepy
diff --git a/doc/install.rst b/doc/install.rst
index 1b940b4bb..6cc4e9709 100644
--- a/doc/install.rst
+++ b/doc/install.rst
@@ -84,6 +84,6 @@ You can also invoke coverage.py as a module:
.. parsed-literal::
- $ python -m coverage --version
+ $ python3 -m coverage --version
Coverage.py, version |release| with C extension
Documentation at |doc-url|
diff --git a/doc/migrating.rst b/doc/migrating.rst
new file mode 100644
index 000000000..865ede3a0
--- /dev/null
+++ b/doc/migrating.rst
@@ -0,0 +1,66 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+.. _migrating:
+
+==========================
+Migrating between versions
+==========================
+
+New versions of coverage.py or Python might require you to adjust your
+settings, options, or other aspects of how you use coverage.py. This page
+details those changes.
+
+.. _migrating_cov7x:
+
+Migrating to coverage.py 7.x
+----------------------------
+
+Consider these changes when migrating to coverage.py 7.x:
+
+- The way wildcards work when specifying file paths has changed in certain
+  cases in 7.x (see the example after this list):
+
+ - Previously, ``*`` would incorrectly match directory separators, making
+ precise matching difficult. Patterns such as ``*tests/*``
+ will need to be changed to ``*/tests/*``.
+
+  - ``**`` now matches any number of nested directories. If you wish to
+    retain the previous behavior of ``**/tests/*``, use ``*/**/tests/*``
+    instead.
+
+- When remapping file paths with ``[paths]``, a path will be remapped only if
+  the resulting path exists. When upgrading, make sure the result paths in
+  your ``[paths]`` settings exist, since this is now enforced.
+
+- The :ref:`config_report_exclude_also` setting is new in 7.2.0. It adds
+ exclusion regexes while keeping the default built-in set. It's better than
+ the older :ref:`config_report_exclude_lines` setting, which overwrote the
+ entire list. Newer versions of coverage.py will be adding to the default set
+ of exclusions. Using ``exclude_also`` will let you benefit from those
+ updates.
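+
+As an example of the wildcard change, a hypothetical ``omit`` setting written
+for coverage.py 6.x:
+
+.. code-block:: ini
+
+    [run]
+    omit =
+        *tests/*
+        **/vendored/*
+
+would be written like this for 7.x to match the same paths:
+
+.. code-block:: ini
+
+    [run]
+    omit =
+        */tests/*
+        */**/vendored/*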
+
+
+.. _migrating_cov62:
+
+Migrating to coverage.py 6.2
+----------------------------
+
+- The ``--concurrency`` setting changed in 6.2 to accept a list of values. You
+  might need to explicitly list concurrency options that were previously
+  implied. For example, ``--concurrency=multiprocessing`` used to implicitly
+  enable thread concurrency. Now that must be explicitly enabled with
+  ``--concurrency=multiprocessing,thread``, as in the example below.
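+
+  A hypothetical full command line with both options::
+
+      $ coverage run --concurrency=multiprocessing,thread -m pytest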
+
+
+.. _migrating_py312:
+
+Migrating to Python 3.12
+------------------------
+
+Keep these things in mind when running under Python 3.12:
+
+- Python 3.12 now inlines list, dict, and set comprehensions. Previously, they
+ were compiled as functions that were called internally. Coverage.py would
+ warn you if comprehensions weren't fully completed, but this no longer
+ happens with Python 3.12.
diff --git a/doc/other.rst b/doc/other.rst
new file mode 100644
index 000000000..4d3c09f8b
--- /dev/null
+++ b/doc/other.rst
@@ -0,0 +1,136 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+.. _other:
+
+===============
+Other resources
+===============
+
+There are a number of projects that help integrate coverage.py into other
+systems or provide help using it.
+
+There's no guarantee these items are maintained or work well. Some of them
+seem to be quite old. If you have suggestions for updates to this page, `open
+a pull request`_ or `get in touch`_ some other way.
+
+.. _open a pull request: https://github.com/nedbat/coveragepy/blob/master/doc/other.rst
+.. _get in touch: https://nedbatchelder.com/site/aboutned.html
+
+
+Test runners
+------------
+
+Helpers for using coverage with specific test runners.
+
+* `pytest-cov`__ is a pytest plugin to coordinate coverage.py usage.
+
+ __ https://pypi.org/project/pytest-cov/
+
+* `trialcoverage`__ is a plug-in for Twisted trial.
+
+ __ https://pypi.org/project/trialcoverage/
+
+
+Configuration helpers
+---------------------
+
+Tools to provide more control over how coverage is configured.
+
+* `covdefaults`__ provides "sensible" default settings for coverage.
+
+ __ https://github.com/asottile/covdefaults
+
+* `coverage-conditional-plugin`__ lets you use conditions instead of simple
+  "no cover" pragmas to control which lines are excluded, for example on
+  different platforms, Python versions, or sets of installed dependencies.
+
+ __ https://github.com/wemake-services/coverage-conditional-plugin
+
+* `coverage-simple-excludes`__ defines new comment formats for excluding code
+  based on Python version and operating system.
+
+ __ https://pypi.org/project/coverage-simple-excludes/
+
+
+Language plugins
+----------------
+
+Coverage.py plugins to enable coverage measurement of other languages.
+
+* `django-coverage-plugin`__ measures the coverage of Django templates.
+
+ __ https://pypi.org/project/django-coverage-plugin/
+
+* `Cython`__ provides a plugin for measuring Cythonized code.
+
+ __ https://cython.readthedocs.io/en/latest/src/tutorial/profiling_tutorial.html#enabling-coverage-analysis
+
+* `coverage-jinja-plugin`__ is an incomplete Jinja2 plugin.
+
+ __ https://github.com/MrSenko/coverage-jinja-plugin
+
+* `coverage-sh`__ measures code coverage of shell (sh or bash) scripts executed
+ from Python with subprocess.
+
+ __ https://github.com/lackhove/coverage-sh
+
+* `hy-coverage`__ supports the Hy language.
+
+ __ https://github.com/timmartin/hy-coverage
+
+* `coverage-mako-plugin`__ measures coverage in Mako templates.
+ Doesn't work yet, probably needs some changes in Mako itself.
+
+ __ https://bitbucket-archive.softwareheritage.org/projects/ne/ned/coverage-mako-plugin.html
+
+
+Reporting helpers
+-----------------
+
+Helpers for seeing the results.
+
+* `python-coverage-comment-action`__ can publish a delta coverage report as a
+  pull request comment, create a coverage badge, or generate a dashboard to
+  display in your readme.
+
+ __ https://github.com/py-cov-action/python-coverage-comment-action
+
+* `diff-cover`__ reports on the coverage of lines changed in a pull request.
+
+ __ https://pypi.org/project/diff-cover/
+
+* `cuvner`__ offers alternate visualizations of coverage data, including ones
+ for use in terminals.
+
+ __ https://meejah.ca/projects/cuvner
+
+* `emacs-python-coverage`__ is an experimental Emacs package to report code
+ coverage output produced by Python's coverage package directly inside Emacs
+ buffers.
+
+ __ https://github.com/wbolster/emacs-python-coverage
+
+* `python-genbadge`__ provides a set of command line utilities to generate
+ badges for tools that do not provide one, including coverage badges.
+
+ __ https://smarie.github.io/python-genbadge/
+
+
+Other articles
+--------------
+
+Writings about ways to enhance your use of coverage.py.
+
+* `How to Ditch Codecov for Python Projects`__: using GitHub Actions to manage
+ coverage across versions and report on results.
+
+ __ https://hynek.me/articles/ditch-codecov-python/
+
+* `Making a coverage badge`__: using GitHub Actions to produce a colored badge.
+
+ __ https://nedbatchelder.com/blog/202209/making_a_coverage_badge.html
+
+* `Coverage goals`__: a sidecar tool for reporting on per-file coverage goals.
+
+ __ https://nedbatchelder.com/blog/202111/coverage_goals.html
diff --git a/doc/plugins.rst b/doc/plugins.rst
index a289ba7e6..147fb1db4 100644
--- a/doc/plugins.rst
+++ b/doc/plugins.rst
@@ -1,6 +1,16 @@
.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+.. This file is processed with cog to create the tabbed multi-syntax
+ configuration examples. If those are wrong, the quality checks will fail.
+ Running "make prebuild" checks them and produces the output.
+
+.. [[[cog
+ from cog_helpers import show_configs
+.. ]]]
+.. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e)
+
+
.. _plugins:
========
@@ -15,6 +25,8 @@ implementing coverage measurement for non-Python files.
Information about using plug-ins is on this page. To write a plug-in, see
:ref:`api_plugin`.
+See :ref:`other` for available plug-ins.
+
.. versionadded:: 4.0
@@ -36,48 +48,90 @@ a coverage.py plug-in called ``something.plugin``.
``plugins`` setting indicates your plug-in. It's a list of importable
module names of plug-ins:
- .. code-block:: ini
+ .. [[[cog
+ show_configs(
+ ini=r"""
+ [run]
+ plugins =
+ something.plugin
+ """,
+ toml=r"""
+ [tool.coverage.run]
+ plugins = [ "something.plugin" ]
+ """,
+ )
+ .. ]]]
- [run]
- plugins =
- something.plugin
+ .. tabs::
-#. If the plug-in needs its own configuration, you can add those settings in
- the .coveragerc file in a section named for the plug-in:
+ .. code-tab:: ini
+ :caption: .coveragerc
- .. code-block:: ini
+ [run]
+ plugins =
+ something.plugin
- [something.plugin]
- option1 = True
- option2 = abc.foo
+ .. code-tab:: toml
+ :caption: pyproject.toml
- Check the documentation for the plug-in for details on the options it takes.
+ [tool.coverage.run]
+ plugins = [ "something.plugin" ]
-#. Run your tests with coverage.py as you usually would. If you get a message
- like "Plugin file tracers (something.plugin) aren't supported with
- PyTracer," then you don't have the :ref:`C extension `
- installed. The C extension is needed for certain plug-ins.
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
+ [coverage:run]
+ plugins =
+ something.plugin
-Available plug-ins
-------------------
+ .. [[[end]]] (checksum: 6e866323d4bc319d42e3199b08615111)
-Some coverage.py plug-ins you might find useful:
+#. If the plug-in needs its own configuration, you can add those settings in
+ the .coveragerc file in a section named for the plug-in:
+
+ .. [[[cog
+ show_configs(
+ ini=r"""
+ [something.plugin]
+ option1 = True
+ option2 = abc.foo
+ """,
+ toml=r"""
+ [tool.coverage.something.plugin]
+ option1 = true
+ option2 = "abc.foo"
+ """,
+ )
+ .. ]]]
+
+ .. tabs::
-* `Django template coverage.py plug-in`__: for measuring coverage in Django
- templates.
+ .. code-tab:: ini
+ :caption: .coveragerc
- .. __: https://pypi.org/project/django_coverage_plugin/
+ [something.plugin]
+ option1 = True
+ option2 = abc.foo
-* `Conditional coverage plug-in`__: for measuring coverage based
- on any rules you define!
- Can exclude different lines of code that are only executed
- on different platforms, python versions,
- and with different dependencies installed.
+ .. code-tab:: toml
+ :caption: pyproject.toml
- .. __: https://github.com/wemake-services/coverage-conditional-plugin
+ [tool.coverage.something.plugin]
+ option1 = true
+ option2 = "abc.foo"
-* `Mako template coverage plug-in`__: for measuring coverage in Mako templates.
- Doesn't work yet, probably needs some changes in Mako itself.
+ .. code-tab:: ini
+ :caption: setup.cfg or tox.ini
- .. __: https://bitbucket-archive.softwareheritage.org/projects/ne/ned/coverage-mako-plugin.html
+ [coverage:something.plugin]
+ option1 = True
+ option2 = abc.foo
+
+ .. [[[end]]] (checksum: b690115dbe7f6c7806567e009b5715c4)
+
+ Check the documentation for the plug-in for details on the options it takes.
+
+#. Run your tests with coverage.py as you usually would. If you get a message
+ like "Plugin file tracers (something.plugin) aren't supported with
+ PyTracer," then you don't have the :ref:`C extension `
+ installed. The C extension is needed for certain plug-ins.
diff --git a/doc/python-coverage.1.txt b/doc/python-coverage.1.txt
index 9d38f4f73..05e0c6004 100644
--- a/doc/python-coverage.1.txt
+++ b/doc/python-coverage.1.txt
@@ -384,8 +384,7 @@ COMMAND REFERENCE
A list of packages or directories of code to be measured.
\--timid
- Use a simpler but slower trace method. Try this if you get
- seemingly impossible results!
+ Use the slower Python trace function core.
**xml** [ `options` ... ] [ `MODULES` ... ]
diff --git a/doc/requirements.in b/doc/requirements.in
index 42eca4052..4486c06ac 100644
--- a/doc/requirements.in
+++ b/doc/requirements.in
@@ -2,17 +2,17 @@
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
# PyPI requirements input for building documentation for coverage.py
-# "make upgrade" turns this into doc/requirements.pip
+# "make doc_upgrade" turns this into doc/requirements.pip
-c ../requirements/pins.pip
cogapp
-#doc8
+doc8
pyenchant
-scriv # for writing GitHub releases
sphinx
sphinx-autobuild
sphinx_rtd_theme
-#sphinx-tabs
+sphinx-code-tabs
+sphinx-lint
sphinxcontrib-restbuilder
sphinxcontrib-spelling
diff --git a/doc/requirements.pip b/doc/requirements.pip
index 5ef2a81b2..20b13366e 100644
--- a/doc/requirements.pip
+++ b/doc/requirements.pip
@@ -1,324 +1,119 @@
#
-# This file is autogenerated by pip-compile with Python 3.7
+# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
-# make upgrade
+# make doc_upgrade
#
-alabaster==0.7.13 \
- --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
- --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
+alabaster==1.0.0
# via sphinx
-attrs==22.2.0 \
- --hash=sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836 \
- --hash=sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99
- # via scriv
-babel==2.12.1 \
- --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
- --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
+anyio==4.7.0
+ # via
+ # starlette
+ # watchfiles
+babel==2.16.0
# via sphinx
-certifi==2022.12.7 \
- --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
- --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
+certifi==2024.12.14
# via requests
-charset-normalizer==3.1.0 \
- --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \
- --hash=sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1 \
- --hash=sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e \
- --hash=sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373 \
- --hash=sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62 \
- --hash=sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230 \
- --hash=sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be \
- --hash=sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c \
- --hash=sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0 \
- --hash=sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448 \
- --hash=sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f \
- --hash=sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649 \
- --hash=sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d \
- --hash=sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0 \
- --hash=sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706 \
- --hash=sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a \
- --hash=sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59 \
- --hash=sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23 \
- --hash=sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5 \
- --hash=sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb \
- --hash=sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e \
- --hash=sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e \
- --hash=sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c \
- --hash=sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28 \
- --hash=sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d \
- --hash=sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41 \
- --hash=sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974 \
- --hash=sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce \
- --hash=sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f \
- --hash=sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1 \
- --hash=sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d \
- --hash=sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8 \
- --hash=sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017 \
- --hash=sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31 \
- --hash=sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7 \
- --hash=sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8 \
- --hash=sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e \
- --hash=sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14 \
- --hash=sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd \
- --hash=sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d \
- --hash=sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795 \
- --hash=sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b \
- --hash=sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b \
- --hash=sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b \
- --hash=sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203 \
- --hash=sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f \
- --hash=sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19 \
- --hash=sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1 \
- --hash=sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a \
- --hash=sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac \
- --hash=sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9 \
- --hash=sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0 \
- --hash=sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137 \
- --hash=sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f \
- --hash=sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6 \
- --hash=sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5 \
- --hash=sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909 \
- --hash=sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f \
- --hash=sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0 \
- --hash=sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324 \
- --hash=sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755 \
- --hash=sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb \
- --hash=sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854 \
- --hash=sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c \
- --hash=sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60 \
- --hash=sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84 \
- --hash=sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0 \
- --hash=sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b \
- --hash=sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1 \
- --hash=sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531 \
- --hash=sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1 \
- --hash=sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11 \
- --hash=sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326 \
- --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \
- --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab
+charset-normalizer==3.4.1
# via requests
-click==8.1.3 \
- --hash=sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e \
- --hash=sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48
- # via
- # click-log
- # scriv
-click-log==0.4.0 \
- --hash=sha256:3970f8570ac54491237bcdb3d8ab5e3eef6c057df29f8c3d1151a51a9c23b975 \
- --hash=sha256:a43e394b528d52112af599f2fc9e4b7cf3c15f94e53581f74fa6867e68c91756
- # via scriv
-cogapp==3.3.0 \
- --hash=sha256:1be95183f70282422d594fa42426be6923070a4bd8335621f6347f3aeee81db0 \
- --hash=sha256:8b5b5f6063d8ee231961c05da010cb27c30876b2279e23ad0eae5f8f09460d50
+click==8.1.8
+ # via uvicorn
+cogapp==3.4.1
# via -r doc/requirements.in
-colorama==0.4.6 \
- --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
- --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
+colorama==0.4.6
# via sphinx-autobuild
-docutils==0.18.1 \
- --hash=sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c \
- --hash=sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06
+doc8==1.1.2
+ # via -r doc/requirements.in
+docutils==0.21.2
# via
+ # doc8
+ # restructuredtext-lint
# sphinx
# sphinx-rtd-theme
-idna==3.4 \
- --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
- --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
- # via requests
-imagesize==1.4.1 \
- --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
- --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
- # via sphinx
-importlib-metadata==6.0.0 \
- --hash=sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad \
- --hash=sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d
+h11==0.14.0
+ # via uvicorn
+idna==3.10
# via
- # click
- # sphinx
- # sphinxcontrib-spelling
-jinja2==3.1.2 \
- --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
- --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
- # via
- # scriv
- # sphinx
-livereload==2.6.3 \
- --hash=sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869 \
- --hash=sha256:ad4ac6f53b2d62bb6ce1a5e6e96f1f00976a32348afedcb4b6d68df2a1d346e4
- # via sphinx-autobuild
-markupsafe==2.1.2 \
- --hash=sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed \
- --hash=sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc \
- --hash=sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2 \
- --hash=sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460 \
- --hash=sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7 \
- --hash=sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0 \
- --hash=sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1 \
- --hash=sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa \
- --hash=sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03 \
- --hash=sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323 \
- --hash=sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65 \
- --hash=sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013 \
- --hash=sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036 \
- --hash=sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f \
- --hash=sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4 \
- --hash=sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419 \
- --hash=sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2 \
- --hash=sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619 \
- --hash=sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a \
- --hash=sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a \
- --hash=sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd \
- --hash=sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7 \
- --hash=sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666 \
- --hash=sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65 \
- --hash=sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859 \
- --hash=sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625 \
- --hash=sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff \
- --hash=sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156 \
- --hash=sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd \
- --hash=sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba \
- --hash=sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f \
- --hash=sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1 \
- --hash=sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094 \
- --hash=sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a \
- --hash=sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513 \
- --hash=sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed \
- --hash=sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d \
- --hash=sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3 \
- --hash=sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147 \
- --hash=sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c \
- --hash=sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603 \
- --hash=sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601 \
- --hash=sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a \
- --hash=sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1 \
- --hash=sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d \
- --hash=sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3 \
- --hash=sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54 \
- --hash=sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2 \
- --hash=sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6 \
- --hash=sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58
+ # anyio
+ # requests
+imagesize==1.4.1
+ # via sphinx
+jinja2==3.1.5
+ # via sphinx
+markupsafe==3.0.2
# via jinja2
-packaging==23.0 \
- --hash=sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2 \
- --hash=sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97
+packaging==24.2
# via sphinx
-pyenchant==3.2.2 \
- --hash=sha256:1cf830c6614362a78aab78d50eaf7c6c93831369c52e1bb64ffae1df0341e637 \
- --hash=sha256:5a636832987eaf26efe971968f4d1b78e81f62bca2bde0a9da210c7de43c3bce \
- --hash=sha256:5facc821ece957208a81423af7d6ec7810dad29697cb0d77aae81e4e11c8e5a6 \
- --hash=sha256:6153f521852e23a5add923dbacfbf4bebbb8d70c4e4bad609a8e0f9faeb915d1
+pbr==6.1.0
+ # via stevedore
+polib==1.2.0
+ # via sphinx-lint
+pyenchant==3.2.2
# via
# -r doc/requirements.in
# sphinxcontrib-spelling
-pygments==2.14.0 \
- --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
- --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
- # via sphinx
-pytz==2022.7.1 \
- --hash=sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0 \
- --hash=sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a
- # via babel
-requests==2.28.2 \
- --hash=sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa \
- --hash=sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf
+pygments==2.18.0
# via
- # scriv
+ # doc8
# sphinx
-scriv==1.2.1 \
- --hash=sha256:0ceec6243ebf02f6a685507eec72f890ca9d9da4cafcfcfce640b1f027cec17d \
- --hash=sha256:95edfd76642cf7ae6b5cd40975545d8af58f6398cabfe83ff755e8eedb8ddd4e
- # via -r doc/requirements.in
-six==1.16.0 \
- --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
- --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
- # via livereload
-snowballstemmer==2.2.0 \
- --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \
- --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a
+regex==2024.11.6
+ # via sphinx-lint
+requests==2.32.3
+ # via
+ # sphinx
+ # sphinxcontrib-spelling
+restructuredtext-lint==1.4.0
+ # via doc8
+sniffio==1.3.1
+ # via anyio
+snowballstemmer==2.2.0
# via sphinx
-sphinx==5.3.0 \
- --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
- --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==8.1.3
# via
# -r doc/requirements.in
# sphinx-autobuild
+ # sphinx-code-tabs
# sphinx-rtd-theme
+ # sphinxcontrib-jquery
# sphinxcontrib-restbuilder
# sphinxcontrib-spelling
-sphinx-autobuild==2021.3.14 \
- --hash=sha256:8fe8cbfdb75db04475232f05187c776f46f6e9e04cacf1e49ce81bdac649ccac \
- --hash=sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05
+sphinx-autobuild==2024.10.3
+ # via -r doc/requirements.in
+sphinx-code-tabs==0.5.5
+ # via -r doc/requirements.in
+sphinx-lint==1.0.0
# via -r doc/requirements.in
-sphinx-rtd-theme==1.2.0 \
- --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
- --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
+sphinx-rtd-theme==3.0.2
# via -r doc/requirements.in
-sphinxcontrib-applehelp==1.0.2 \
- --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
- --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==2.0.0
# via sphinx
-sphinxcontrib-devhelp==1.0.2 \
- --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \
- --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4
+sphinxcontrib-devhelp==2.0.0
# via sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
- --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
- --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.1.0
# via sphinx
-sphinxcontrib-jquery==2.0.0 \
- --hash=sha256:8fb65f6dba84bf7bcd1aea1f02ab3955ac34611d838bcc95d4983b805b234daa \
- --hash=sha256:ed47fa425c338ffebe3c37e1cdb56e30eb806116b85f01055b158c7057fdb995
+sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
-sphinxcontrib-jsmath==1.0.1 \
- --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
- --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
+sphinxcontrib-jsmath==1.0.1
# via sphinx
-sphinxcontrib-qthelp==1.0.3 \
- --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \
- --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6
+sphinxcontrib-qthelp==2.0.0
# via sphinx
-sphinxcontrib-restbuilder==0.3 \
- --hash=sha256:6b3ee9394b5ec5e73e6afb34d223530d0b9098cb7562f9c5e364e6d6b41410ce \
- --hash=sha256:6ba2ddc7a87d845c075c1b2e00d541bd1c8400488e50e32c9b4169ccdd9f30cb
+sphinxcontrib-restbuilder==0.3
# via -r doc/requirements.in
-sphinxcontrib-serializinghtml==1.1.5 \
- --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \
- --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952
+sphinxcontrib-serializinghtml==2.0.0
# via sphinx
-sphinxcontrib-spelling==8.0.0 \
- --hash=sha256:199d0a16902ad80c387c2966dc9eb10f565b1fb15ccce17210402db7c2443e5c \
- --hash=sha256:b27e0a16aef00bcfc888a6490dc3f16651f901dc475446c6882834278c8dc7b3
+sphinxcontrib-spelling==8.0.1
# via -r doc/requirements.in
-tornado==6.2 \
- --hash=sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca \
- --hash=sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72 \
- --hash=sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23 \
- --hash=sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8 \
- --hash=sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b \
- --hash=sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9 \
- --hash=sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13 \
- --hash=sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75 \
- --hash=sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac \
- --hash=sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e \
- --hash=sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b
- # via livereload
-typing-extensions==4.5.0 \
- --hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \
- --hash=sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4
- # via importlib-metadata
-urllib3==1.26.15 \
- --hash=sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305 \
- --hash=sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42
+starlette==0.43.0
+ # via sphinx-autobuild
+stevedore==5.4.0
+ # via doc8
+typing-extensions==4.12.2
+ # via anyio
+urllib3==2.3.0
# via requests
-zipp==3.15.0 \
- --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
- --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
- # via importlib-metadata
-
-# The following packages are considered to be unsafe in a requirements file:
-setuptools==65.7.0 \
- --hash=sha256:4d3c92fac8f1118bb77a22181355e29c239cabfe2b9effdaa665c66b711136d7 \
- --hash=sha256:8ab4f1dbf2b4a65f7eec5ad0c620e84c34111a68d3349833494b9088212214dd
- # via
- # -c doc/../requirements/pins.pip
- # sphinxcontrib-jquery
+uvicorn==0.34.0
+ # via sphinx-autobuild
+watchfiles==1.0.3
+ # via sphinx-autobuild
+websockets==14.1
+ # via sphinx-autobuild
diff --git a/doc/sample_html/class_index.html b/doc/sample_html/class_index.html
new file mode 100644
index 000000000..dcd0c2515
--- /dev/null
+++ b/doc/sample_html/class_index.html
@@ -0,0 +1,553 @@
+ [553 added lines of rendered HTML elided; the markup was lost in extraction.
+ The new file is the "Cog coverage" classes/functions index page, including
+ the "No items found using the specified filter." message.]
diff --git a/doc/sample_html/coverage_html.js b/doc/sample_html/coverage_html_cb_6fb7b396.js
similarity index 71%
rename from doc/sample_html/coverage_html.js
rename to doc/sample_html/coverage_html_cb_6fb7b396.js
index 1c4eb9881..1face13de 100644
--- a/doc/sample_html/coverage_html.js
+++ b/doc/sample_html/coverage_html_cb_6fb7b396.js
@@ -34,13 +34,14 @@ function on_click(sel, fn) {
// Helpers for table sorting
function getCellValue(row, column = 0) {
- const cell = row.cells[column]
+ const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection
if (cell.childElementCount == 1) {
- const child = cell.firstElementChild
- if (child instanceof HTMLTimeElement && child.dateTime) {
- return child.dateTime
- } else if (child instanceof HTMLDataElement && child.value) {
- return child.value
+ var child = cell.firstElementChild;
+ if (child.tagName === "A") {
+ child = child.firstElementChild;
+ }
+ if (child instanceof HTMLDataElement && child.value) {
+ return child.value;
}
}
return cell.innerText || cell.textContent;
@@ -50,28 +51,62 @@ function rowComparator(rowA, rowB, column = 0) {
let valueA = getCellValue(rowA, column);
let valueB = getCellValue(rowB, column);
if (!isNaN(valueA) && !isNaN(valueB)) {
- return valueA - valueB
+ return valueA - valueB;
}
return valueA.localeCompare(valueB, undefined, {numeric: true});
}
function sortColumn(th) {
// Get the current sorting direction of the selected header,
- // clear state on other headers and then set the new sorting direction
+ // clear state on other headers and then set the new sorting direction.
const currentSortOrder = th.getAttribute("aria-sort");
[...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none"));
+ var direction;
if (currentSortOrder === "none") {
- th.setAttribute("aria-sort", th.dataset.defaultSortOrder || "ascending");
- } else {
- th.setAttribute("aria-sort", currentSortOrder === "ascending" ? "descending" : "ascending");
+ direction = th.dataset.defaultSortOrder || "ascending";
+ }
+ else if (currentSortOrder === "ascending") {
+ direction = "descending";
}
+ else {
+ direction = "ascending";
+ }
+ th.setAttribute("aria-sort", direction);
const column = [...th.parentElement.cells].indexOf(th)
- // Sort all rows and afterwards append them in order to move them in the DOM
+ // Sort all rows and afterwards append them in order to move them in the DOM.
Array.from(th.closest("table").querySelectorAll("tbody tr"))
- .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (th.getAttribute("aria-sort") === "ascending" ? 1 : -1))
- .forEach(tr => tr.parentElement.appendChild(tr) );
+ .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (direction === "ascending" ? 1 : -1))
+ .forEach(tr => tr.parentElement.appendChild(tr));
+
+ // Save the sort order for next time.
+ if (th.id !== "region") {
+ let th_id = "file"; // Sort by file if we don't have a column id
+ let current_direction = direction;
+ const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE);
+ if (stored_list) {
+ ({th_id, direction} = JSON.parse(stored_list))
+ }
+ localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({
+ "th_id": th.id,
+ "direction": current_direction
+ }));
+ if (th.id !== th_id || document.getElementById("region")) {
+ // Sort column has changed, unset sorting by function or class.
+ localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({
+ "by_region": false,
+ "region_direction": current_direction
+ }));
+ }
+ }
+ else {
+ // Sort column has changed to by function or class, remember that.
+ localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({
+ "by_region": true,
+ "region_direction": direction
+ }));
+ }
}
// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key.
@@ -90,21 +125,60 @@ coverage.assign_shortkeys = function () {
// Create the events for the filter box.
coverage.wire_up_filter = function () {
+ // Populate the filter and hide100 inputs if there are saved values for them.
+ const saved_filter_value = localStorage.getItem(coverage.FILTER_STORAGE);
+ if (saved_filter_value) {
+ document.getElementById("filter").value = saved_filter_value;
+ }
+ const saved_hide100_value = localStorage.getItem(coverage.HIDE100_STORAGE);
+ if (saved_hide100_value) {
+ document.getElementById("hide100").checked = JSON.parse(saved_hide100_value);
+ }
+
// Cache elements.
const table = document.querySelector("table.index");
const table_body_rows = table.querySelectorAll("tbody tr");
const no_rows = document.getElementById("no_rows");
// Observe filter keyevents.
- document.getElementById("filter").addEventListener("input", debounce(event => {
+ const filter_handler = (event => {
// Keep running total of each metric, first index contains number of shown rows
const totals = new Array(table.rows[0].cells.length).fill(0);
// Accumulate the percentage as fraction
- totals[totals.length - 1] = { "numer": 0, "denom": 0 };
+ totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection
+
+ var text = document.getElementById("filter").value;
+ // Store filter value
+ localStorage.setItem(coverage.FILTER_STORAGE, text);
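+    // Smart-case matching: an all-lowercase filter matches case-insensitively;
+    // any uppercase character in the filter forces a case-sensitive match.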
+ const casefold = (text === text.toLowerCase());
+ const hide100 = document.getElementById("hide100").checked;
+ // Store hide value.
+ localStorage.setItem(coverage.HIDE100_STORAGE, JSON.stringify(hide100));
// Hide / show elements.
table_body_rows.forEach(row => {
- if (!row.cells[0].textContent.includes(event.target.value)) {
+ var show = false;
+ // Check the text filter.
+ for (let column = 0; column < totals.length; column++) {
+ cell = row.cells[column];
+ if (cell.classList.contains("name")) {
+ var celltext = cell.textContent;
+ if (casefold) {
+ celltext = celltext.toLowerCase();
+ }
+ if (celltext.includes(text)) {
+ show = true;
+ }
+ }
+ }
+
+ // Check the "hide covered" filter.
+ if (show && hide100) {
+ const [numer, denom] = row.cells[row.cells.length - 1].dataset.ratio.split(" ");
+ show = (numer !== denom);
+ }
+
+ if (!show) {
// hide
row.classList.add("hidden");
return;
@@ -114,16 +188,20 @@ coverage.wire_up_filter = function () {
row.classList.remove("hidden");
totals[0]++;
- for (let column = 1; column < totals.length; column++) {
+ for (let column = 0; column < totals.length; column++) {
// Accumulate dynamic totals
- cell = row.cells[column]
+ cell = row.cells[column] // nosemgrep: eslint.detect-object-injection
+ if (cell.classList.contains("name")) {
+ continue;
+ }
if (column === totals.length - 1) {
// Last column contains percentage
const [numer, denom] = cell.dataset.ratio.split(" ");
- totals[column]["numer"] += parseInt(numer, 10);
- totals[column]["denom"] += parseInt(denom, 10);
- } else {
- totals[column] += parseInt(cell.textContent, 10);
+ totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection
+ totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection
+ }
+ else {
+ totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection
}
}
});
@@ -142,9 +220,12 @@ coverage.wire_up_filter = function () {
const footer = table.tFoot.rows[0];
// Calculate new dynamic sum values based on visible rows.
- for (let column = 1; column < totals.length; column++) {
+ for (let column = 0; column < totals.length; column++) {
// Get footer cell element.
- const cell = footer.cells[column];
+ const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection
+ if (cell.classList.contains("name")) {
+ continue;
+ }
// Set value into dynamic footer cell element.
if (column === totals.length - 1) {
@@ -152,54 +233,76 @@ coverage.wire_up_filter = function () {
// and adapts to the number of decimal places.
const match = /\.([0-9]+)/.exec(cell.textContent);
const places = match ? match[1].length : 0;
- const { numer, denom } = totals[column];
+ const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection
cell.dataset.ratio = `${numer} ${denom}`;
// Check denom to prevent NaN if filtered files contain no statements
cell.textContent = denom
? `${(numer * 100 / denom).toFixed(places)}%`
: `${(100).toFixed(places)}%`;
- } else {
- cell.textContent = totals[column];
+ }
+ else {
+ cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection
}
}
- }));
+ });
+
+ document.getElementById("filter").addEventListener("input", debounce(filter_handler));
+ document.getElementById("hide100").addEventListener("input", debounce(filter_handler));
// Trigger change event on setup, to force filter on page refresh
// (filter value may still be present).
document.getElementById("filter").dispatchEvent(new Event("input"));
+ document.getElementById("hide100").dispatchEvent(new Event("input"));
};
+coverage.FILTER_STORAGE = "COVERAGE_FILTER_VALUE";
+coverage.HIDE100_STORAGE = "COVERAGE_HIDE100_VALUE";
-coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2";
-
-// Loaded on index.html
-coverage.index_ready = function () {
- coverage.assign_shortkeys();
- coverage.wire_up_filter();
+// Set up the click-to-sort columns.
+coverage.wire_up_sorting = function () {
document.querySelectorAll("[data-sortable] th[aria-sort]").forEach(
th => th.addEventListener("click", e => sortColumn(e.target))
);
// Look for a localStorage item containing previous sort settings:
+ let th_id = "file", direction = "ascending";
const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE);
-
if (stored_list) {
- const {column, direction} = JSON.parse(stored_list);
- const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column];
- th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending");
- th.click()
+ ({th_id, direction} = JSON.parse(stored_list));
+ }
+ let by_region = false, region_direction = "ascending";
+ const sorted_by_region = localStorage.getItem(coverage.SORTED_BY_REGION);
+ if (sorted_by_region) {
+ ({
+ by_region,
+ region_direction
+ } = JSON.parse(sorted_by_region));
}
- // Watch for page unload events so we can save the final sort settings:
- window.addEventListener("unload", function () {
- const th = document.querySelector('[data-sortable] th[aria-sort="ascending"], [data-sortable] [aria-sort="descending"]');
- if (!th) {
- return;
- }
- localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({
- column: [...th.parentElement.cells].indexOf(th),
- direction: th.getAttribute("aria-sort"),
- }));
- });
+ const region_id = "region";
+ if (by_region && document.getElementById(region_id)) {
+ direction = region_direction;
+ }
+ // If we are in a page that has a column with id of "region", sort on
+ // it if the last sort was by function or class.
+ let th;
+ if (document.getElementById(region_id)) {
+ th = document.getElementById(by_region ? region_id : th_id);
+ }
+ else {
+ th = document.getElementById(th_id);
+ }
+ th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending");
+ th.click()
+};
+
+coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2";
+coverage.SORTED_BY_REGION = "COVERAGE_SORT_REGION";
+
+// Loaded on index.html
+coverage.index_ready = function () {
+ coverage.assign_shortkeys();
+ coverage.wire_up_filter();
+ coverage.wire_up_sorting();
on_click(".button_prev_file", coverage.to_prev_file);
on_click(".button_next_file", coverage.to_next_file);
@@ -214,10 +317,11 @@ coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";
coverage.pyfile_ready = function () {
// If we're directed to a particular line number, highlight the line.
var frag = location.hash;
- if (frag.length > 2 && frag[1] === 't') {
+ if (frag.length > 2 && frag[1] === "t") {
document.querySelector(frag).closest(".n").classList.add("highlight");
coverage.set_sel(parseInt(frag.substr(2), 10));
- } else {
+ }
+ else {
coverage.set_sel(0);
}
@@ -250,13 +354,17 @@ coverage.pyfile_ready = function () {
}
for (cls in coverage.filters) {
- coverage.set_line_visibilty(cls, coverage.filters[cls]);
+ coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection
}
coverage.assign_shortkeys();
coverage.init_scroll_markers();
coverage.wire_up_sticky_header();
+ document.querySelectorAll("[id^=ctxs]").forEach(
+ cbox => cbox.addEventListener("click", coverage.expand_contexts)
+ );
+
// Rebuild scroll markers when the window height changes.
window.addEventListener("resize", coverage.build_scroll_markers);
};
@@ -437,7 +545,8 @@ coverage.to_next_chunk_nicely = function () {
if (line.parentElement !== document.getElementById("source")) {
// The element is not a source line but the header or similar
coverage.select_line_or_chunk(1);
- } else {
+ }
+ else {
// We extract the line number from the id
coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
}
@@ -456,7 +565,8 @@ coverage.to_prev_chunk_nicely = function () {
if (line.parentElement !== document.getElementById("source")) {
// The element is not a source line but the header or similar
coverage.select_line_or_chunk(coverage.lines_len);
- } else {
+ }
+ else {
// We extract the line number from the id
coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
}
@@ -528,14 +638,14 @@ coverage.scroll_window = function (to_pos) {
coverage.init_scroll_markers = function () {
// Init some variables
- coverage.lines_len = document.querySelectorAll('#source > p').length;
+ coverage.lines_len = document.querySelectorAll("#source > p").length;
// Build html
coverage.build_scroll_markers();
};
coverage.build_scroll_markers = function () {
- const temp_scroll_marker = document.getElementById('scroll_marker')
+ const temp_scroll_marker = document.getElementById("scroll_marker")
if (temp_scroll_marker) temp_scroll_marker.remove();
// Don't build markers if the window has no scroll bar.
if (document.body.scrollHeight <= window.innerHeight) {
@@ -549,8 +659,8 @@ coverage.build_scroll_markers = function () {
const scroll_marker = document.createElement("div");
scroll_marker.id = "scroll_marker";
- document.getElementById('source').querySelectorAll(
- 'p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par'
+ document.getElementById("source").querySelectorAll(
+ "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par"
).forEach(element => {
const line_top = Math.floor(element.offsetTop * marker_scale);
const line_number = parseInt(element.querySelector(".n a").id.substr(1));
@@ -558,7 +668,8 @@ coverage.build_scroll_markers = function () {
if (line_number === previous_line + 1) {
// If this solid missed block just make previous mark higher.
last_mark.style.height = `${line_top + line_height - last_top}px`;
- } else {
+ }
+ else {
// Add colored line in scroll_marker block.
last_mark = document.createElement("div");
last_mark.id = `m${line_number}`;
@@ -577,28 +688,46 @@ coverage.build_scroll_markers = function () {
};
coverage.wire_up_sticky_header = function () {
- const header = document.querySelector('header');
+ const header = document.querySelector("header");
const header_bottom = (
- header.querySelector('.content h2').getBoundingClientRect().top -
+ header.querySelector(".content h2").getBoundingClientRect().top -
header.getBoundingClientRect().top
);
function updateHeader() {
if (window.scrollY > header_bottom) {
- header.classList.add('sticky');
- } else {
- header.classList.remove('sticky');
+ header.classList.add("sticky");
+ }
+ else {
+ header.classList.remove("sticky");
}
}
- window.addEventListener('scroll', updateHeader);
+ window.addEventListener("scroll", updateHeader);
updateHeader();
};
+coverage.expand_contexts = function (e) {
+ var ctxs = e.target.parentNode.querySelector(".ctxs");
+
+ if (!ctxs.classList.contains("expanded")) {
+ var ctxs_text = ctxs.textContent;
+ var width = Number(ctxs_text[0]);
+ ctxs.textContent = "";
+ for (var i = 1; i < ctxs_text.length; i += width) {
+ const key = ctxs_text.substring(i, i + width).trim();
+ ctxs.appendChild(document.createTextNode(contexts[key]));
+ ctxs.appendChild(document.createElement("br"));
+ }
+ ctxs.classList.add("expanded");
+ }
+};
+
document.addEventListener("DOMContentLoaded", () => {
if (document.body.classList.contains("indexfile")) {
coverage.index_ready();
- } else {
+ }
+ else {
coverage.pyfile_ready();
}
});
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html b/doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html
deleted file mode 100644
index ffe5456be..000000000
--- a/doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html
+++ /dev/null
@@ -1,216 +0,0 @@
-
-
-
-
- Coverage for cogapp/test_makefiles.py: 22.37%
-
-
-
-
-
-
-
- 1""" Test the cogapp.makefiles modules
- 2"""
- 3
- 4import shutil
- 5import os
- 6import random
- 7import tempfile
- 8from unittest import TestCase
- 9
- 10from . import makefiles
- 11
- 12
- 13class SimpleTests(TestCase):
- 14
- 15 def setUp(self):
- 16 # Create a temporary directory.
- 17 my_dir = 'testmakefiles_tempdir_' + str(random.random())[2:]
- 18 self.tempdir = os.path.join(tempfile.gettempdir(), my_dir)
- 19 os.mkdir(self.tempdir)
- 20
- 21 def tearDown(self):
- 22 # Get rid of the temporary directory.
- 23 shutil.rmtree(self.tempdir)
- 24
- 25 def exists(self, dname, fname):
- 26 return os.path.exists(os.path.join(dname, fname))
- 27
- 28 def checkFilesExist(self, d, dname):
- 29 for fname in d.keys():
- 30 assert(self.exists(dname, fname))
- 31 if type(d[fname]) == type({}):
- 32 self.checkFilesExist(d[fname], os.path.join(dname, fname))
- 33
- 34 def checkFilesDontExist(self, d, dname):
- 35 for fname in d.keys():
- 36 assert(not self.exists(dname, fname))
- 37
- 38 def testOneFile(self):
- 39 fname = 'foo.txt'
- 40 notfname = 'not_here.txt'
- 41 d = { fname: "howdy" }
- 42 assert(not self.exists(self.tempdir, fname))
- 43 assert(not self.exists(self.tempdir, notfname))
- 44
- 45 makefiles.makeFiles(d, self.tempdir)
- 46 assert(self.exists(self.tempdir, fname))
- 47 assert(not self.exists(self.tempdir, notfname))
- 48
- 49 makefiles.removeFiles(d, self.tempdir)
- 50 assert(not self.exists(self.tempdir, fname))
- 51 assert(not self.exists(self.tempdir, notfname))
- 52
- 53 def testManyFiles(self):
- 54 d = {
- 55 'top1.txt': "howdy",
- 56 'top2.txt': "hello",
- 57 'sub': {
- 58 'sub1.txt': "inside",
- 59 'sub2.txt': "inside2",
- 60 },
- 61 }
- 62
- 63 self.checkFilesDontExist(d, self.tempdir)
- 64 makefiles.makeFiles(d, self.tempdir)
- 65 self.checkFilesExist(d, self.tempdir)
- 66 makefiles.removeFiles(d, self.tempdir)
- 67 self.checkFilesDontExist(d, self.tempdir)
- 68
- 69 def testOverlapping(self):
- 70 d1 = {
- 71 'top1.txt': "howdy",
- 72 'sub': {
- 73 'sub1.txt': "inside",
- 74 },
- 75 }
- 76
- 77 d2 = {
- 78 'top2.txt': "hello",
- 79 'sub': {
- 80 'sub2.txt': "inside2",
- 81 },
- 82 }
- 83
- 84 self.checkFilesDontExist(d1, self.tempdir)
- 85 self.checkFilesDontExist(d2, self.tempdir)
- 86 makefiles.makeFiles(d1, self.tempdir)
- 87 makefiles.makeFiles(d2, self.tempdir)
- 88 self.checkFilesExist(d1, self.tempdir)
- 89 self.checkFilesExist(d2, self.tempdir)
- 90 makefiles.removeFiles(d1, self.tempdir)
- 91 makefiles.removeFiles(d2, self.tempdir)
- 92 self.checkFilesDontExist(d1, self.tempdir)
- 93 self.checkFilesDontExist(d2, self.tempdir)
- 94
- 95 def testContents(self):
- 96 fname = 'bar.txt'
- 97 cont0 = "I am bar.txt"
- 98 d = { fname: cont0 }
- 99 makefiles.makeFiles(d, self.tempdir)
- 100 fcont1 = open(os.path.join(self.tempdir, fname))
- 101 assert(fcont1.read() == cont0)
- 102 fcont1.close()
- 103
- 104 def testDedent(self):
- 105 fname = 'dedent.txt'
- 106 d = {
- 107 fname: """\
- 108 This is dedent.txt
- 109 \tTabbed in.
- 110 spaced in.
- 111 OK.
- 112 """,
- 113 }
- 114 makefiles.makeFiles(d, self.tempdir)
- 115 fcont = open(os.path.join(self.tempdir, fname))
- 116 assert(fcont.read() == "This is dedent.txt\n\tTabbed in.\n spaced in.\nOK.\n")
- 117 fcont.close()
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html b/doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html
deleted file mode 100644
index 0d3fd4f63..000000000
--- a/doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html
+++ /dev/null
@@ -1,196 +0,0 @@
- Coverage for cogapp/test_whiteutils.py: 26.47%
- 1""" Test the cogapp.whiteutils module.
- 2"""
- 3
- 4from unittest import TestCase
- 5
- 6from .whiteutils import commonPrefix, reindentBlock, whitePrefix
- 7
- 8
- 9class WhitePrefixTests(TestCase):
- 10 """ Test cases for cogapp.whiteutils.
- 11 """
- 12 def testSingleLine(self):
- 13 self.assertEqual(whitePrefix(['']), '')
- 14 self.assertEqual(whitePrefix([' ']), '')
- 15 self.assertEqual(whitePrefix(['x']), '')
- 16 self.assertEqual(whitePrefix([' x']), ' ')
- 17 self.assertEqual(whitePrefix(['\tx']), '\t')
- 18 self.assertEqual(whitePrefix([' x']), ' ')
- 19 self.assertEqual(whitePrefix([' \t \tx ']), ' \t \t')
- 20
- 21 def testMultiLine(self):
- 22 self.assertEqual(whitePrefix([' x',' x',' x']), ' ')
- 23 self.assertEqual(whitePrefix([' y',' y',' y']), ' ')
- 24 self.assertEqual(whitePrefix([' y',' y',' y']), ' ')
- 25
- 26 def testBlankLinesAreIgnored(self):
- 27 self.assertEqual(whitePrefix([' x',' x','',' x']), ' ')
- 28 self.assertEqual(whitePrefix(['',' x',' x',' x']), ' ')
- 29 self.assertEqual(whitePrefix([' x',' x',' x','']), ' ')
- 30 self.assertEqual(whitePrefix([' x',' x',' ',' x']), ' ')
- 31
- 32 def testTabCharacters(self):
- 33 self.assertEqual(whitePrefix(['\timport sys', '', '\tprint sys.argv']), '\t')
- 34
- 35 def testDecreasingLengths(self):
- 36 self.assertEqual(whitePrefix([' x',' x',' x']), ' ')
- 37 self.assertEqual(whitePrefix([' x',' x',' x']), ' ')
- 38
- 39
- 40class ReindentBlockTests(TestCase):
- 41 """ Test cases for cogapp.reindentBlock.
- 42 """
- 43 def testNonTermLine(self):
- 44 self.assertEqual(reindentBlock(''), '')
- 45 self.assertEqual(reindentBlock('x'), 'x')
- 46 self.assertEqual(reindentBlock(' x'), 'x')
- 47 self.assertEqual(reindentBlock(' x'), 'x')
- 48 self.assertEqual(reindentBlock('\tx'), 'x')
- 49 self.assertEqual(reindentBlock('x', ' '), ' x')
- 50 self.assertEqual(reindentBlock('x', '\t'), '\tx')
- 51 self.assertEqual(reindentBlock(' x', ' '), ' x')
- 52 self.assertEqual(reindentBlock(' x', '\t'), '\tx')
- 53 self.assertEqual(reindentBlock(' x', ' '), ' x')
- 54
- 55 def testSingleLine(self):
- 56 self.assertEqual(reindentBlock('\n'), '\n')
- 57 self.assertEqual(reindentBlock('x\n'), 'x\n')
- 58 self.assertEqual(reindentBlock(' x\n'), 'x\n')
- 59 self.assertEqual(reindentBlock(' x\n'), 'x\n')
- 60 self.assertEqual(reindentBlock('\tx\n'), 'x\n')
- 61 self.assertEqual(reindentBlock('x\n', ' '), ' x\n')
- 62 self.assertEqual(reindentBlock('x\n', '\t'), '\tx\n')
- 63 self.assertEqual(reindentBlock(' x\n', ' '), ' x\n')
- 64 self.assertEqual(reindentBlock(' x\n', '\t'), '\tx\n')
- 65 self.assertEqual(reindentBlock(' x\n', ' '), ' x\n')
- 66
- 67 def testRealBlock(self):
- 68 self.assertEqual(
- 69 reindentBlock('\timport sys\n\n\tprint sys.argv\n'),
- 70 'import sys\n\nprint sys.argv\n'
- 71 )
- 72
- 73
- 74class CommonPrefixTests(TestCase):
- 75 """ Test cases for cogapp.commonPrefix.
- 76 """
- 77 def testDegenerateCases(self):
- 78 self.assertEqual(commonPrefix([]), '')
- 79 self.assertEqual(commonPrefix(['']), '')
- 80 self.assertEqual(commonPrefix(['','','','','']), '')
- 81 self.assertEqual(commonPrefix(['cat in the hat']), 'cat in the hat')
- 82
- 83 def testNoCommonPrefix(self):
- 84 self.assertEqual(commonPrefix(['a','b']), '')
- 85 self.assertEqual(commonPrefix(['a','b','c','d','e','f']), '')
- 86 self.assertEqual(commonPrefix(['a','a','a','a','a','x']), '')
- 87
- 88 def testUsualCases(self):
- 89 self.assertEqual(commonPrefix(['ab', 'ac']), 'a')
- 90 self.assertEqual(commonPrefix(['aab', 'aac']), 'aa')
- 91 self.assertEqual(commonPrefix(['aab', 'aab', 'aab', 'aac']), 'aa')
- 92
- 93 def testBlankLine(self):
- 94 self.assertEqual(commonPrefix(['abc', 'abx', '', 'aby']), '')
- 95
- 96 def testDecreasingLengths(self):
- 97 self.assertEqual(commonPrefix(['abcd', 'abc', 'ab']), 'ab')
diff --git a/doc/sample_html/favicon_32.png b/doc/sample_html/favicon_32_cb_58284776.png
similarity index 100%
rename from doc/sample_html/favicon_32.png
rename to doc/sample_html/favicon_32_cb_58284776.png
diff --git a/doc/sample_html/function_index.html b/doc/sample_html/function_index.html
new file mode 100644
index 000000000..01aa834a7
--- /dev/null
+++ b/doc/sample_html/function_index.html
@@ -0,0 +1,2393 @@
+ Cog coverage
+ No items found using the specified filter.
diff --git a/doc/sample_html/index.html b/doc/sample_html/index.html
index c304d54ad..008434b86 100644
--- a/doc/sample_html/index.html
+++ b/doc/sample_html/index.html
@@ -1,28 +1,28 @@
Cog coverage
Cog coverage:
- 38.75%
+ 38.58%