Skip to content

gh-108834: Cleanup libregrtest #108858

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions Lib/test/libregrtest/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,6 @@
from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
from test.libregrtest.main import main
from test.support import TestStats
from .cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
from .result import FilterTuple, State, TestResult
from .runtests import TestsTuple, FilterDict, RunTests
from .results import TestsList, Results
from .main import main
26 changes: 16 additions & 10 deletions Lib/test/libregrtest/cmdline.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import os
import shlex
import sys

from test.support import os_helper


Expand Down Expand Up @@ -154,7 +155,7 @@ def __init__(self, **kwargs) -> None:
self.fromfile = None
self.fail_env_changed = False
self.use_resources = None
self.trace = False
self.coverage = False
self.coverdir = 'coverage'
self.runleaks = False
self.huntrleaks = False
Expand All @@ -170,7 +171,8 @@ def __init__(self, **kwargs) -> None:
self.ignore_tests = None
self.pgo = False
self.pgo_extended = False

self.threshold = None
self.wait = False
super().__init__(**kwargs)


Expand Down Expand Up @@ -205,7 +207,7 @@ def _create_parser():
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('--worker-args', metavar='ARGS')
group.add_argument('--worker-json', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
Expand Down Expand Up @@ -283,7 +285,6 @@ def _create_parser():
dest='use_mp', type=int,
help='run PROCESSES processes at once')
group.add_argument('-T', '--coverage', action='store_true',
dest='trace',
help='turn on code coverage tracing using the trace '
'module')
group.add_argument('-D', '--coverdir', metavar='DIR',
Expand Down Expand Up @@ -378,11 +379,11 @@ def _parse_args(args, **kwargs):

if ns.single and ns.fromfile:
parser.error("-s and -f don't go together!")
if ns.use_mp is not None and ns.trace:
if ns.use_mp is not None and ns.coverage:
parser.error("-T and -j don't go together!")
if ns.python is not None:
if ns.use_mp is None:
parser.error("-p requires -j!")
parser.error("--python option requires the -j option!")
# The "executable" may be two or more parts, e.g. "node python.js"
ns.python = shlex.split(ns.python)
if ns.failfast and not (ns.verbose or ns.verbose3):
Expand All @@ -401,10 +402,6 @@ def _parse_args(args, **kwargs):
if ns.timeout is not None:
if ns.timeout <= 0:
ns.timeout = None
if ns.use_mp is not None:
if ns.use_mp <= 0:
# Use all cores + extras for tests that like to sleep
ns.use_mp = 2 + (os.cpu_count() or 1)
if ns.use:
for a in ns.use:
for r in a:
Expand Down Expand Up @@ -448,4 +445,13 @@ def _parse_args(args, **kwargs):
# --forever implies --failfast
ns.failfast = True

if ns.huntrleaks:
warmup, repetitions, _ = ns.huntrleaks
if warmup < 1 or repetitions < 1:
msg = ("Invalid values for the --huntrleaks/-R parameters. The "
"number of warmups and repetitions must be at least 1 "
"each (1:1).")
print(msg, file=sys.stderr, flush=True)
sys.exit(2)

return ns
92 changes: 92 additions & 0 deletions Lib/test/libregrtest/findtests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import os.path
import sys
import unittest
from test import support

from .result import FilterTuple
from .utils import abs_module_name, count, printlist


# If these test directories are encountered, recurse into them and treat each
# test_*.py file or sub-directory as a separate test module.  This can
# increase parallelism.
# Beware this can't generally be done for any directory with sub-tests as the
# __init__.py may do things which alter what tests are to be run.

# Test packages that are safe to split into their sub-tests for parallel runs.
SPLITTESTDIRS = frozenset({
    "test_asyncio",
    "test_concurrent_futures",
    "test_multiprocessing_fork",
    "test_multiprocessing_forkserver",
    "test_multiprocessing_spawn",
})


def findtestdir(path=None):
    """Return the directory containing the test modules.

    Use *path* when given; otherwise fall back to the parent directory of
    this package, or to the current directory as a last resort.
    """
    if path:
        return path
    package_parent = os.path.dirname(os.path.dirname(__file__))
    return package_parent or os.curdir


def findtests(*, testdir=None, exclude=(),
              split_test_dirs=SPLITTESTDIRS, base_mod=""):
    """Return a sorted list of all applicable test module names.

    Directories listed in *split_test_dirs* are recursed into so that each
    of their sub-tests becomes a separate module name; *exclude* names are
    skipped; *base_mod* is the dotted prefix used during recursion.
    """
    testdir = findtestdir(testdir)
    found = []
    for entry in os.listdir(testdir):
        stem, suffix = os.path.splitext(entry)
        # Only test_* entries count; honour the exclusion list.
        if not stem.startswith("test_") or stem in exclude:
            continue
        if stem in split_test_dirs:
            # Recurse into the package and list its sub-tests individually.
            subdir = os.path.join(testdir, stem)
            sub_base = f"{base_mod or 'test'}.{stem}"
            found.extend(findtests(testdir=subdir, exclude=exclude,
                                   split_test_dirs=split_test_dirs,
                                   base_mod=sub_base))
        elif suffix in (".py", ""):
            found.append(f"{base_mod}.{stem}" if base_mod else stem)
    return sorted(found)


def split_test_packages(tests, *, testdir=None, exclude=(),
                        split_test_dirs=SPLITTESTDIRS):
    """Expand known test packages into their individual sub-tests.

    Names not listed in *split_test_dirs* are kept as-is; listed package
    names are replaced by the tests found inside the package directory.
    """
    testdir = findtestdir(testdir)
    expanded = []
    for test_name in tests:
        if test_name not in split_test_dirs:
            expanded.append(test_name)
            continue
        package_dir = os.path.join(testdir, test_name)
        expanded.extend(findtests(testdir=package_dir, exclude=exclude,
                                  split_test_dirs=split_test_dirs,
                                  base_mod=test_name))
    return expanded


def _list_cases(suite):
    """Recursively print the IDs of the matching test cases in *suite*."""
    for item in suite:
        # Skip placeholder tests for modules that failed to load.
        if isinstance(item, unittest.loader._FailedTest):
            continue
        if isinstance(item, unittest.TestSuite):
            _list_cases(item)
        elif isinstance(item, unittest.TestCase) and support.match_test(item):
            print(item.id())

def list_cases(tests, *, test_dir: str,
               match_tests: FilterTuple | None = None,
               ignore_tests: FilterTuple | None = None) -> None:
    """Print the IDs of the test cases in *tests*.

    Applies the match/ignore filters, then loads each test module and
    lists its cases; modules raising SkipTest are reported on stderr.
    """
    support.verbose = False
    support.set_match_tests(match_tests, ignore_tests)

    skipped = []
    for test_name in tests:
        module_name = abs_module_name(test_name, test_dir)
        try:
            # Loading the module may raise SkipTest (e.g. missing platform
            # support); record the test instead of listing its cases.
            suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
            _list_cases(suite)
        except unittest.SkipTest:
            skipped.append(test_name)

    if not skipped:
        return
    sys.stdout.flush()
    stderr = sys.stderr
    print(file=stderr)
    print(count(len(skipped), "test"), "skipped:", file=stderr)
    printlist(skipped, file=stderr)
89 changes: 89 additions & 0 deletions Lib/test/libregrtest/logger.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
import os
import sys
import time

from . import RunTests


class Logger:
    """Log regrtest progress lines prefixed with elapsed time and load.

    On Windows the system load is sampled by a WindowsLoadTracker started
    via start_load_tracker(); elsewhere os.getloadavg() is used when
    available.
    """

    def __init__(self, pgo: bool):
        # Reference point for the elapsed-time prefix.
        self.start_time = time.perf_counter()
        # Windows-only load sampler; None elsewhere or until started.
        self.win_load_tracker = None
        # PGO runs suppress the failure counter in the progress bar.
        self.pgo = pgo

        # used to display the progress bar "[ 3/100]"
        self.test_count_text = ''
        self.test_count_width = 1

    def set_tests(self, runtests: RunTests):
        """Size the "[ 3/100]" progress counter for this test run."""
        if not runtests.forever:
            self.test_count_text = '/{}'.format(len(runtests.tests))
            self.test_count_width = len(self.test_count_text) - 1
        else:
            # --forever: no fixed total, just leave room for the index.
            self.test_count_text = ''
            self.test_count_width = 3

    def start_load_tracker(self):
        """Start sampling the Windows system load (no-op elsewhere)."""
        if sys.platform != 'win32':
            return

        # If we're on windows and this is the parent runner (not a worker),
        # track the load average.
        from .win_utils import WindowsLoadTracker

        try:
            self.win_load_tracker = WindowsLoadTracker()
        except PermissionError as exc:
            # Standard accounts may not have access to the performance
            # counters.
            print(f'Failed to create WindowsLoadTracker: {exc}')

    def stop_load_tracker(self):
        """Close the Windows load tracker, if one was started."""
        tracker = self.win_load_tracker
        if tracker is None:
            return
        tracker.close()
        self.win_load_tracker = None

    def get_time(self):
        """Return the elapsed time (seconds) since the logger was created."""
        return time.perf_counter() - self.start_time

    def getloadavg(self):
        """Return the 1-minute load average, or None when unavailable."""
        if self.win_load_tracker is not None:
            return self.win_load_tracker.getloadavg()

        # os.getloadavg() only exists on Unix-like platforms.
        loadavg = getattr(os, 'getloadavg', None)
        if loadavg is not None:
            return loadavg()[0]

        return None

    def log(self, line=''):
        """Print *line* prefixed with the elapsed time and system load."""
        empty = not line

        # add the system load prefix: "load avg: 1.80 "
        load_avg = self.getloadavg()
        if load_avg is not None:
            line = f"load avg: {load_avg:.2f} {line}"

        # add the timestamp prefix: "0:01:05 "
        mins, secs = divmod(int(self.get_time()), 60)
        hours, mins = divmod(mins, 60)
        line = f"{hours}:{mins:02d}:{secs:02d} {line}"
        if empty:
            # drop the trailing space left by the empty message
            line = line[:-1]

        print(line, flush=True)

    def display_progress(self, test_index, text, results, runtests):
        """Log one progress line, e.g. "[ 51/405/1] test_tcl passed"."""
        if runtests.quiet:
            return

        # "[ 51/405/1] test_tcl passed"
        progress = f"{test_index:{self.test_count_width}}{self.test_count_text}"
        fails = len(results.bad) + len(results.environment_changed)
        if fails and not self.pgo:
            progress = f"{progress}/{fails}"
        self.log(f"[{progress}] {text}")
Loading