From 0e6411ee7d6c4d4998ba1a7757adcf7e7bb731b1 Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Sun, 23 Feb 2025 20:32:17 -0800 Subject: [PATCH 01/15] update __init__.py Signed-off-by: Ashwin Naren --- Lib/test/support/__init__.py | 928 +++++++++++++++++++++-------------- Lib/test/test_ast.py | 2 +- Lib/test/test_collections.py | 2 +- Lib/test/test_dict.py | 4 +- Lib/test/test_dictviews.py | 4 +- 5 files changed, 566 insertions(+), 374 deletions(-) diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py index 1efe5bddb1..869053e0dd 100644 --- a/Lib/test/support/__init__.py +++ b/Lib/test/support/__init__.py @@ -6,8 +6,7 @@ import contextlib import dataclasses import functools -import getpass -import opcode +import _opcode import os import re import stat @@ -19,8 +18,6 @@ import unittest import warnings -from .testresult import get_test_runner - __all__ = [ # globals @@ -29,12 +26,11 @@ "Error", "TestFailed", "TestDidNotRun", "ResourceDenied", # io "record_original_stdout", "get_original_stdout", "captured_stdout", - "captured_stdin", "captured_stderr", + "captured_stdin", "captured_stderr", "captured_output", # unittest "is_resource_enabled", "requires", "requires_freebsd_version", - "requires_linux_version", "requires_mac_ver", + "requires_gil_enabled", "requires_linux_version", "requires_mac_ver", "check_syntax_error", - "run_unittest", "run_doctest", "requires_gzip", "requires_bz2", "requires_lzma", "bigmemtest", "bigaddrspacetest", "cpython_only", "get_attribute", "requires_IEEE_754", "requires_zlib", @@ -46,8 +42,8 @@ "check_disallow_instantiation", "check_sanitizer", "skip_if_sanitizer", "requires_limited_api", "requires_specialization", # sys - "is_jython", "is_android", "is_emscripten", "is_wasi", - "check_impl_detail", "unix_shell", "setswitchinterval", + "MS_WINDOWS", "is_jython", "is_android", "is_emscripten", "is_wasi", + "is_apple_mobile", "check_impl_detail", "unix_shell", "setswitchinterval", # os "get_pagesize", # network @@ -60,9 +56,14 @@ "run_with_tz", "PGO", "missing_compiler_executable", "ALWAYS_EQ", "NEVER_EQ", "LARGEST", "SMALLEST", "LOOPBACK_TIMEOUT", "INTERNET_TIMEOUT", "SHORT_TIMEOUT", "LONG_TIMEOUT", - "Py_DEBUG", "EXCEEDS_RECURSION_LIMIT", "C_RECURSION_LIMIT", + "Py_DEBUG", "exceeds_recursion_limit", "get_c_recursion_limit", "skip_on_s390x", - ] + "without_optimizer", + "force_not_colorized", + "force_not_colorized_test_class", + "make_clean_env", + "BrokenIter", +] # Timeout in seconds for tests using a network server listening on the network @@ -74,13 +75,7 @@ # # The timeout should be long enough for connect(), recv() and send() methods # of socket.socket. -LOOPBACK_TIMEOUT = 5.0 -if sys.platform == 'win32' and ' 32 bit (ARM)' in sys.version: - # bpo-37553: test_socket.SendfileUsingSendTest is taking longer than 2 - # seconds on Windows ARM32 buildbot - LOOPBACK_TIMEOUT = 10 -elif sys.platform == 'vxworks': - LOOPBACK_TIMEOUT = 10 +LOOPBACK_TIMEOUT = 10.0 # Timeout in seconds for network requests going to the internet. The timeout is # short enough to prevent a test to wait for too long if the internet request @@ -185,7 +180,7 @@ def get_attribute(obj, name): verbose = 1 # Flag set to 0 by regrtest.py use_resources = None # Flag set to [] by regrtest.py max_memuse = 0 # Disable bigmem tests (they will still be run with - # small sizes, to make sure they work.) +# small sizes, to make sure they work.) 
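For illustration, a hedged sketch (not part of the patch) of how a test consumes the bigmem globals above; the test class and method names are invented, but bigmemtest and _1G are the real helpers this module exports:

    import unittest
    from test import support
    from test.support import bigmemtest, _1G

    class StrBigmemTests(unittest.TestCase):
        # Runs at full size only when enough memory was declared, e.g.
        # "python -m test -M 8g test_str"; otherwise only a small dry run.
        @bigmemtest(size=2 * _1G, memuse=2)  # needs roughly size * memuse bytes
        def test_concat(self, size):
            s = 'x' * size
            self.assertEqual(len(s + s), 2 * size)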
real_max_memuse = 0 junit_xml_list = None # list of testsuite XML elements failfast = False @@ -244,10 +239,10 @@ class USEROBJECTFLAGS(ctypes.Structure): uof = USEROBJECTFLAGS() needed = ctypes.wintypes.DWORD() res = dll.GetUserObjectInformationW(h, - UOI_FLAGS, - ctypes.byref(uof), - ctypes.sizeof(uof), - ctypes.byref(needed)) + UOI_FLAGS, + ctypes.byref(uof), + ctypes.sizeof(uof), + ctypes.byref(needed)) if not res: raise ctypes.WinError() if not bool(uof.dwFlags & WSF_VISIBLE): @@ -259,22 +254,16 @@ class USEROBJECTFLAGS(ctypes.Structure): # process not running under the same user id as the current console # user. To avoid that, raise an exception if the window manager # connection is not available. - from ctypes import cdll, c_int, pointer, Structure - from ctypes.util import find_library - - app_services = cdll.LoadLibrary(find_library("ApplicationServices")) - - if app_services.CGMainDisplayID() == 0: - reason = "gui tests cannot run without OS X window manager" + import subprocess + try: + rc = subprocess.run(["launchctl", "managername"], + capture_output=True, check=True) + managername = rc.stdout.decode("utf-8").strip() + except subprocess.CalledProcessError: + reason = "unable to detect macOS launchd job manager" else: - class ProcessSerialNumber(Structure): - _fields_ = [("highLongOfPSN", c_int), - ("lowLongOfPSN", c_int)] - psn = ProcessSerialNumber() - psn_p = pointer(psn) - if ( (app_services.GetCurrentProcess(psn_p) < 0) or - (app_services.SetFrontProcess(psn_p) < 0) ): - reason = "cannot run without OS X gui process" + if managername != "Aqua": + reason = f"{managername=} -- can only run in a macOS GUI session" # check on every platform whether tkinter can actually do anything if not reason: @@ -391,11 +380,12 @@ def wrapper(*args, **kw): def skip_if_buildbot(reason=None): """Decorator raising SkipTest if running on a buildbot.""" + import getpass if not reason: reason = 'not suitable for buildbots' try: isbuildbot = getpass.getuser().lower() == 'buildbot' - except (KeyError, EnvironmentError) as err: + except (KeyError, OSError) as err: warnings.warn(f'getpass.getuser() failed {err}.', RuntimeWarning) isbuildbot = False return unittest.skipIf(isbuildbot, reason) @@ -431,13 +421,26 @@ def check_sanitizer(*, address=False, memory=False, ub=False, thread=False): (thread and thread_sanitizer) ) + def skip_if_sanitizer(reason=None, *, address=False, memory=False, ub=False, thread=False): """Decorator raising SkipTest if running with a sanitizer active.""" if not reason: reason = 'not working with sanitizers active' - skip = check_sanitizer(address=address, memory=memory, ub=ub) + skip = check_sanitizer(address=address, memory=memory, ub=ub, thread=thread) return unittest.skipIf(skip, reason) +# gh-89363: True if fork() can hang if Python is built with Address Sanitizer +# (ASAN): libasan race condition, dead lock in pthread_create(). 
+HAVE_ASAN_FORK_BUG = check_sanitizer(address=True) + + +def set_sanitizer_env_var(env, option): + for name in ('ASAN_OPTIONS', 'MSAN_OPTIONS', 'UBSAN_OPTIONS', 'TSAN_OPTIONS'): + if name in env: + env[name] += f':{option}' + else: + env[name] = option + def system_must_validate_cert(f): """Skip the test on TLS certificate validation failures.""" @@ -510,21 +513,42 @@ def has_no_debug_ranges(): def requires_debug_ranges(reason='requires co_positions / debug_ranges'): return unittest.skipIf(has_no_debug_ranges(), reason) -def requires_legacy_unicode_capi(): +@contextlib.contextmanager +def suppress_immortalization(suppress=True): + """Suppress immortalization of deferred objects.""" + try: + import _testinternalcapi + except ImportError: + yield + return + + if not suppress: + yield + return + + _testinternalcapi.suppress_immortalization(True) + try: + yield + finally: + _testinternalcapi.suppress_immortalization(False) + +def skip_if_suppress_immortalization(): try: - from _testcapi import unicode_legacy_string + import _testinternalcapi except ImportError: - unicode_legacy_string = None + return + return unittest.skipUnless(_testinternalcapi.get_immortalize_deferred(), + "requires immortalization of deferred objects") - return unittest.skipUnless(unicode_legacy_string, - 'requires legacy Unicode C API') + +MS_WINDOWS = (sys.platform == 'win32') # Is not actually used in tests, but is kept for compatibility. is_jython = sys.platform.startswith('java') -is_android = hasattr(sys, 'getandroidapilevel') +is_android = sys.platform == "android" -if sys.platform not in ('win32', 'vxworks'): +if sys.platform not in {"win32", "vxworks", "ios", "tvos", "watchos"}: unix_shell = '/system/bin/sh' if is_android else '/bin/sh' else: unix_shell = None @@ -534,23 +558,44 @@ def requires_legacy_unicode_capi(): is_emscripten = sys.platform == "emscripten" is_wasi = sys.platform == "wasi" -has_fork_support = hasattr(os, "fork") and not is_emscripten and not is_wasi +is_apple_mobile = sys.platform in {"ios", "tvos", "watchos"} +is_apple = is_apple_mobile or sys.platform == "darwin" -# From python 3.12.6 -is_s390x = hasattr(os, 'uname') and os.uname().machine == 's390x' -skip_on_s390x = unittest.skipIf(is_s390x, 'skipped on s390x') +has_fork_support = hasattr(os, "fork") and not ( + # WASM and Apple mobile platforms do not support subprocesses. + is_emscripten + or is_wasi + or is_apple_mobile + + # Although Android supports fork, it's unsafe to call it from Python because + # all Android apps are multi-threaded. + or is_android ) def requires_fork(): return unittest.skipUnless(has_fork_support, "requires working os.fork()") -has_subprocess_support = not is_emscripten and not is_wasi +has_subprocess_support = not ( + # WASM and Apple mobile platforms do not support subprocesses. + is_emscripten + or is_wasi + or is_apple_mobile + + # Although Android supports subprocesses, they're almost never useful in + # practice (see PEP 738). And most of the tests that use them are calling + # sys.executable, which won't work when Python is embedded in an Android app. + or is_android ) def requires_subprocess(): """Used for subprocess, os.spawn calls, fd inheritance""" return unittest.skipUnless(has_subprocess_support, "requires subprocess support") # Emscripten's socket emulation and WASI sockets have limitations.
-has_socket_support = not is_emscripten and not is_wasi +has_socket_support = not ( + is_emscripten + or is_wasi +) def requires_working_socket(*, module=False): """Skip tests or modules that require working sockets @@ -762,27 +807,31 @@ def gc_collect(): longer than expected. This function tries its best to force all garbage objects to disappear. """ - # TODO: RUSTPYTHON (comment out before) - # import gc - # gc.collect() - # if is_jython: - # time.sleep(0.1) - # gc.collect() - # gc.collect() - pass + import gc + gc.collect() + gc.collect() + gc.collect() @contextlib.contextmanager def disable_gc(): - # TODO: RUSTPYTHON (comment out before) - # import gc - # have_gc = gc.isenabled() - # gc.disable() - # try: - # yield - # finally: - # if have_gc: - # gc.enable() - yield + import gc + have_gc = gc.isenabled() + gc.disable() + try: + yield + finally: + if have_gc: + gc.enable() + +@contextlib.contextmanager +def gc_threshold(*args): + import gc + old_threshold = gc.get_threshold() + gc.set_threshold(*args) + try: + yield + finally: + gc.set_threshold(*old_threshold) def python_is_optimized(): @@ -795,11 +844,50 @@ def python_is_optimized(): return final_opt not in ('', '-O0', '-Og') -_header = 'nP' +def check_cflags_pgo(): + # Check if Python was built with ./configure --enable-optimizations: + # with Profile Guided Optimization (PGO). + cflags_nodist = sysconfig.get_config_var('PY_CFLAGS_NODIST') or '' + pgo_options = [ + # GCC + '-fprofile-use', + # clang: -fprofile-instr-use=code.profclangd + '-fprofile-instr-use', + # ICC + "-prof-use", + ] + PGO_PROF_USE_FLAG = sysconfig.get_config_var('PGO_PROF_USE_FLAG') + if PGO_PROF_USE_FLAG: + pgo_options.append(PGO_PROF_USE_FLAG) + return any(option in cflags_nodist for option in pgo_options) + + +def check_bolt_optimized(): + # Always return false, if the platform is WASI, + # because BOLT optimization does not support WASM binary. + if is_wasi: + return False + config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' + return '--enable-bolt' in config_args + + +Py_GIL_DISABLED = bool(sysconfig.get_config_var('Py_GIL_DISABLED')) + +def requires_gil_enabled(msg="needs the GIL enabled"): + """Decorator for skipping tests on the free-threaded build.""" + return unittest.skipIf(Py_GIL_DISABLED, msg) + +def expected_failure_if_gil_disabled(): + """Expect test failure if the GIL is disabled.""" + if Py_GIL_DISABLED: + return unittest.expectedFailure + return lambda test_case: test_case + +if Py_GIL_DISABLED: + _header = 'PHBBInP' +else: + _header = 'nP' _align = '0n' -if hasattr(sys, "getobjects"): - _header = '2P' + _header - _align = '0P' _vheader = _header + 'n' def calcobjsize(fmt): @@ -821,16 +909,16 @@ def check_sizeof(test, o, size): raise unittest.SkipTest("_testinternalcapi required") result = sys.getsizeof(o) # add GC header size - if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\ - ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))): + if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or \ + ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))): size += _testinternalcapi.SIZEOF_PYGC_HEAD msg = 'wrong size for %s: got %d, expected %d' \ - % (type(o), result, size) + % (type(o), result, size) test.assertEqual(result, size, msg) #======================================================================= -# Decorator for running a function in a different locale, correctly resetting -# it afterwards. 
+# Decorator/context manager for running code in a different locale, +# correctly resetting it afterwards. @contextlib.contextmanager def run_with_locale(catstr, *locales): @@ -841,16 +929,21 @@ def run_with_locale(catstr, *locales): except AttributeError: # if the test author gives us an invalid category string raise - except: + except Exception: # cannot retrieve original locale, so do nothing locale = orig_locale = None + if '' not in locales: + raise unittest.SkipTest('no locales') else: for loc in locales: try: locale.setlocale(category, loc) break - except: + except locale.Error: pass + else: + if '' not in locales: + raise unittest.SkipTest(f'no locales {locales}') try: yield @@ -858,6 +951,46 @@ if locale and orig_locale: locale.setlocale(category, orig_locale) +#======================================================================= +# Decorator for running a function in multiple locales (if they are +# available) and resetting the original locale afterwards. + +def run_with_locales(catstr, *locales): + def deco(func): + @functools.wraps(func) + def wrapper(self, /, *args, **kwargs): + dry_run = '' in locales + try: + import locale + category = getattr(locale, catstr) + orig_locale = locale.setlocale(category) + except AttributeError: + # if the test author gives us an invalid category string + raise + except Exception: + # cannot retrieve original locale, so do nothing + pass + else: + try: + for loc in locales: + with self.subTest(locale=loc): + try: + locale.setlocale(category, loc) + except locale.Error: + self.skipTest(f'no locale {loc!r}') + else: + dry_run = False + func(self, *args, **kwargs) + finally: + locale.setlocale(category, orig_locale) + if dry_run: + # no locales available, so just run the test + # with the current locale + with self.subTest(locale=None): + func(self, *args, **kwargs) + return wrapper + return deco + #======================================================================= # Decorator for running a function in a specific timezone, correctly # resetting it afterwards. @@ -904,27 +1037,31 @@ def inner(*args, **kwds): MAX_Py_ssize_t = sys.maxsize -def set_memlimit(limit): - global max_memuse - global real_max_memuse +def _parse_memlimit(limit: str) -> int: sizes = { 'k': 1024, 'm': _1M, 'g': _1G, 't': 1024*_1G, } - m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit, + m = re.match(r'(\d+(?:\.\d+)?) (K|M|G|T)b?$', limit, re.IGNORECASE | re.VERBOSE) if m is None: - raise ValueError('Invalid memory limit %r' % (limit,)) - memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()]) - real_max_memuse = memlimit - if memlimit > MAX_Py_ssize_t: - memlimit = MAX_Py_ssize_t + raise ValueError(f'Invalid memory limit: {limit!r}') + return int(float(m.group(1)) * sizes[m.group(2).lower()]) + +def set_memlimit(limit: str) -> None: + global max_memuse + global real_max_memuse + memlimit = _parse_memlimit(limit) if memlimit < _2G - 1: - raise ValueError('Memory limit %r too low to be useful' % (limit,)) + raise ValueError(f'Memory limit {limit!r} too low to be useful') + + real_max_memuse = memlimit + memlimit = min(memlimit, MAX_Py_ssize_t) max_memuse = memlimit + class _MemoryWatchdog: """An object which periodically watches the process' memory consumption and prints it out.
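To illustrate the syntax _parse_memlimit() above accepts (an interpreter session, illustrative only; the values follow from the sizes table in the function):

    >>> _parse_memlimit('2g')      # 2 * 1024**3 bytes
    2147483648
    >>> _parse_memlimit('0.5T')    # suffixes are case-insensitive, trailing 'b' optional
    549755813888
    >>> _parse_memlimit('100')     # a K/M/G/T suffix is required
    Traceback (most recent call last):
    ...
    ValueError: Invalid memory limit: '100'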
@@ -981,7 +1118,7 @@ def wrapper(self): maxsize = size if ((real_max_memuse or not dry_run) - and real_max_memuse < maxsize * memuse): + and real_max_memuse < maxsize * memuse): raise unittest.SkipTest( "not enough memory: %.1fG minimum needed" % (size * memuse / (1024 ** 3))) @@ -1077,18 +1214,30 @@ def check_impl_detail(**guards): def no_tracing(func): """Decorator to temporarily turn off tracing for the duration of a test.""" - if not hasattr(sys, 'gettrace'): - return func - else: + trace_wrapper = func + if hasattr(sys, 'gettrace'): @functools.wraps(func) - def wrapper(*args, **kwargs): + def trace_wrapper(*args, **kwargs): original_trace = sys.gettrace() try: sys.settrace(None) return func(*args, **kwargs) finally: sys.settrace(original_trace) - return wrapper + + coverage_wrapper = trace_wrapper + if 'test.cov' in sys.modules: # -Xpresite=test.cov used + cov = sys.monitoring.COVERAGE_ID + @functools.wraps(func) + def coverage_wrapper(*args, **kwargs): + original_events = sys.monitoring.get_events(cov) + try: + sys.monitoring.set_events(cov, 0) + return trace_wrapper(*args, **kwargs) + finally: + sys.monitoring.set_events(cov, original_events) + + return coverage_wrapper def refcount_test(test): @@ -1105,184 +1254,20 @@ def refcount_test(test): def requires_limited_api(test): try: import _testcapi + import _testlimitedcapi except ImportError: - return unittest.skip('needs _testcapi module')(test) - return unittest.skipUnless( - _testcapi.LIMITED_API_AVAILABLE, 'needs Limited API support')(test) + return unittest.skip('needs _testcapi and _testlimitedcapi modules')(test) + return test -def requires_specialization(test): - return unittest.skipUnless( - opcode.ENABLE_SPECIALIZATION, "requires specialization")(test) - -def _filter_suite(suite, pred): - """Recursively filter test cases in a suite based on a predicate.""" - newtests = [] - for test in suite._tests: - if isinstance(test, unittest.TestSuite): - _filter_suite(test, pred) - newtests.append(test) - else: - if pred(test): - newtests.append(test) - suite._tests = newtests - -@dataclasses.dataclass(slots=True) -class TestStats: - tests_run: int = 0 - failures: int = 0 - skipped: int = 0 - - @staticmethod - def from_unittest(result): - return TestStats(result.testsRun, - len(result.failures), - len(result.skipped)) - - @staticmethod - def from_doctest(results): - return TestStats(results.attempted, - results.failed) - - def accumulate(self, stats): - self.tests_run += stats.tests_run - self.failures += stats.failures - self.skipped += stats.skipped - - -def _run_suite(suite): - """Run tests from a unittest.TestSuite-derived class.""" - runner = get_test_runner(sys.stdout, - verbosity=verbose, - capture_output=(junit_xml_list is not None)) - - result = runner.run(suite) - - if junit_xml_list is not None: - junit_xml_list.append(result.get_xml_element()) - - if not result.testsRun and not result.skipped and not result.errors: - raise TestDidNotRun - if not result.wasSuccessful(): - stats = TestStats.from_unittest(result) - if len(result.errors) == 1 and not result.failures: - err = result.errors[0][1] - elif len(result.failures) == 1 and not result.errors: - err = result.failures[0][1] - else: - err = "multiple errors occurred" - if not verbose: err += "; run in verbose mode for details" - errors = [(str(tc), exc_str) for tc, exc_str in result.errors] - failures = [(str(tc), exc_str) for tc, exc_str in result.failures] - raise TestFailedWithDetails(err, errors, failures, stats=stats) - return result - - -# By default, don't filter 
tests -_match_test_func = None -_accept_test_patterns = None -_ignore_test_patterns = None - - -def match_test(test): - # Function used by support.run_unittest() and regrtest --list-cases - if _match_test_func is None: - return True - else: - return _match_test_func(test.id()) - - -def _is_full_match_test(pattern): - # If a pattern contains at least one dot, it's considered - # as a full test identifier. - # Example: 'test.test_os.FileTests.test_access'. - # - # ignore patterns which contain fnmatch patterns: '*', '?', '[...]' - # or '[!...]'. For example, ignore 'test_access*'. - return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern)) - - -def set_match_tests(accept_patterns=None, ignore_patterns=None): - global _match_test_func, _accept_test_patterns, _ignore_test_patterns - - if accept_patterns is None: - accept_patterns = () - if ignore_patterns is None: - ignore_patterns = () - - accept_func = ignore_func = None - - if accept_patterns != _accept_test_patterns: - accept_patterns, accept_func = _compile_match_function(accept_patterns) - if ignore_patterns != _ignore_test_patterns: - ignore_patterns, ignore_func = _compile_match_function(ignore_patterns) - - # Create a copy since patterns can be mutable and so modified later - _accept_test_patterns = tuple(accept_patterns) - _ignore_test_patterns = tuple(ignore_patterns) - - if accept_func is not None or ignore_func is not None: - def match_function(test_id): - accept = True - ignore = False - if accept_func: - accept = accept_func(test_id) - if ignore_func: - ignore = ignore_func(test_id) - return accept and not ignore - - _match_test_func = match_function - - -def _compile_match_function(patterns): - if not patterns: - func = None - # set_match_tests(None) behaves as set_match_tests(()) - patterns = () - elif all(map(_is_full_match_test, patterns)): - # Simple case: all patterns are full test identifier. - # The test.bisect_cmd utility only uses such full test identifiers. - func = set(patterns).__contains__ - else: - import fnmatch - regex = '|'.join(map(fnmatch.translate, patterns)) - # The search *is* case sensitive on purpose: - # don't use flags=re.IGNORECASE - regex_match = re.compile(regex).match - - def match_test_regex(test_id): - if regex_match(test_id): - # The regex matches the whole identifier, for example - # 'test.test_os.FileTests.test_access'. - return True - else: - # Try to match parts of the test identifier. - # For example, split 'test.test_os.FileTests.test_access' - # into: 'test', 'test_os', 'FileTests' and 'test_access'. 
- return any(map(regex_match, test_id.split("."))) - - func = match_test_regex - - return patterns, func +# Windows build doesn't support --disable-test-modules feature, so there's no +# 'TEST_MODULES' var in config +TEST_MODULES_ENABLED = (sysconfig.get_config_var('TEST_MODULES') or 'yes') == 'yes' +def requires_specialization(test): + return unittest.skipUnless( + _opcode.ENABLE_SPECIALIZATION, "requires specialization")(test) -def run_unittest(*classes): - """Run tests from unittest.TestCase-derived classes.""" - valid_types = (unittest.TestSuite, unittest.TestCase) - loader = unittest.TestLoader() - suite = unittest.TestSuite() - for cls in classes: - if isinstance(cls, str): - if cls in sys.modules: - suite.addTest(loader.loadTestsFromModule(sys.modules[cls])) - else: - raise ValueError("str arguments must be keys in sys.modules") - elif isinstance(cls, valid_types): - suite.addTest(cls) - else: - suite.addTest(loader.loadTestsFromTestCase(cls)) - _filter_suite(suite, match_test) - return _run_suite(suite) #======================================================================= # Check for the presence of docstrings. @@ -1304,38 +1289,6 @@ def _check_docstrings(): "test requires docstrings") -#======================================================================= -# doctest driver. - -def run_doctest(module, verbosity=None, optionflags=0): - """Run doctest on the given module. Return (#failures, #tests). - - If optional argument verbosity is not specified (or is None), pass - support's belief about verbosity on to doctest. Else doctest's - usual behavior is used (it searches sys.argv for -v). - """ - - import doctest - - if verbosity is None: - verbosity = verbose - else: - verbosity = None - - results = doctest.testmod(module, - verbose=verbosity, - optionflags=optionflags) - if results.failed: - stats = TestStats.from_doctest(results) - raise TestFailed(f"{results.failed} of {results.attempted} " - f"doctests failed", - stats=stats) - if verbose: - print('doctest (%s) ... %d tests with zero failures' % - (module.__name__, results.attempted)) - return results - - #======================================================================= # Support for saving and restoring the imported modules. @@ -1747,9 +1700,9 @@ def __enter__(self): msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]: old_mode = msvcrt.CrtSetReportMode(report_type, - msvcrt.CRTDBG_MODE_FILE) + msvcrt.CRTDBG_MODE_FILE) old_file = msvcrt.CrtSetReportFile(report_type, - msvcrt.CRTDBG_FILE_STDERR) + msvcrt.CRTDBG_FILE_STDERR) self.old_modes[report_type] = old_mode, old_file else: @@ -1858,7 +1811,10 @@ def run_in_subinterp(code): module is enabled. """ _check_tracemalloc() - import _testcapi + try: + import _testcapi + except ImportError: + raise unittest.SkipTest("requires _testcapi") return _testcapi.run_in_subinterp(code) @@ -1868,11 +1824,25 @@ def run_in_subinterp_with_config(code, *, own_gil=None, **config): module is enabled. 
""" _check_tracemalloc() - import _testcapi + try: + import _testinternalcapi + except ImportError: + raise unittest.SkipTest("requires _testinternalcapi") if own_gil is not None: assert 'gil' not in config, (own_gil, config) - config['gil'] = 2 if own_gil else 1 - return _testcapi.run_in_subinterp_with_config(code, **config) + config['gil'] = 'own' if own_gil else 'shared' + else: + gil = config['gil'] + if gil == 0: + config['gil'] = 'default' + elif gil == 1: + config['gil'] = 'shared' + elif gil == 2: + config['gil'] = 'own' + elif not isinstance(gil, str): + raise NotImplementedError(gil) + config = types.SimpleNamespace(**config) + return _testinternalcapi.run_in_subinterp_with_config(code, config) def _check_tracemalloc(): @@ -1885,28 +1855,30 @@ def _check_tracemalloc(): else: if tracemalloc.is_tracing(): raise unittest.SkipTest("run_in_subinterp() cannot be used " - "if tracemalloc module is tracing " - "memory allocations") - - -# TODO: RUSTPYTHON (comment out before) -# def check_free_after_iterating(test, iter, cls, args=()): -# class A(cls): -# def __del__(self): -# nonlocal done -# done = True -# try: -# next(it) -# except StopIteration: -# pass - -# done = False -# it = iter(A(*args)) -# # Issue 26494: Shouldn't crash -# test.assertRaises(StopIteration, next, it) -# # The sequence should be deallocated just after the end of iterating -# gc_collect() -# test.assertTrue(done) + "if tracemalloc module is tracing " + "memory allocations") + + +def check_free_after_iterating(test, iter, cls, args=()): + done = False + def wrapper(): + class A(cls): + def __del__(self): + nonlocal done + done = True + try: + next(it) + except StopIteration: + pass + + it = iter(A(*args)) + # Issue 26494: Shouldn't crash + test.assertRaises(StopIteration, next, it) + + wrapper() + # The sequence should be deallocated just after the end of iterating + gc_collect() + test.assertTrue(done) def missing_compiler_executable(cmd_names=[]): @@ -1935,25 +1907,25 @@ def missing_compiler_executable(cmd_names=[]): cmd = getattr(compiler, name) if cmd_names: assert cmd is not None, \ - "the '%s' executable is not configured" % name + "the '%s' executable is not configured" % name elif not cmd: continue if spawn.find_executable(cmd[0]) is None: return cmd[0] -_is_android_emulator = None +_old_android_emulator = None def setswitchinterval(interval): # Setting a very low gil interval on the Android emulator causes python # to hang (issue #26939). 
- minimum_interval = 1e-5 + minimum_interval = 1e-4 # 100 us if is_android and interval < minimum_interval: - global _is_android_emulator - if _is_android_emulator is None: - import subprocess - _is_android_emulator = (subprocess.check_output( - ['getprop', 'ro.kernel.qemu']).strip() == b'1') - if _is_android_emulator: + global _old_android_emulator + if _old_android_emulator is None: + import platform + av = platform.android_ver() + _old_android_emulator = av.is_emulator and av.api_level < 24 + if _old_android_emulator: interval = minimum_interval return sys.setswitchinterval(interval) @@ -2028,8 +2000,19 @@ def restore(self): def with_pymalloc(): - import _testcapi - return _testcapi.WITH_PYMALLOC + try: + import _testcapi + except ImportError: + raise unittest.SkipTest("requires _testcapi") + return _testcapi.WITH_PYMALLOC and not Py_GIL_DISABLED + + +def with_mimalloc(): + try: + import _testcapi + except ImportError: + raise unittest.SkipTest("requires _testcapi") + return _testcapi.WITH_MIMALLOC class _ALWAYS_EQ: @@ -2301,13 +2284,13 @@ def set_recursion_limit(limit): finally: sys.setrecursionlimit(original_limit) -def infinite_recursion(max_depth=100): - """Set a lower limit for tests that interact with infinite recursions - (e.g test_ast.ASTHelpers_Test.test_recursion_direct) since on some - debug windows builds, due to not enough functions being inlined the - stack size might not handle the default recursion limit (1000). See - bpo-11105 for details.""" - if max_depth < 3: +def infinite_recursion(max_depth=None): + if max_depth is None: + # Pick a number large enough to cause problems + # but not take too long for code that can handle + # very deep recursion. + max_depth = 20_000 + elif max_depth < 3: raise ValueError("max_depth must be at least 3, got {max_depth}") depth = get_recursion_depth() depth = max(depth - 1, 1) # Ignore infinite_recursion() frame. @@ -2368,7 +2351,9 @@ def _findwheel(pkgname): If set, the wheels are searched for in WHEEL_PKG_DIR (see ensurepip). Otherwise, they are searched for in the test directory. 
""" - wheel_dir = sysconfig.get_config_var('WHEEL_PKG_DIR') or TEST_HOME_DIR + wheel_dir = sysconfig.get_config_var('WHEEL_PKG_DIR') or os.path.join( + TEST_HOME_DIR, 'wheeldata', + ) filenames = os.listdir(wheel_dir) filenames = sorted(filenames, reverse=True) # approximate "newest" first for filename in filenames: @@ -2385,16 +2370,25 @@ def _findwheel(pkgname): # and returns the path to the venv directory and the path to the python executable @contextlib.contextmanager def setup_venv_with_pip_setuptools_wheel(venv_dir): + import shlex import subprocess from .os_helper import temp_cwd + def run_command(cmd): + if verbose: + print() + print('Run:', ' '.join(map(shlex.quote, cmd))) + subprocess.run(cmd, check=True) + else: + subprocess.run(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + check=True) + with temp_cwd() as temp_dir: # Create virtual environment to get setuptools cmd = [sys.executable, '-X', 'dev', '-m', 'venv', venv_dir] - if verbose: - print() - print('Run:', ' '.join(cmd)) - subprocess.run(cmd, check=True) + run_command(cmd) venv = os.path.join(temp_dir, venv_dir) @@ -2409,10 +2403,7 @@ def setup_venv_with_pip_setuptools_wheel(venv_dir): '-m', 'pip', 'install', _findwheel('setuptools'), _findwheel('wheel')] - if verbose: - print() - print('Run:', ' '.join(cmd)) - subprocess.run(cmd, check=True) + run_command(cmd) yield python @@ -2502,7 +2493,7 @@ def busy_retry(timeout, err_msg=None, /, *, error=True): def sleeping_retry(timeout, err_msg=None, /, - *, init_delay=0.010, max_delay=1.0, error=True): + *, init_delay=0.010, max_delay=1.0, error=True): """ Wait strategy that applies exponential backoff. @@ -2535,6 +2526,46 @@ def sleeping_retry(timeout, err_msg=None, /, delay = min(delay * 2, max_delay) +class CPUStopwatch: + """Context manager to roughly time a CPU-bound operation. + + Disables GC. Uses CPU time if it can (i.e. excludes sleeps & time of + other processes). + + N.B.: + - This *includes* time spent in other threads. + - Some systems only have a coarse resolution; check + stopwatch.clock_info.rseolution if. + + Usage: + + with ProcessStopwatch() as stopwatch: + ... + elapsed = stopwatch.seconds + resolution = stopwatch.clock_info.resolution + """ + def __enter__(self): + get_time = time.process_time + clock_info = time.get_clock_info('process_time') + if get_time() <= 0: # some platforms like WASM lack process_time() + get_time = time.monotonic + clock_info = time.get_clock_info('monotonic') + self.context = disable_gc() + self.context.__enter__() + self.get_time = get_time + self.clock_info = clock_info + self.start_time = get_time() + return self + + def __exit__(self, *exc): + try: + end_time = self.get_time() + finally: + result = self.context.__exit__(*exc) + self.seconds = end_time - self.start_time + return result + + @contextlib.contextmanager def adjust_int_max_str_digits(max_digits): """Temporarily change the integer string conversion length limit.""" @@ -2545,18 +2576,179 @@ def adjust_int_max_str_digits(max_digits): finally: sys.set_int_max_str_digits(current) -#For recursion tests, easily exceeds default recursion limit -EXCEEDS_RECURSION_LIMIT = 5000 -# The default C recursion limit (from Include/cpython/pystate.h). 
-C_RECURSION_LIMIT = 1500 +def get_c_recursion_limit(): + try: + import _testcapi + return _testcapi.Py_C_RECURSION_LIMIT + except ImportError: + raise unittest.SkipTest('requires _testcapi') + + +def exceeds_recursion_limit(): + """For recursion tests, easily exceeds default recursion limit.""" + return get_c_recursion_limit() * 3 + + +# Windows doesn't have os.uname(), but Windows doesn't run on s390x anyway. +is_s390x = hasattr(os, 'uname') and os.uname().machine == 's390x' +skip_on_s390x = unittest.skipIf(is_s390x, 'skipped on s390x') + +Py_TRACE_REFS = hasattr(sys, 'getobjects') + +# Decorator to disable optimizer while a function runs +def without_optimizer(func): + try: + from _testinternalcapi import get_optimizer, set_optimizer + except ImportError: + return func + @functools.wraps(func) + def wrapper(*args, **kwargs): + save_opt = get_optimizer() + try: + set_optimizer(None) + return func(*args, **kwargs) + finally: + set_optimizer(save_opt) + return wrapper + + +_BASE_COPY_SRC_DIR_IGNORED_NAMES = frozenset({ + # SRC_DIR/.git + '.git', + # ignore all __pycache__/ sub-directories + '__pycache__', +}) + +# Ignore function for shutil.copytree() to copy the Python source code. +def copy_python_src_ignore(path, names): + ignored = _BASE_COPY_SRC_DIR_IGNORED_NAMES + if os.path.basename(path) == 'Doc': + ignored |= { + # SRC_DIR/Doc/build/ + 'build', + # SRC_DIR/Doc/venv/ + 'venv', + } + + # check if we are at the root of the source code + elif 'Modules' in names: + ignored |= { + # SRC_DIR/build/ + 'build', + } + return ignored + + +def iter_builtin_types(): + for obj in __builtins__.values(): + if not isinstance(obj, type): + continue + cls = obj + if cls.__module__ != 'builtins': + continue + yield cls + + +def iter_slot_wrappers(cls): + assert cls.__module__ == 'builtins', cls + + def is_slot_wrapper(name, value): + if not isinstance(value, types.WrapperDescriptorType): + assert not repr(value).startswith('<slot wrapper '), (cls, name, value) + return False + assert repr(value).startswith('<slot wrapper '), (cls, name, value) + assert callable(value), (cls, name, value) + assert name.startswith('__') and name.endswith('__'), (cls, name, value) + return True + + ns = vars(cls) + unused = set(ns) + for name in dir(cls): + if name in ns: + unused.remove(name) + try: + value = getattr(cls, name) + except AttributeError: + # It's as though it weren't in __dir__. + assert name in ('__annotations__', '__abstractmethods__'), (cls, name) + if name in ns and is_slot_wrapper(name, ns[name]): + unused.add(name) + continue + if not name.startswith('__') or not name.endswith('__'): + assert not is_slot_wrapper(name, value), (cls, name, value) + if not is_slot_wrapper(name, value): + if name in ns: + assert not is_slot_wrapper(name, ns[name]), (cls, name, value, ns[name]) + continue + if name in ns: + yield name, True + else: + yield name, False + + for name in unused: + value = ns[name] + if is_slot_wrapper(name, value): + yield name, True + + +def make_clean_env() -> dict[str, str]: + clean_env = os.environ.copy() + for k in clean_env.copy(): + if k.startswith("PYTHON"): + clean_env.pop(k) + clean_env.pop("FORCE_COLOR", None) + clean_env.pop("NO_COLOR", None) + return clean_env + + +def initialized_with_pyrepl(): + """Detect whether PyREPL was used during Python initialization.""" + # If the main module has a __file__ attribute it's a Python module, which means PyREPL. + return hasattr(sys.modules["__main__"], "__file__")
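A brief usage sketch for make_clean_env() above (illustrative, not from the patch): spawn a child interpreter with a scrubbed environment so PYTHON* variables and color settings of the test runner do not leak into it:

    import subprocess
    import sys
    from test import support

    env = support.make_clean_env()
    # The child sees no PYTHONPATH, FORCE_COLOR, NO_COLOR, etc.
    subprocess.run([sys.executable, '-c',
                    'import os; print("PYTHONPATH" in os.environ)'],
                   env=env, check=True)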
-skip_on_s390x = unittest.skipIf(hasattr(os, 'uname') and os.uname().machine == 's390x', - 'skipped on s390x') -HAVE_ASAN_FORK_BUG = check_sanitizer(address=True) -# From python 3.12.8 class BrokenIter: def __init__(self, init_raises=False, next_raises=False, iter_raises=False): if init_raises: diff --git a/Lib/test/test_ast.py b/Lib/test/test_ast.py index af3e2bb5eb..5d03b12998 100644 --- a/Lib/test/test_ast.py +++ b/Lib/test/test_ast.py @@ -1117,7 +1117,7 @@ def next(self): @unittest.skipIf(support.is_wasi, "exhausts limited stack on WASI") @support.cpython_only def test_ast_recursion_limit(self): - fail_depth = support.EXCEEDS_RECURSION_LIMIT + fail_depth = support.exceeds_recursion_limit() crash_depth = 100_000 success_depth = 1200 diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py index ecd574ab83..09296bbe12 100644 --- a/Lib/test/test_collections.py +++ b/Lib/test/test_collections.py @@ -547,7 +547,7 @@ def test_odd_sizes(self): self.assertEqual(Dot(1)._replace(d=999), (999,)) self.assertEqual(Dot(1)._fields, ('d',)) - n = support.EXCEEDS_RECURSION_LIMIT + n = support.exceeds_recursion_limit() names = list(set(''.join([choice(string.ascii_letters) for j in range(10)]) for i in range(n))) n = len(names) diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py index 4aa6f1089a..18fa5f1f70 100644 --- a/Lib/test/test_dict.py +++ b/Lib/test/test_dict.py @@ -8,7 +8,7 @@ import unittest import weakref from test import support -from test.support import import_helper, C_RECURSION_LIMIT +from test.support import import_helper, get_c_recursion_limit class DictTest(unittest.TestCase): @@ -599,7 +599,7 @@ def __repr__(self): @unittest.skipIf(sys.platform == 'win32', 'TODO: RUSTPYTHON Windows') def test_repr_deep(self): d = {} - for i in range(C_RECURSION_LIMIT + 1): + for i in range(get_c_recursion_limit() + 1): d = {1: d} self.assertRaises(RecursionError, repr, d) diff --git a/Lib/test/test_dictviews.py b/Lib/test/test_dictviews.py index 172b98aa68..69dedf7c22 100644 --- a/Lib/test/test_dictviews.py +++ b/Lib/test/test_dictviews.py @@ -3,7 +3,7 @@ import pickle import sys import unittest -from test.support import C_RECURSION_LIMIT +from test.support import get_c_recursion_limit class DictSetTest(unittest.TestCase): @@ -282,7 +282,7 @@ def test_recursive_repr(self): @unittest.expectedFailure def test_deeply_nested_repr(self): d = {} - for i in range(C_RECURSION_LIMIT//2 + 100): + for i in range(get_c_recursion_limit()//2 + 100): d = {42: d.values()} self.assertRaises(RecursionError, repr, d) From ebb1d908517483f6a3746ad13853a7c8f73f64f7 Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Sun, 23 Feb 2025 20:44:27 -0800 Subject: [PATCH 02/15] stub opcode module Signed-off-by: Ashwin Naren --- vm/src/stdlib/mod.rs | 2 ++ vm/src/stdlib/opcode.rs | 6 ++++++ 2 files changed, 8 insertions(+) create mode 100644 vm/src/stdlib/opcode.rs diff --git a/vm/src/stdlib/mod.rs b/vm/src/stdlib/mod.rs index ca8f8f0bfd..91e81b010a 100644 --- a/vm/src/stdlib/mod.rs +++ b/vm/src/stdlib/mod.rs @@ -49,6 +49,7 @@ pub mod sys; mod winapi; #[cfg(windows)] mod winreg; +mod opcode; use crate::{builtins::PyModule, PyRef, VirtualMachine}; use std::{borrow::Cow, collections::HashMap}; @@ -83,6 +84,7 @@ pub fn get_module_inits() -> StdlibMap { "itertools" => itertools::make_module, "_io" => io::make_module, "marshal" => marshal::make_module, + "_opcode" => opcode::make_module, "_operator" => operator::make_module, "_signal" => signal::make_module, "_sre" => sre::make_module, diff --git 
a/vm/src/stdlib/opcode.rs b/vm/src/stdlib/opcode.rs new file mode 100644 index 0000000000..b187e0fcd2 --- /dev/null +++ b/vm/src/stdlib/opcode.rs @@ -0,0 +1,6 @@ +pub(crate) use _opcode::make_module; + +#[pymodule] +mod _opcode { + +} \ No newline at end of file From 875426fe10f8a7c5ab0bab8e8ed30d0a62883f33 Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Sun, 23 Feb 2025 20:53:15 -0800 Subject: [PATCH 03/15] bump libregrtest Signed-off-by: Ashwin Naren --- Lib/test/libregrtest/__init__.py | 5 - Lib/test/libregrtest/cmdline.py | 261 +++- Lib/test/libregrtest/filter.py | 77 ++ Lib/test/libregrtest/findtests.py | 110 ++ Lib/test/libregrtest/logger.py | 89 ++ Lib/test/libregrtest/main.py | 1152 +++++++++-------- Lib/test/libregrtest/mypy.ini | 26 + Lib/test/libregrtest/pgo.py | 56 + Lib/test/libregrtest/refleak.py | 260 ++-- Lib/test/libregrtest/result.py | 225 ++++ Lib/test/libregrtest/results.py | 276 ++++ Lib/test/libregrtest/run_workers.py | 621 +++++++++ Lib/test/libregrtest/runtest.py | 328 ----- Lib/test/libregrtest/runtest_mp.py | 288 ----- Lib/test/libregrtest/runtests.py | 222 ++++ Lib/test/libregrtest/save_env.py | 120 +- Lib/test/libregrtest/setup.py | 160 +-- Lib/test/libregrtest/single.py | 322 +++++ Lib/test/libregrtest/testresult.py | 193 +++ Lib/test/libregrtest/tsan.py | 33 + Lib/test/libregrtest/utils.py | 760 ++++++++++- Lib/test/libregrtest/win_utils.py | 203 +-- Lib/test/libregrtest/worker.py | 116 ++ .../import_from_tests/test_regrtest_a.py | 11 + .../test_regrtest_b/__init__.py | 9 + .../import_from_tests/test_regrtest_b/util.py | 0 .../import_from_tests/test_regrtest_c.py | 11 + 27 files changed, 4376 insertions(+), 1558 deletions(-) create mode 100644 Lib/test/libregrtest/filter.py create mode 100644 Lib/test/libregrtest/findtests.py create mode 100644 Lib/test/libregrtest/logger.py create mode 100644 Lib/test/libregrtest/mypy.ini create mode 100644 Lib/test/libregrtest/pgo.py create mode 100644 Lib/test/libregrtest/result.py create mode 100644 Lib/test/libregrtest/results.py create mode 100644 Lib/test/libregrtest/run_workers.py delete mode 100644 Lib/test/libregrtest/runtest.py delete mode 100644 Lib/test/libregrtest/runtest_mp.py create mode 100644 Lib/test/libregrtest/runtests.py create mode 100644 Lib/test/libregrtest/single.py create mode 100644 Lib/test/libregrtest/testresult.py create mode 100644 Lib/test/libregrtest/tsan.py create mode 100644 Lib/test/libregrtest/worker.py create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py diff --git a/Lib/test/libregrtest/__init__.py b/Lib/test/libregrtest/__init__.py index 3427b51b60..e69de29bb2 100644 --- a/Lib/test/libregrtest/__init__.py +++ b/Lib/test/libregrtest/__init__.py @@ -1,5 +0,0 @@ -# We import importlib *ASAP* in order to test #15386 -import importlib - -from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES -from test.libregrtest.main import main diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py index 0a97c8c19b..0c94fcc190 100644 --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -1,8 +1,9 @@ import argparse -import os +import os.path +import shlex import sys -from test import support -from test.support import os_helper +from test.support import os_helper, 
Py_DEBUG +from .utils import ALL_RESOURCES, RESOURCE_NAMES, TestFilter USAGE = """\ @@ -27,8 +28,10 @@ Additional option details: -r randomizes test execution order. You can use --randseed=int to provide an -int seed value for the randomizer; this is useful for reproducing troublesome -test orders. +int seed value for the randomizer. The randseed value will be used +to set seeds for all random usages in tests +(including randomizing the tests order if -r is set). +By default we always set random seed, but do not randomize test order. -s On the first invocation of regrtest using -s, the first test file found or the first test file given on the command line is run, and the name of @@ -107,6 +110,8 @@ cpu - Used for certain CPU-heavy tests. + walltime - Long running but not CPU-bound tests. + subprocess Run all tests for the subprocess module. urlfetch - It is okay to download files required on testing. @@ -128,17 +133,51 @@ """ -ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network', - 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui') +class Namespace(argparse.Namespace): + def __init__(self, **kwargs) -> None: + self.ci = False + self.testdir = None + self.verbose = 0 + self.quiet = False + self.exclude = False + self.cleanup = False + self.wait = False + self.list_cases = False + self.list_tests = False + self.single = False + self.randomize = False + self.fromfile = None + self.fail_env_changed = False + self.use_resources: list[str] = [] + self.trace = False + self.coverdir = 'coverage' + self.runleaks = False + self.huntrleaks: tuple[int, int, str] | None = None + self.rerun = False + self.verbose3 = False + self.print_slow = False + self.random_seed = None + self.use_mp = None + self.forever = False + self.header = False + self.failfast = False + self.match_tests: TestFilter = [] + self.pgo = False + self.pgo_extended = False + self.tsan = False + self.worker_json = None + self.start = None + self.timeout = None + self.memlimit = None + self.threshold = None + self.fail_rerun = False + self.tempdir = None + self._add_python_opts = True + self.xmlpath = None + self.single_process = False + + super().__init__(**kwargs) -# Other resources excluded from --use=all: -# -# - extralagefile (ex: test_zipfile64): really too slow to be enabled -# "by default" -# - tzdata: while needed to validate fully test_datetime, it makes -# test_datetime too slow (15-20 min on some buildbots) and so is disabled by -# default (see bpo-30822). -RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata') class _ArgParser(argparse.ArgumentParser): @@ -146,6 +185,20 @@ def error(self, message): super().error(message + "\nPass -h or --help for complete help.") +class FilterAction(argparse.Action): + def __call__(self, parser, namespace, value, option_string=None): + items = getattr(namespace, self.dest) + items.append((value, self.const)) + + +class FromFileFilterAction(argparse.Action): + def __call__(self, parser, namespace, value, option_string=None): + items = getattr(namespace, self.dest) + with open(value, encoding='utf-8') as fp: + for line in fp: + items.append((line.strip(), self.const)) + + def _create_parser(): # Set prog to prevent the uninformative "__main__.py" from displaying in # error messages when using "python -m test ...". 
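As a hedged illustration of what the filter actions above accumulate (not part of the patch): each -m/--match appends a (pattern, True) pair and each -i/--ignore a (pattern, False) pair to ns.match_tests, and filter.py's match_test() scans the pairs in reverse so the last matching option wins:

    ns = _parse_args(['-m', 'test_os.FileTests.*',
                      '-i', 'test_os.FileTests.test_access'])
    assert ns.match_tests == [('test_os.FileTests.*', True),
                              ('test_os.FileTests.test_access', False)]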
@@ -155,6 +208,7 @@ def _create_parser(): epilog=EPILOG, add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter) + parser.set_defaults(match_tests=[]) # Arguments with this clause added to its help are described further in # the epilog's "Additional option details" section. @@ -164,23 +218,35 @@ def _create_parser(): # We add help explicitly to control what argument group it renders under. group.add_argument('-h', '--help', action='help', help='show this help message and exit') - group.add_argument('--timeout', metavar='TIMEOUT', type=float, + group.add_argument('--fast-ci', action='store_true', + help='Fast Continuous Integration (CI) mode used by ' + 'GitHub Actions') + group.add_argument('--slow-ci', action='store_true', + help='Slow Continuous Integration (CI) mode used by ' + 'buildbot workers') + group.add_argument('--timeout', metavar='TIMEOUT', help='dump the traceback and exit if a test takes ' 'more than TIMEOUT seconds; disabled if TIMEOUT ' 'is negative or equals to zero') group.add_argument('--wait', action='store_true', help='wait for user input, e.g., allow a debugger ' 'to be attached') - group.add_argument('--worker-args', metavar='ARGS') group.add_argument('-S', '--start', metavar='START', help='the name of the test at which to start.' + more_details) + group.add_argument('-p', '--python', metavar='PYTHON', + help='Command to run Python test subprocesses with.') + group.add_argument('--randseed', metavar='SEED', + dest='random_seed', type=int, + help='pass a global random seed') group = parser.add_argument_group('Verbosity') group.add_argument('-v', '--verbose', action='count', help='run tests in verbose mode with output to stdout') - group.add_argument('-w', '--verbose2', action='store_true', + group.add_argument('-w', '--rerun', action='store_true', help='re-run failed tests in verbose mode') + group.add_argument('--verbose2', action='store_true', dest='rerun', + help='deprecated alias to --rerun') group.add_argument('-W', '--verbose3', action='store_true', help='display test output on failure') group.add_argument('-q', '--quiet', action='store_true', @@ -193,10 +259,6 @@ def _create_parser(): group = parser.add_argument_group('Selecting tests') group.add_argument('-r', '--randomize', action='store_true', help='randomize test execution order.' + more_details) - group.add_argument('--randseed', metavar='SEED', - dest='random_seed', type=int, - help='pass a random seed to reproduce a previous ' - 'random run') group.add_argument('-f', '--fromfile', metavar='FILE', help='read names of tests to run from a file.' + more_details) @@ -206,12 +268,21 @@ def _create_parser(): help='single step through a set of tests.' 
+ more_details) group.add_argument('-m', '--match', metavar='PAT', - dest='match_tests', action='append', + dest='match_tests', action=FilterAction, const=True, help='match test cases and methods with glob pattern PAT') + group.add_argument('-i', '--ignore', metavar='PAT', + dest='match_tests', action=FilterAction, const=False, + help='ignore test cases and methods with glob pattern PAT') group.add_argument('--matchfile', metavar='FILENAME', - dest='match_filename', + dest='match_tests', + action=FromFileFilterAction, const=True, help='similar to --match but get patterns from a ' 'text file, one pattern per line') + group.add_argument('--ignorefile', metavar='FILENAME', + dest='match_tests', + action=FromFileFilterAction, const=False, + help='similar to --matchfile but it receives patterns ' + 'from text file to ignore') group.add_argument('-G', '--failfast', action='store_true', help='fail as soon as a test fails (only with -v or -W)') group.add_argument('-u', '--use', metavar='RES1,RES2,...', @@ -227,9 +298,6 @@ def _create_parser(): '(instead of the Python stdlib test suite)') group = parser.add_argument_group('Special runs') - group.add_argument('-l', '--findleaks', action='store_const', const=2, - default=1, - help='deprecated alias to --fail-env-changed') group.add_argument('-L', '--runleaks', action='store_true', help='run the leaks(1) command just before exit.' + more_details) @@ -240,6 +308,12 @@ def _create_parser(): group.add_argument('-j', '--multiprocess', metavar='PROCESSES', dest='use_mp', type=int, help='run PROCESSES processes at once') + group.add_argument('--single-process', action='store_true', + dest='single_process', + help='always run all tests sequentially in ' + 'a single process, ignore -jN option, ' + 'and failed tests are also rerun sequentially ' + 'in the same process') group.add_argument('-T', '--coverage', action='store_true', dest='trace', help='turn on code coverage tracing using the trace ' @@ -257,7 +331,7 @@ def _create_parser(): help='suppress error message boxes on Windows') group.add_argument('-F', '--forever', action='store_true', help='run the specified tests in a loop, until an ' - 'error happens') + 'error happens; implies --failfast') group.add_argument('--list-tests', action='store_true', help="only write the name of tests that will be run, " "don't execute them") group.add_argument('--list-cases', action='store_true', help='only write the name of test cases that will be run' ' , don\'t execute them') group.add_argument('-P', '--pgo', dest='pgo', action='store_true', - help='enable Profile Guided Optimization training') + help='enable Profile Guided Optimization (PGO) training') + group.add_argument('--pgo-extended', action='store_true', + help='enable extended PGO training (slower training)') + group.add_argument('--tsan', dest='tsan', action='store_true', + help='run a subset of test cases that are appropriate for the TSAN test') group.add_argument('--fail-env-changed', action='store_true', help='if a test file alters the environment, mark ' 'the test as failed') + group.add_argument('--fail-rerun', action='store_true', + help='if a test failed and then passed when re-run, ' + 'mark the tests as failed') group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME', help='writes JUnit-style XML results to the specified ' 'file') - group.add_argument('--tempdir', dest='tempdir', metavar='PATH', + group.add_argument('--tempdir', metavar='PATH', help='override the working directory for the test run') + group.add_argument('--cleanup', action='store_true',
help='remove old test_python_* directories') + group.add_argument('--bisect', action='store_true', + help='if some tests fail, run test.bisect_cmd on them') + group.add_argument('--dont-add-python-opts', dest='_add_python_opts', + action='store_false', + help="internal option, don't use it") return parser @@ -309,19 +397,12 @@ def resources_list(string): def _parse_args(args, **kwargs): # Defaults - ns = argparse.Namespace(testdir=None, verbose=0, quiet=False, - exclude=False, single=False, randomize=False, fromfile=None, - findleaks=1, use_resources=None, trace=False, coverdir='coverage', - runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, - random_seed=None, use_mp=None, verbose3=False, forever=False, - header=False, failfast=False, match_tests=None, pgo=False) + ns = Namespace() for k, v in kwargs.items(): if not hasattr(ns, k): raise TypeError('%r is an invalid keyword argument ' 'for this function' % k) setattr(ns, k, v) - if ns.use_resources is None: - ns.use_resources = [] parser = _create_parser() # Issue #14191: argparse doesn't support "intermixed" positional and @@ -330,19 +411,77 @@ def _parse_args(args, **kwargs): for arg in ns.args: if arg.startswith('-'): parser.error("unrecognized arguments: %s" % arg) - sys.exit(1) - if ns.findleaks > 1: - # --findleaks implies --fail-env-changed + if ns.timeout is not None: + # Support "--timeout=" (no value) so Makefile.pre.in TESTTIMEOUT + # can be used by "make buildbottest" and "make test". + if ns.timeout != "": + try: + ns.timeout = float(ns.timeout) + except ValueError: + parser.error(f"invalid timeout value: {ns.timeout!r}") + else: + ns.timeout = None + + # Continuous Integration (CI): common options for fast/slow CI modes + if ns.slow_ci or ns.fast_ci: + # Similar to options: + # -j0 --randomize --fail-env-changed --rerun --slowest --verbose3 + if ns.use_mp is None: + ns.use_mp = 0 + ns.randomize = True ns.fail_env_changed = True + if ns.python is None: + ns.rerun = True + ns.print_slow = True + ns.verbose3 = True + else: + ns._add_python_opts = False + + # --single-process overrides -jN option + if ns.single_process: + ns.use_mp = None + + # When both --slow-ci and --fast-ci options are present, + # --slow-ci has the priority + if ns.slow_ci: + # Similar to: -u "all" --timeout=1200 + if ns.use is None: + ns.use = [] + ns.use.insert(0, ['all']) + if ns.timeout is None: + ns.timeout = 1200 # 20 minutes + elif ns.fast_ci: + # Similar to: -u "all,-cpu" --timeout=600 + if ns.use is None: + ns.use = [] + ns.use.insert(0, ['all', '-cpu']) + if ns.timeout is None: + ns.timeout = 600 # 10 minutes + if ns.single and ns.fromfile: parser.error("-s and -f don't go together!") - if ns.use_mp is not None and ns.trace: - parser.error("-T and -j don't go together!") + if ns.trace: + if ns.use_mp is not None: + if not Py_DEBUG: + parser.error("need --with-pydebug to use -T and -j together") + else: + print( + "Warning: collecting coverage without -j is imprecise. Configure" + " --with-pydebug and run -m test -T -j for best results.", + file=sys.stderr ) + if ns.python is not None: + if ns.use_mp is None: + parser.error("-p requires -j!") + # The "executable" may be two or more parts, e.g.
"node python.js" + ns.python = shlex.split(ns.python) if ns.failfast and not (ns.verbose or ns.verbose3): parser.error("-G/--failfast needs either -v or -W") - if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3): + if ns.pgo and (ns.verbose or ns.rerun or ns.verbose3): parser.error("--pgo/-v don't go together!") + if ns.pgo_extended: + ns.pgo = True # pgo_extended implies pgo if ns.nowindows: print("Warning: the --nowindows (-n) option is deprecated. " @@ -353,10 +492,6 @@ def _parse_args(args, **kwargs): if ns.timeout is not None: if ns.timeout <= 0: ns.timeout = None - if ns.use_mp is not None: - if ns.use_mp <= 0: - # Use all cores + extras for tests that like to sleep - ns.use_mp = 2 + (os.cpu_count() or 1) if ns.use: for a in ns.use: for r in a: @@ -379,16 +514,30 @@ def _parse_args(args, **kwargs): ns.randomize = True if ns.verbose: ns.header = True - if ns.huntrleaks and ns.verbose3: + + # When -jN option is used, a worker process does not use --verbose3 + # and so -R 3:3 -jN --verbose3 just works as expected: there is no false + # alarm about memory leak. + if ns.huntrleaks and ns.verbose3 and ns.use_mp is None: + # run_single_test() replaces sys.stdout with io.StringIO if verbose3 + # is true. In this case, huntrleaks sees an write into StringIO as + # a memory leak, whereas it is not (gh-71290). ns.verbose3 = False print("WARNING: Disable --verbose3 because it's incompatible with " - "--huntrleaks: see http://bugs.python.org/issue27103", + "--huntrleaks without -jN option", file=sys.stderr) - if ns.match_filename: - if ns.match_tests is None: - ns.match_tests = [] - with open(ns.match_filename) as fp: - for line in fp: - ns.match_tests.append(line.strip()) + + if ns.forever: + # --forever implies --failfast + ns.failfast = True + + if ns.huntrleaks: + warmup, repetitions, _ = ns.huntrleaks + if warmup < 1 or repetitions < 1: + msg = ("Invalid values for the --huntrleaks/-R parameters. The " + "number of warmups and repetitions must be at least 1 " + "each (1:1).") + print(msg, file=sys.stderr, flush=True) + sys.exit(2) return ns diff --git a/Lib/test/libregrtest/filter.py b/Lib/test/libregrtest/filter.py new file mode 100644 index 0000000000..41372e427f --- /dev/null +++ b/Lib/test/libregrtest/filter.py @@ -0,0 +1,77 @@ +import itertools +import operator +import re + + +# By default, don't filter tests +_test_matchers = () +_test_patterns = () + + +def match_test(test): + # Function used by support.run_unittest() and regrtest --list-cases + result = False + for matcher, result in reversed(_test_matchers): + if matcher(test.id()): + return result + return not result + + +def _is_full_match_test(pattern): + # If a pattern contains at least one dot, it's considered + # as a full test identifier. + # Example: 'test.test_os.FileTests.test_access'. + # + # ignore patterns which contain fnmatch patterns: '*', '?', '[...]' + # or '[!...]'. For example, ignore 'test_access*'. + return ('.' 
in pattern) and (not re.search(r'[?*\[\]]', pattern)) + + +def get_match_tests(): + global _test_patterns + return _test_patterns + + +def set_match_tests(patterns): + global _test_matchers, _test_patterns + + if not patterns: + _test_matchers = () + _test_patterns = () + else: + itemgetter = operator.itemgetter + patterns = tuple(patterns) + if patterns != _test_patterns: + _test_matchers = [ + (_compile_match_function(map(itemgetter(0), it)), result) + for result, it in itertools.groupby(patterns, itemgetter(1)) + ] + _test_patterns = patterns + + +def _compile_match_function(patterns): + patterns = list(patterns) + + if all(map(_is_full_match_test, patterns)): + # Simple case: all patterns are full test identifier. + # The test.bisect_cmd utility only uses such full test identifiers. + return set(patterns).__contains__ + else: + import fnmatch + regex = '|'.join(map(fnmatch.translate, patterns)) + # The search *is* case sensitive on purpose: + # don't use flags=re.IGNORECASE + regex_match = re.compile(regex).match + + def match_test_regex(test_id, regex_match=regex_match): + if regex_match(test_id): + # The regex matches the whole identifier, for example + # 'test.test_os.FileTests.test_access'. + return True + else: + # Try to match parts of the test identifier. + # For example, split 'test.test_os.FileTests.test_access' + # into: 'test', 'test_os', 'FileTests' and 'test_access'. + return any(map(regex_match, test_id.split("."))) + + return match_test_regex diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py new file mode 100644 index 0000000000..f01c124077 --- /dev/null +++ b/Lib/test/libregrtest/findtests.py @@ -0,0 +1,110 @@ +import os +import sys +import unittest +from collections.abc import Container + +from test import support + +from .filter import match_test, set_match_tests +from .utils import ( + StrPath, TestName, TestTuple, TestList, TestFilter, + abs_module_name, count, printlist) + + +# If these test directories are encountered recurse into them and treat each +# "test_*.py" file or each sub-directory as a separate test module. This can +# increase parallelism. +# +# Beware this can't generally be done for any directory with sub-tests as the +# __init__.py may do things which alter what tests are to be run. 
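+#
+# Illustrative example (module names assumed, not part of this patch): with
+# "test_asyncio" in the set below, findtests() recurses into
+# Lib/test/test_asyncio/ and yields one entry per file, such as
+# "test.test_asyncio.test_futures", instead of the single module name
+# "test_asyncio", so the files can be scheduled as separate parallel jobs.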
+SPLITTESTDIRS: set[TestName] = { + "test_asyncio", + "test_concurrent_futures", + "test_doctests", + "test_future_stmt", + "test_gdb", + "test_inspect", + "test_pydoc", + "test_multiprocessing_fork", + "test_multiprocessing_forkserver", + "test_multiprocessing_spawn", +} + + +def findtestdir(path: StrPath | None = None) -> StrPath: + return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir + + +def findtests(*, testdir: StrPath | None = None, exclude: Container[str] = (), + split_test_dirs: set[TestName] = SPLITTESTDIRS, + base_mod: str = "") -> TestList: + """Return a list of all applicable test modules.""" + testdir = findtestdir(testdir) + tests = [] + for name in os.listdir(testdir): + mod, ext = os.path.splitext(name) + if (not mod.startswith("test_")) or (mod in exclude): + continue + if base_mod: + fullname = f"{base_mod}.{mod}" + else: + fullname = mod + if fullname in split_test_dirs: + subdir = os.path.join(testdir, mod) + if not base_mod: + fullname = f"test.{mod}" + tests.extend(findtests(testdir=subdir, exclude=exclude, + split_test_dirs=split_test_dirs, + base_mod=fullname)) + elif ext in (".py", ""): + tests.append(fullname) + return sorted(tests) + + +def split_test_packages(tests, *, testdir: StrPath | None = None, + exclude: Container[str] = (), + split_test_dirs=SPLITTESTDIRS) -> list[TestName]: + testdir = findtestdir(testdir) + splitted = [] + for name in tests: + if name in split_test_dirs: + subdir = os.path.join(testdir, name) + splitted.extend(findtests(testdir=subdir, exclude=exclude, + split_test_dirs=split_test_dirs, + base_mod=name)) + else: + splitted.append(name) + return splitted + + +def _list_cases(suite: unittest.TestSuite) -> None: + for test in suite: + if isinstance(test, unittest.loader._FailedTest): # type: ignore[attr-defined] + continue + if isinstance(test, unittest.TestSuite): + _list_cases(test) + elif isinstance(test, unittest.TestCase): + if match_test(test): + print(test.id()) + +def list_cases(tests: TestTuple, *, + match_tests: TestFilter | None = None, + test_dir: StrPath | None = None) -> None: + support.verbose = False + set_match_tests(match_tests) + + skipped = [] + for test_name in tests: + module_name = abs_module_name(test_name, test_dir) + try: + suite = unittest.defaultTestLoader.loadTestsFromName(module_name) + _list_cases(suite) + except unittest.SkipTest: + skipped.append(test_name) + + if skipped: + sys.stdout.flush() + stderr = sys.stderr + print(file=stderr) + print(count(len(skipped), "test"), "skipped:", file=stderr) + printlist(skipped, file=stderr) diff --git a/Lib/test/libregrtest/logger.py b/Lib/test/libregrtest/logger.py new file mode 100644 index 0000000000..fa1d4d575c --- /dev/null +++ b/Lib/test/libregrtest/logger.py @@ -0,0 +1,89 @@ +import os +import time + +from test.support import MS_WINDOWS +from .results import TestResults +from .runtests import RunTests +from .utils import print_warning + +if MS_WINDOWS: + from .win_utils import WindowsLoadTracker + + +class Logger: + def __init__(self, results: TestResults, quiet: bool, pgo: bool): + self.start_time = time.perf_counter() + self.test_count_text = '' + self.test_count_width = 3 + self.win_load_tracker: WindowsLoadTracker | None = None + self._results: TestResults = results + self._quiet: bool = quiet + self._pgo: bool = pgo + + def log(self, line: str = '') -> None: + empty = not line + + # add the system load prefix: "load avg: 1.80 " + load_avg = self.get_load_avg() + if load_avg is not None: + line = f"load avg: {load_avg:.2f} {line}" + + # 
add the timestamp prefix: "0:01:05 " + log_time = time.perf_counter() - self.start_time + + mins, secs = divmod(int(log_time), 60) + hours, mins = divmod(mins, 60) + formatted_log_time = "%d:%02d:%02d" % (hours, mins, secs) + + line = f"{formatted_log_time} {line}" + if empty: + line = line[:-1] + + print(line, flush=True) + + def get_load_avg(self) -> float | None: + if hasattr(os, 'getloadavg'): + try: + return os.getloadavg()[0] + except OSError: + pass + if self.win_load_tracker is not None: + return self.win_load_tracker.getloadavg() + return None + + def display_progress(self, test_index: int, text: str) -> None: + if self._quiet: + return + results = self._results + + # "[ 51/405/1] test_tcl passed" + line = f"{test_index:{self.test_count_width}}{self.test_count_text}" + fails = len(results.bad) + len(results.env_changed) + if fails and not self._pgo: + line = f"{line}/{fails}" + self.log(f"[{line}] {text}") + + def set_tests(self, runtests: RunTests) -> None: + if runtests.forever: + self.test_count_text = '' + self.test_count_width = 3 + else: + self.test_count_text = '/{}'.format(len(runtests.tests)) + self.test_count_width = len(self.test_count_text) - 1 + + def start_load_tracker(self) -> None: + if not MS_WINDOWS: + return + + try: + self.win_load_tracker = WindowsLoadTracker() + except PermissionError as error: + # Standard accounts may not have access to the performance + # counters. + print_warning(f'Failed to create WindowsLoadTracker: {error}') + + def stop_load_tracker(self) -> None: + if self.win_load_tracker is None: + return + self.win_load_tracker.close() + self.win_load_tracker = None diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py index e1d19e1e4a..da63079399 100644 --- a/Lib/test/libregrtest/main.py +++ b/Lib/test/libregrtest/main.py @@ -1,42 +1,33 @@ -import datetime -import faulthandler -import json -import locale import os -import platform import random import re +import shlex import sys import sysconfig -import tempfile import time -import unittest -from test.libregrtest.cmdline import _parse_args -from test.libregrtest.runtest import ( - findtests, runtest, get_abs_module, - STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED, - INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN, - PROGRESS_MIN_TIME, format_test_result) -from test.libregrtest.setup import setup_tests -from test.libregrtest.utils import removepy, count, format_duration, printlist -from test import support -from test.support import os_helper, import_helper - - -# When tests are run from the Python build directory, it is best practice -# to keep the test files in a subfolder. This eases the cleanup of leftover -# files using the "make distclean" command. -if sysconfig.is_python_build(): - TEMPDIR = sysconfig.get_config_var('abs_builddir') - if TEMPDIR is None: - # bpo-30284: On Windows, only srcdir is available. Using abs_builddir - # mostly matters on UNIX when building Python out of the source tree, - # especially when the source tree is read only. 
- TEMPDIR = sysconfig.get_config_var('srcdir') - TEMPDIR = os.path.join(TEMPDIR, 'build') -else: - TEMPDIR = tempfile.gettempdir() -TEMPDIR = os.path.abspath(TEMPDIR) +import trace +from typing import NoReturn + +from test.support import (os_helper, MS_WINDOWS, flush_std_streams, + suppress_immortalization) + +from .cmdline import _parse_args, Namespace +from .findtests import findtests, split_test_packages, list_cases +from .logger import Logger +from .pgo import setup_pgo_tests +from .result import State, TestResult +from .results import TestResults, EXITCODE_INTERRUPTED +from .runtests import RunTests, HuntRefleak +from .setup import setup_process, setup_test_dir +from .single import run_single_test, PROGRESS_MIN_TIME +from .tsan import setup_tsan_tests +from .utils import ( + StrPath, StrJSON, TestName, TestList, TestTuple, TestFilter, + strip_py_suffix, count, format_duration, + printlist, get_temp_dir, get_work_dir, exit_timeout, + display_header, cleanup_temp_dir, print_warning, + is_cross_compiled, get_host_runner, + EXIT_TIMEOUT) class Regrtest: @@ -57,357 +48,356 @@ class Regrtest: files beginning with test_ will be used. The other default arguments (verbose, quiet, exclude, - single, randomize, findleaks, use_resources, trace, coverdir, + single, randomize, use_resources, trace, coverdir, print_slow, and random_seed) allow programmers calling main() directly to set the values that would normally be set by flags on the command line. """ - def __init__(self): - # Namespace of command line options - self.ns = None + def __init__(self, ns: Namespace, _add_python_opts: bool = False): + # Log verbosity + self.verbose: int = int(ns.verbose) + self.quiet: bool = ns.quiet + self.pgo: bool = ns.pgo + self.pgo_extended: bool = ns.pgo_extended + self.tsan: bool = ns.tsan + + # Test results + self.results: TestResults = TestResults() + self.first_state: str | None = None + + # Logger + self.logger = Logger(self.results, self.quiet, self.pgo) + + # Actions + self.want_header: bool = ns.header + self.want_list_tests: bool = ns.list_tests + self.want_list_cases: bool = ns.list_cases + self.want_wait: bool = ns.wait + self.want_cleanup: bool = ns.cleanup + self.want_rerun: bool = ns.rerun + self.want_run_leaks: bool = ns.runleaks + self.want_bisect: bool = ns.bisect + + self.ci_mode: bool = (ns.fast_ci or ns.slow_ci) + self.want_add_python_opts: bool = (_add_python_opts + and ns._add_python_opts) + + # Select tests + self.match_tests: TestFilter = ns.match_tests + self.exclude: bool = ns.exclude + self.fromfile: StrPath | None = ns.fromfile + self.starting_test: TestName | None = ns.start + self.cmdline_args: TestList = ns.args + + # Workers + self.single_process: bool = ns.single_process + if self.single_process or ns.use_mp is None: + num_workers = 0 # run sequentially in a single process + elif ns.use_mp <= 0: + num_workers = -1 # run in parallel, use the number of CPUs + else: + num_workers = ns.use_mp # run in parallel + self.num_workers: int = num_workers + self.worker_json: StrJSON | None = ns.worker_json + + # Options to run tests + self.fail_fast: bool = ns.failfast + self.fail_env_changed: bool = ns.fail_env_changed + self.fail_rerun: bool = ns.fail_rerun + self.forever: bool = ns.forever + self.output_on_failure: bool = ns.verbose3 + self.timeout: float | None = ns.timeout + if ns.huntrleaks: + warmups, runs, filename = ns.huntrleaks + filename = os.path.abspath(filename) + self.hunt_refleak: HuntRefleak | None = HuntRefleak(warmups, runs, filename) + else: + self.hunt_refleak = None 
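+        # For illustration (option value assumed): "-R 3:6:reflog.txt" on the
+        # command line arrives here as ns.huntrleaks == (3, 6, 'reflog.txt')
+        # and is stored as HuntRefleak(warmups=3, runs=6,
+        # filename=<absolute path of reflog.txt>).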
+ self.test_dir: StrPath | None = ns.testdir + self.junit_filename: StrPath | None = ns.xmlpath + self.memory_limit: str | None = ns.memlimit + self.gc_threshold: int | None = ns.threshold + self.use_resources: tuple[str, ...] = tuple(ns.use_resources) + if ns.python: + self.python_cmd: tuple[str, ...] | None = tuple(ns.python) + else: + self.python_cmd = None + self.coverage: bool = ns.trace + self.coverage_dir: StrPath | None = ns.coverdir + self._tmp_dir: StrPath | None = ns.tempdir + + # Randomize + self.randomize: bool = ns.randomize + if ('SOURCE_DATE_EPOCH' in os.environ + # don't use the variable if empty + and os.environ['SOURCE_DATE_EPOCH'] + ): + self.randomize = False + # SOURCE_DATE_EPOCH should be an integer, but use a string to not + # fail if it's not integer. random.seed() accepts a string. + # https://reproducible-builds.org/docs/source-date-epoch/ + self.random_seed: int | str = os.environ['SOURCE_DATE_EPOCH'] + elif ns.random_seed is None: + self.random_seed = random.getrandbits(32) + else: + self.random_seed = ns.random_seed # tests - self.tests = [] - self.selected = [] - - # test results - self.good = [] - self.bad = [] - self.skipped = [] - self.resource_denieds = [] - self.environment_changed = [] - self.run_no_tests = [] - self.rerun = [] - self.first_result = None - self.interrupted = False - - # used by --slow - self.test_times = [] - - # used by --coverage, trace.Trace instance - self.tracer = None + self.first_runtests: RunTests | None = None + + # used by --slowest + self.print_slowest: bool = ns.print_slow # used to display the progress bar "[ 3/100]" - self.start_time = time.monotonic() - self.test_count = '' - self.test_count_width = 1 + self.start_time = time.perf_counter() # used by --single - self.next_single_test = None - self.next_single_filename = None - - # used by --junit-xml - self.testsuite_xml = None - - self.win_load_tracker = None - - def get_executed(self): - return (set(self.good) | set(self.bad) | set(self.skipped) - | set(self.resource_denieds) | set(self.environment_changed) - | set(self.run_no_tests)) - - def accumulate_result(self, result, rerun=False): - test_name = result.test_name - ok = result.result - - if ok not in (CHILD_ERROR, INTERRUPTED) and not rerun: - self.test_times.append((result.test_time, test_name)) - - if ok == PASSED: - self.good.append(test_name) - elif ok in (FAILED, CHILD_ERROR): - if not rerun: - self.bad.append(test_name) - elif ok == ENV_CHANGED: - self.environment_changed.append(test_name) - elif ok == SKIPPED: - self.skipped.append(test_name) - elif ok == RESOURCE_DENIED: - self.skipped.append(test_name) - self.resource_denieds.append(test_name) - elif ok == TEST_DID_NOT_RUN: - self.run_no_tests.append(test_name) - elif ok == INTERRUPTED: - self.interrupted = True - else: - raise ValueError("invalid test result: %r" % ok) - - if rerun and ok not in {FAILED, CHILD_ERROR, INTERRUPTED}: - self.bad.remove(test_name) - - xml_data = result.xml_data - if xml_data: - import xml.etree.ElementTree as ET - for e in xml_data: - try: - self.testsuite_xml.append(ET.fromstring(e)) - except ET.ParseError: - print(xml_data, file=sys.__stderr__) - raise - - def display_progress(self, test_index, text): - if self.ns.quiet: - return - - # "[ 51/405/1] test_tcl passed" - line = f"{test_index:{self.test_count_width}}{self.test_count}" - fails = len(self.bad) + len(self.environment_changed) - if fails and not self.ns.pgo: - line = f"{line}/{fails}" - line = f"[{line}] {text}" - - # add the system load prefix: "load avg: 1.80 " - 
load_avg = self.getloadavg() - if load_avg is not None: - line = f"load avg: {load_avg:.2f} {line}" - - # add the timestamp prefix: "0:01:05 " - test_time = time.monotonic() - self.start_time - test_time = datetime.timedelta(seconds=int(test_time)) - line = f"{test_time} {line}" - print(line, flush=True) - - def parse_args(self, kwargs): - ns = _parse_args(sys.argv[1:], **kwargs) - - if ns.timeout and not hasattr(faulthandler, 'dump_traceback_later'): - print("Warning: The timeout option requires " - "faulthandler.dump_traceback_later", file=sys.stderr) - ns.timeout = None - - if ns.xmlpath: - support.junit_xml_list = self.testsuite_xml = [] - - # Strip .py extensions. - removepy(ns.args) - - return ns - - def find_tests(self, tests): - self.tests = tests - - if self.ns.single: - self.next_single_filename = os.path.join(TEMPDIR, 'pynexttest') + self.single_test_run: bool = ns.single + self.next_single_test: TestName | None = None + self.next_single_filename: StrPath | None = None + + def log(self, line: str = '') -> None: + self.logger.log(line) + + def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList | None]: + if tests is None: + tests = [] + if self.single_test_run: + self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest') try: with open(self.next_single_filename, 'r') as fp: next_test = fp.read().strip() - self.tests = [next_test] + tests = [next_test] except OSError: pass - if self.ns.fromfile: - self.tests = [] + if self.fromfile: + tests = [] # regex to match 'test_builtin' in line: # '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec' regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b') - with open(os.path.join(os_helper.SAVEDCWD, self.ns.fromfile)) as fp: + with open(os.path.join(os_helper.SAVEDCWD, self.fromfile)) as fp: for line in fp: line = line.split('#', 1)[0] line = line.strip() match = regex.search(line) if match is not None: - self.tests.append(match.group()) - - removepy(self.tests) - - stdtests = STDTESTS[:] - nottests = NOTTESTS.copy() - if self.ns.exclude: - for arg in self.ns.args: - if arg in stdtests: - stdtests.remove(arg) - nottests.add(arg) - self.ns.args = [] - - # if testdir is set, then we are not running the python tests suite, so - # don't add default tests to be executed or skipped (pass empty values) - if self.ns.testdir: - alltests = findtests(self.ns.testdir, list(), set()) - else: - alltests = findtests(self.ns.testdir, stdtests, nottests) + tests.append(match.group()) + + strip_py_suffix(tests) + + if self.pgo: + # add default PGO tests if no tests are specified + setup_pgo_tests(self.cmdline_args, self.pgo_extended) - if not self.ns.fromfile: - self.selected = self.tests or self.ns.args or alltests + if self.tsan: + setup_tsan_tests(self.cmdline_args) + + exclude_tests = set() + if self.exclude: + for arg in self.cmdline_args: + exclude_tests.add(arg) + self.cmdline_args = [] + + alltests = findtests(testdir=self.test_dir, + exclude=exclude_tests) + + if not self.fromfile: + selected = tests or self.cmdline_args + if selected: + selected = split_test_packages(selected) + else: + selected = alltests else: - self.selected = self.tests - if self.ns.single: - self.selected = self.selected[:1] + selected = tests + + if self.single_test_run: + selected = selected[:1] try: - pos = alltests.index(self.selected[0]) + pos = alltests.index(selected[0]) self.next_single_test = alltests[pos + 1] except IndexError: pass # Remove all the selected tests that precede start if it's set. 
- if self.ns.start: + if self.starting_test: try: - del self.selected[:self.selected.index(self.ns.start)] + del selected[:selected.index(self.starting_test)] except ValueError: - print("Couldn't find starting test (%s), using all tests" - % self.ns.start, file=sys.stderr) - - if self.ns.randomize: - if self.ns.random_seed is None: - self.ns.random_seed = random.randrange(10000000) - random.seed(self.ns.random_seed) - random.shuffle(self.selected) + print(f"Cannot find starting test: {self.starting_test}") + sys.exit(1) - def list_tests(self): - for name in self.selected: - print(name) + random.seed(self.random_seed) + if self.randomize: + random.shuffle(selected) - def _list_cases(self, suite): - for test in suite: - if isinstance(test, unittest.loader._FailedTest): - continue - if isinstance(test, unittest.TestSuite): - self._list_cases(test) - elif isinstance(test, unittest.TestCase): - if support.match_test(test): - print(test.id()) - - def list_cases(self): - support.verbose = False - support.set_match_tests(self.ns.match_tests) - - for test_name in self.selected: - abstest = get_abs_module(self.ns, test_name) - try: - suite = unittest.defaultTestLoader.loadTestsFromName(abstest) - self._list_cases(suite) - except unittest.SkipTest: - self.skipped.append(test_name) + return (tuple(selected), tests) - if self.skipped: - print(file=sys.stderr) - print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr) - printlist(self.skipped, file=sys.stderr) + @staticmethod + def list_tests(tests: TestTuple) -> None: + for name in tests: + print(name) - def rerun_failed_tests(self): - self.ns.verbose = True - self.ns.failfast = False - self.ns.verbose3 = False + def _rerun_failed_tests(self, runtests: RunTests) -> RunTests: + # Configure the runner to re-run tests + if self.num_workers == 0 and not self.single_process: + # Always run tests in fresh processes to have more deterministic + # initial state. Don't re-run tests in parallel but limit to a + # single worker process to have side effects (on the system load + # and timings) between tests. + self.num_workers = 1 + + tests, match_tests_dict = self.results.prepare_rerun() + + # Re-run failed tests + runtests = runtests.copy( + tests=tests, + rerun=True, + verbose=True, + forever=False, + fail_fast=False, + match_tests_dict=match_tests_dict, + output_on_failure=False) + self.logger.set_tests(runtests) + + msg = f"Re-running {len(tests)} failed tests in verbose mode" + if not self.single_process: + msg = f"{msg} in subprocesses" + self.log(msg) + self._run_tests_mp(runtests, self.num_workers) + else: + self.log(msg) + self.run_tests_sequentially(runtests) + return runtests + + def rerun_failed_tests(self, runtests: RunTests) -> None: + if self.python_cmd: + # Temp patch for https://github.com/python/cpython/issues/94052 + self.log( + "Re-running failed tests is not supported with --python " + "host runner option." 
+ ) + return - self.first_result = self.get_tests_result() + self.first_state = self.get_state() print() - print("Re-running failed tests in verbose mode") - self.rerun = self.bad[:] - for test_name in self.rerun: - print(f"Re-running {test_name} in verbose mode", flush=True) - self.ns.verbose = True - result = runtest(self.ns, test_name) + rerun_runtests = self._rerun_failed_tests(runtests) - self.accumulate_result(result, rerun=True) + if self.results.bad: + print(count(len(self.results.bad), 'test'), "failed again:") + printlist(self.results.bad) - if result.result == INTERRUPTED: - break + self.display_result(rerun_runtests) - if self.bad: - print(count(len(self.bad), 'test'), "failed again:") - printlist(self.bad) + def _run_bisect(self, runtests: RunTests, test: str, progress: str) -> bool: + print() + title = f"Bisect {test}" + if progress: + title = f"{title} ({progress})" + print(title) + print("#" * len(title)) + print() - self.display_result() + cmd = runtests.create_python_cmd() + cmd.extend([ + "-u", "-m", "test.bisect_cmd", + # Limit to 25 iterations (instead of 100) to not abuse CI resources + "--max-iter", "25", + "-v", + # runtests.match_tests is not used (yet) for bisect_cmd -i arg + ]) + cmd.extend(runtests.bisect_cmd_args()) + cmd.append(test) + print("+", shlex.join(cmd), flush=True) + + flush_std_streams() + + import subprocess + proc = subprocess.run(cmd, timeout=runtests.timeout) + exitcode = proc.returncode + + title = f"{title}: exit code {exitcode}" + print(title) + print("#" * len(title)) + print(flush=True) + + if exitcode: + print(f"Bisect failed with exit code {exitcode}") + return False + + return True + + def run_bisect(self, runtests: RunTests) -> None: + tests, _ = self.results.prepare_rerun(clear=False) + + for index, name in enumerate(tests, 1): + if len(tests) > 1: + progress = f"{index}/{len(tests)}" + else: + progress = "" + if not self._run_bisect(runtests, name, progress): + return - def display_result(self): + def display_result(self, runtests: RunTests) -> None: # If running the test suite for PGO then no one cares about results. 
- if self.ns.pgo: + if runtests.pgo: return + state = self.get_state() print() - print("== Tests result: %s ==" % self.get_tests_result()) - - if self.interrupted: - print("Test suite interrupted by signal SIGINT.") - - omitted = set(self.selected) - self.get_executed() - if omitted: - print() - print(count(len(omitted), "test"), "omitted:") - printlist(omitted) - - if self.good and not self.ns.quiet: - print() - if (not self.bad - and not self.skipped - and not self.interrupted - and len(self.good) > 1): - print("All", end=' ') - print(count(len(self.good), "test"), "OK.") - - if self.ns.print_slow: - self.test_times.sort(reverse=True) - print() - print("10 slowest tests:") - for test_time, test in self.test_times[:10]: - print("- %s: %s" % (test, format_duration(test_time))) - - if self.bad: - print() - print(count(len(self.bad), "test"), "failed:") - printlist(self.bad) - - if self.environment_changed: - print() - print("{} altered the execution environment:".format( - count(len(self.environment_changed), "test"))) - printlist(self.environment_changed) - - if self.skipped and not self.ns.quiet: - print() - print(count(len(self.skipped), "test"), "skipped:") - printlist(self.skipped) - - if self.rerun: - print() - print("%s:" % count(len(self.rerun), "re-run test")) - printlist(self.rerun) - - if self.run_no_tests: - print() - print(count(len(self.run_no_tests), "test"), "run no tests:") - printlist(self.run_no_tests) - - def run_tests_sequential(self): - if self.ns.trace: - import trace - self.tracer = trace.Trace(trace=False, count=True) + print(f"== Tests result: {state} ==") + + self.results.display_result(runtests.tests, + self.quiet, self.print_slowest) + + def run_test( + self, test_name: TestName, runtests: RunTests, tracer: trace.Trace | None + ) -> TestResult: + if tracer is not None: + # If we're tracing code coverage, then we don't exit with status + # if on a false return value from main. + cmd = ('result = run_single_test(test_name, runtests)') + namespace = dict(locals()) + tracer.runctx(cmd, globals=globals(), locals=namespace) + result = namespace['result'] + result.covered_lines = list(tracer.counts) + else: + result = run_single_test(test_name, runtests) + + self.results.accumulate_result(result, runtests) + + return result + + def run_tests_sequentially(self, runtests: RunTests) -> None: + if self.coverage: + tracer = trace.Trace(trace=False, count=True) + else: + tracer = None save_modules = set(sys.modules) - print("Run tests sequentially") + jobs = runtests.get_jobs() + if jobs is not None: + tests = count(jobs, 'test') + else: + tests = 'tests' + msg = f"Run {tests} sequentially in a single process" + if runtests.timeout: + msg += " (timeout: %s)" % format_duration(runtests.timeout) + self.log(msg) previous_test = None - for test_index, test_name in enumerate(self.tests, 1): - start_time = time.monotonic() + tests_iter = runtests.iter_tests() + for test_index, test_name in enumerate(tests_iter, 1): + start_time = time.perf_counter() text = test_name if previous_test: text = '%s -- %s' % (text, previous_test) - self.display_progress(test_index, text) - - if self.tracer: - # If we're tracing code coverage, then we don't exit with status - # if on a false return value from main. 
- cmd = ('result = runtest(self.ns, test_name); ' - 'self.accumulate_result(result)') - ns = dict(locals()) - self.tracer.runctx(cmd, globals=globals(), locals=ns) - result = ns['result'] - else: - result = runtest(self.ns, test_name) - self.accumulate_result(result) + self.logger.display_progress(test_index, text) - if result.result == INTERRUPTED: - break - - previous_test = format_test_result(result) - test_time = time.monotonic() - start_time - if test_time >= PROGRESS_MIN_TIME: - previous_test = "%s in %s" % (previous_test, format_duration(test_time)) - elif result[0] == PASSED: - # be quiet: say nothing if the test passed shortly - previous_test = None + result = self.run_test(test_name, runtests, tracer) # Unload the newly imported test modules (best effort finalization) new_modules = [module for module in sys.modules @@ -422,95 +412,31 @@ def run_tests_sequential(self): except (KeyError, AttributeError): pass - if previous_test: - print(previous_test) + if result.must_stop(self.fail_fast, self.fail_env_changed): + break - def _test_forever(self, tests): - while True: - for test_name in tests: - yield test_name - if self.bad: - return - if self.ns.fail_env_changed and self.environment_changed: - return - - def display_header(self): - # Print basic platform information - print("==", platform.python_implementation(), *sys.version.split()) - try: - print("==", platform.platform(aliased=True), - "%s-endian" % sys.byteorder) - except: - print("== RustPython: Need to fix platform.platform") - print("== cwd:", os.getcwd()) - cpu_count = os.cpu_count() - if cpu_count: - print("== CPU count:", cpu_count) - try: - print("== encodings: locale=%s, FS=%s" - % (locale.getpreferredencoding(False), - sys.getfilesystemencoding())) - except: - print("== RustPython: Need to fix encoding stuff") - - def get_tests_result(self): - result = [] - if self.bad: - result.append("FAILURE") - elif self.ns.fail_env_changed and self.environment_changed: - result.append("ENV CHANGED") - elif not any((self.good, self.bad, self.skipped, self.interrupted, - self.environment_changed)): - result.append("NO TEST RUN") - - if self.interrupted: - result.append("INTERRUPTED") - - if not result: - result.append("SUCCESS") - - result = ', '.join(result) - if self.first_result: - result = '%s then %s' % (self.first_result, result) - return result + previous_test = str(result) + test_time = time.perf_counter() - start_time + if test_time >= PROGRESS_MIN_TIME: + previous_test = "%s in %s" % (previous_test, format_duration(test_time)) + elif result.state == State.PASSED: + # be quiet: say nothing if the test passed shortly + previous_test = None - def run_tests(self): - # For a partial run, we do not need to clutter the output. 
- if (self.ns.header - or not(self.ns.pgo or self.ns.quiet or self.ns.single - or self.tests or self.ns.args)): - self.display_header() - - if self.ns.huntrleaks: - warmup, repetitions, _ = self.ns.huntrleaks - if warmup < 3: - msg = ("WARNING: Running tests with --huntrleaks/-R and less than " - "3 warmup repetitions can give false positives!") - print(msg, file=sys.stdout, flush=True) - - if self.ns.randomize: - print("Using random seed", self.ns.random_seed) - - if self.ns.forever: - self.tests = self._test_forever(list(self.selected)) - self.test_count = '' - self.test_count_width = 3 - else: - self.tests = iter(self.selected) - self.test_count = '/{}'.format(len(self.selected)) - self.test_count_width = len(self.test_count) - 1 + if previous_test: + print(previous_test) - if self.ns.use_mp: - from test.libregrtest.runtest_mp import run_tests_multiprocess - run_tests_multiprocess(self) - else: - self.run_tests_sequential() + def get_state(self) -> str: + state = self.results.get_state(self.fail_env_changed) + if self.first_state: + state = f'{self.first_state} then {state}' + return state - def finalize(self): - if self.win_load_tracker is not None: - self.win_load_tracker.close() - self.win_load_tracker = None + def _run_tests_mp(self, runtests: RunTests, num_workers: int) -> None: + from .run_workers import RunWorkers + RunWorkers(num_workers, runtests, self.logger, self.results).run() + def finalize_tests(self, coverage: trace.CoverageResults | None) -> None: if self.next_single_filename: if self.next_single_test: with open(self.next_single_filename, 'w') as fp: @@ -518,141 +444,321 @@ def finalize(self): else: os.unlink(self.next_single_filename) - if self.tracer: - r = self.tracer.results() - r.write_results(show_missing=True, summary=True, - coverdir=self.ns.coverdir) + if coverage is not None: + # uses a new-in-Python 3.13 keyword argument that mypy doesn't know about yet: + coverage.write_results(show_missing=True, summary=True, # type: ignore[call-arg] + coverdir=self.coverage_dir, + ignore_missing_files=True) + + if self.want_run_leaks: + os.system("leaks %d" % os.getpid()) + + if self.junit_filename: + self.results.write_junit(self.junit_filename) + + def display_summary(self) -> None: + if self.first_runtests is None: + raise ValueError( + "Should never call `display_summary()` before calling `_run_test()`" + ) + duration = time.perf_counter() - self.logger.start_time + filtered = bool(self.match_tests) + + # Total duration print() - duration = time.monotonic() - self.start_time print("Total duration: %s" % format_duration(duration)) - print("Tests result: %s" % self.get_tests_result()) - if self.ns.runleaks: - os.system("leaks %d" % os.getpid()) + self.results.display_summary(self.first_runtests, filtered) + + # Result + state = self.get_state() + print(f"Result: {state}") + + def create_run_tests(self, tests: TestTuple) -> RunTests: + return RunTests( + tests, + fail_fast=self.fail_fast, + fail_env_changed=self.fail_env_changed, + match_tests=self.match_tests, + match_tests_dict=None, + rerun=False, + forever=self.forever, + pgo=self.pgo, + pgo_extended=self.pgo_extended, + output_on_failure=self.output_on_failure, + timeout=self.timeout, + verbose=self.verbose, + quiet=self.quiet, + hunt_refleak=self.hunt_refleak, + test_dir=self.test_dir, + use_junit=(self.junit_filename is not None), + coverage=self.coverage, + memory_limit=self.memory_limit, + gc_threshold=self.gc_threshold, + use_resources=self.use_resources, + python_cmd=self.python_cmd, + randomize=self.randomize, 
+            random_seed=self.random_seed,
+        )
+
+    def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+        if self.hunt_refleak and self.hunt_refleak.warmups < 3:
+            msg = ("WARNING: Running tests with --huntrleaks/-R and "
+                   "less than 3 warmup repetitions can give false positives!")
+            print(msg, file=sys.stdout, flush=True)
+
+        if self.num_workers < 0:
+            # Use all CPUs + 2 extra worker processes for tests
+            # that like to sleep
+            #
+            # os.process_cpu_count() is new in Python 3.13;
+            # mypy doesn't know about it yet
+            self.num_workers = (os.process_cpu_count() or 1) + 2  # type: ignore[attr-defined]

-    def save_xml_result(self):
-        if not self.ns.xmlpath and not self.testsuite_xml:
-            return
+        # For a partial run, we do not need to clutter the output.
+        if (self.want_header
+            or not(self.pgo or self.quiet or self.single_test_run
+                   or tests or self.cmdline_args)):
+            display_header(self.use_resources, self.python_cmd)

-        import xml.etree.ElementTree as ET
-        root = ET.Element("testsuites")
+        print("Using random seed:", self.random_seed)

-        # Manually count the totals for the overall summary
-        totals = {'tests': 0, 'errors': 0, 'failures': 0}
-        for suite in self.testsuite_xml:
-            root.append(suite)
-            for k in totals:
-                try:
-                    totals[k] += int(suite.get(k, 0))
-                except ValueError:
-                    pass
+        runtests = self.create_run_tests(selected)
+        self.first_runtests = runtests
+        self.logger.set_tests(runtests)

-        for k, v in totals.items():
-            root.set(k, str(v))
-
-        xmlpath = os.path.join(os_helper.SAVEDCWD, self.ns.xmlpath)
-        with open(xmlpath, 'wb') as f:
-            for s in ET.tostringlist(root):
-                f.write(s)
-
-    def main(self, tests=None, **kwargs):
-        global TEMPDIR
-        self.ns = self.parse_args(kwargs)
-
-        if self.ns.tempdir:
-            TEMPDIR = self.ns.tempdir
-        elif self.ns.worker_args:
-            ns_dict, _ = json.loads(self.ns.worker_args)
-            TEMPDIR = ns_dict.get("tempdir") or TEMPDIR
-
-        os.makedirs(TEMPDIR, exist_ok=True)
-
-        # Define a writable temp dir that will be used as cwd while running
-        # the tests. The name of the dir includes the pid to allow parallel
-        # testing (see the -j option).
-        test_cwd = 'test_python_{}'.format(os.getpid())
-        test_cwd = os.path.join(TEMPDIR, test_cwd)
-
-        # Run the tests in a context manager that temporarily changes the CWD to a
-        # temporary and writable directory. If it's not possible to create or
-        # change the CWD, the original CWD will be used. The original CWD is
-        # available from os_helper.SAVEDCWD.
-        with os_helper.temp_cwd(test_cwd, quiet=True):
-            self._main(tests, kwargs)
-
-    def getloadavg(self):
-        if self.win_load_tracker is not None:
-            return self.win_load_tracker.getloadavg()
-
-        if hasattr(os, 'getloadavg'):
-            return os.getloadavg()[0]
-
-        return None
-
-    def _main(self, tests, kwargs):
-        if self.ns.huntrleaks:
-            warmup, repetitions, _ = self.ns.huntrleaks
-            if warmup < 1 or repetitions < 1:
-                msg = ("Invalid values for the --huntrleaks/-R parameters. The "
-                       "number of warmups and repetitions must be at least 1 "
-                       "each (1:1).")
-                print(msg, file=sys.stderr, flush=True)
-                sys.exit(2)
-
-        if self.ns.worker_args is not None:
-            from test.libregrtest.runtest_mp import run_tests_worker
-            run_tests_worker(self.ns.worker_args)
-
-        if self.ns.wait:
-            input("Press any key to continue...")
+        setup_process()

-        support.PGO = self.ns.pgo
+        if (runtests.hunt_refleak is not None) and (not self.num_workers):
+            # gh-109739: WindowsLoadTracker thread interferes with refleak check
+            use_load_tracker = False
+        else:
+            # WindowsLoadTracker is only needed on Windows
+            use_load_tracker = MS_WINDOWS

-        setup_tests(self.ns)
+        if use_load_tracker:
+            self.logger.start_load_tracker()
+        try:
+            if self.num_workers:
+                self._run_tests_mp(runtests, self.num_workers)
+            else:
+                # gh-117783: don't immortalize deferred objects when tracking
+                # refleaks. Only relevant for the free-threaded build.
+                with suppress_immortalization(runtests.hunt_refleak):
+                    self.run_tests_sequentially(runtests)
+
+            coverage = self.results.get_coverage_results()
+            self.display_result(runtests)
+
+            if self.want_rerun and self.results.need_rerun():
+                self.rerun_failed_tests(runtests)
+
+            if self.want_bisect and self.results.need_rerun():
+                self.run_bisect(runtests)
+        finally:
+            if use_load_tracker:
+                self.logger.stop_load_tracker()
+
+        self.display_summary()
+        self.finalize_tests(coverage)
+
+        return self.results.get_exitcode(self.fail_env_changed,
+                                         self.fail_rerun)
+
+    def run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+        os.makedirs(self.tmp_dir, exist_ok=True)
+        work_dir = get_work_dir(self.tmp_dir)
+
+        # Put a timeout on Python exit
+        with exit_timeout():
+            # Run the tests in a context manager that temporarily changes the
+            # CWD to a temporary and writable directory. If it's not possible
+            # to create or change the CWD, the original CWD will be used.
+            # The original CWD is available from os_helper.SAVEDCWD.
+            with os_helper.temp_cwd(work_dir, quiet=True):
+                # When using multiprocessing, worker processes will use
+                # work_dir as their parent temporary directory. So when the
+                # main process exits, it also removes the subdirectories of
+                # the worker processes.
+                return self._run_tests(selected, tests)
+
+    def _add_cross_compile_opts(self, regrtest_opts):
+        # WASM/WASI buildbot builders pass multiple PYTHON environment
+        # variables such as PYTHONPATH and _PYTHON_HOSTRUNNER.
+        keep_environ = bool(self.python_cmd)
+        environ = None
+
+        # Are we using cross-compilation?
+        cross_compile = is_cross_compiled()
+
+        # Get HOSTRUNNER
+        hostrunner = get_host_runner()
+
+        if cross_compile:
+            # emulate -E, but keep PYTHONPATH + cross compile env vars,
+            # so the test executable can load the correct sysconfigdata file.
+            keep = {
+                '_PYTHON_PROJECT_BASE',
+                '_PYTHON_HOST_PLATFORM',
+                '_PYTHON_SYSCONFIGDATA_NAME',
+                "_PYTHON_SYSCONFIGDATA_PATH",
+                'PYTHONPATH'
+            }
+            old_environ = os.environ
+            new_environ = {
+                name: value for name, value in os.environ.items()
+                if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
+            }
+            # Only set environ if at least one variable was removed
+            if new_environ != old_environ:
+                environ = new_environ
+                keep_environ = True
+
+        if cross_compile and hostrunner:
+            if self.num_workers == 0 and not self.single_process:
+                # For now use only two cores for cross-compiled builds;
+                # hostrunner can be expensive.
+                regrtest_opts.extend(['-j', '2'])
+
+            # If HOSTRUNNER is set and -p/--python option is not given, then
+            # use hostrunner to execute the python binary for tests.
+            if not self.python_cmd:
+                buildpython = sysconfig.get_config_var("BUILDPYTHON")
+                python_cmd = f"{hostrunner} {buildpython}"
+                regrtest_opts.extend(["--python", python_cmd])
+                keep_environ = True
+
+        return (environ, keep_environ)
+
+    def _add_ci_python_opts(self, python_opts, keep_environ):
+        # --fast-ci and --slow-ci add options to Python:
+        # "-u -W default -bb -E"
+
+        # Unbuffered stdout and stderr
+        if not sys.stdout.write_through:
+            python_opts.append('-u')
+
+        # Add warnings filter 'default'
+        if 'default' not in sys.warnoptions:
+            python_opts.extend(('-W', 'default'))
+
+        # Error on bytes/str comparison
+        if sys.flags.bytes_warning < 2:
+            python_opts.append('-bb')
+
+        if not keep_environ:
+            # Ignore PYTHON* environment variables
+            if not sys.flags.ignore_environment:
+                python_opts.append('-E')
+
+    def _execute_python(self, cmd, environ):
+        # Make sure that messages before execv() are logged
+        sys.stdout.flush()
+        sys.stderr.flush()
+
+        cmd_text = shlex.join(cmd)
+        try:
+            print(f"+ {cmd_text}", flush=True)
+
+            if hasattr(os, 'execv') and not MS_WINDOWS:
+                os.execv(cmd[0], cmd)
+                # On success, execv() does not return.
+                # On error, it raises an OSError.
+            else:
+                import subprocess
+                with subprocess.Popen(cmd, env=environ) as proc:
+                    try:
+                        proc.wait()
+                    except KeyboardInterrupt:
+                        # There is no need to call proc.terminate(): on CTRL+C,
+                        # SIGTERM is also sent to the child process.
+                        try:
+                            proc.wait(timeout=EXIT_TIMEOUT)
+                        except subprocess.TimeoutExpired:
+                            proc.kill()
+                            proc.wait()
+                        sys.exit(EXITCODE_INTERRUPTED)
+
+                sys.exit(proc.returncode)
+        except Exception as exc:
+            print_warning(f"Failed to change Python options: {exc!r}\n"
+                          f"Command: {cmd_text}")
+            # continue executing main()
+
+    def _add_python_opts(self) -> None:
+        python_opts: list[str] = []
+        regrtest_opts: list[str] = []
+
+        environ, keep_environ = self._add_cross_compile_opts(regrtest_opts)
+        if self.ci_mode:
+            self._add_ci_python_opts(python_opts, keep_environ)
+
+        if (not python_opts) and (not regrtest_opts) and (environ is None):
+            # Nothing changed: nothing to do
+            return

-        self.find_tests(tests)
+        # Create new command line
+        cmd = list(sys.orig_argv)
+        if python_opts:
+            cmd[1:1] = python_opts
+        if regrtest_opts:
+            cmd.extend(regrtest_opts)
+        cmd.append("--dont-add-python-opts")

-        if self.ns.list_tests:
-            self.list_tests()
-            sys.exit(0)
+        self._execute_python(cmd, environ)

-        if self.ns.list_cases:
-            self.list_cases()
-            sys.exit(0)
-
-        # If we're on windows and this is the parent runner (not a worker),
-        # track the load average.
-        # TODO: RUSTPYTHON
-        # if sys.platform == 'win32' and (self.ns.worker_args is None):
-        #     from test.libregrtest.win_utils import WindowsLoadTracker
+    def _init(self):
+        # Set sys.stdout encoder error handler to backslashreplace,
+        # similar to sys.stderr error handler, to avoid UnicodeEncodeError
+        # when printing a traceback or any other non-encodable character.
+        sys.stdout.reconfigure(errors="backslashreplace")

-        # try:
-        #     self.win_load_tracker = WindowsLoadTracker()
-        # except FileNotFoundError as error:
-        #     # Windows IoT Core and Windows Nano Server do not provide
-        #     # typeperf.exe for x64, x86 or ARM
-        #     print(f'Failed to create WindowsLoadTracker: {error}')
+        if self.junit_filename and not os.path.isabs(self.junit_filename):
+            self.junit_filename = os.path.abspath(self.junit_filename)

-        self.run_tests()
-        self.display_result()
+        strip_py_suffix(self.cmdline_args)

-        if self.ns.verbose2 and self.bad:
-            self.rerun_failed_tests()
+        self._tmp_dir = get_temp_dir(self._tmp_dir)

-        self.finalize()
+    @property
+    def tmp_dir(self) -> StrPath:
+        if self._tmp_dir is None:
+            raise ValueError(
+                "Should never use `.tmp_dir` before calling `.main()`"
+            )
+        return self._tmp_dir

-        self.save_xml_result()
+    def main(self, tests: TestList | None = None) -> NoReturn:
+        if self.want_add_python_opts:
+            self._add_python_opts()
+
+        self._init()
+
+        if self.want_cleanup:
+            cleanup_temp_dir(self.tmp_dir)
+            sys.exit(0)
+
+        if self.want_wait:
+            input("Press any key to continue...")
+
+        setup_test_dir(self.test_dir)
+        selected, tests = self.find_tests(tests)
+
+        exitcode = 0
+        if self.want_list_tests:
+            self.list_tests(selected)
+        elif self.want_list_cases:
+            list_cases(selected,
+                       match_tests=self.match_tests,
+                       test_dir=self.test_dir)
+        else:
+            exitcode = self.run_tests(selected, tests)

-        if self.bad:
-            sys.exit(2)
-        if self.interrupted:
-            sys.exit(130)
-        if self.ns.fail_env_changed and self.environment_changed:
-            sys.exit(3)
-        sys.exit(0)
+        sys.exit(exitcode)


-def main(tests=None, **kwargs):
+def main(tests=None, _add_python_opts=False, **kwargs) -> NoReturn:
     """Run the Python suite."""
-    Regrtest().main(tests=tests, **kwargs)
+    ns = _parse_args(sys.argv[1:], **kwargs)
+    Regrtest(ns, _add_python_opts=_add_python_opts).main(tests=tests)
diff --git a/Lib/test/libregrtest/mypy.ini b/Lib/test/libregrtest/mypy.ini
new file mode 100644
index 0000000000..3fa9afcb7a
--- /dev/null
+++ b/Lib/test/libregrtest/mypy.ini
@@ -0,0 +1,26 @@
+# Config file for running mypy on libregrtest.
+# Run mypy by invoking `mypy --config-file Lib/test/libregrtest/mypy.ini`
+# on the command-line from the repo root
+
+[mypy]
+files = Lib/test/libregrtest
+explicit_package_bases = True
+python_version = 3.12
+platform = linux
+pretty = True
+
+# Enable most of the stricter settings
+enable_error_code = ignore-without-code
+strict = True
+
+# Various stricter settings that we can't yet enable
+# Try to enable these in the following order:
+disallow_incomplete_defs = False
+disallow_untyped_calls = False
+disallow_untyped_defs = False
+check_untyped_defs = False
+warn_return_any = False
+
+# Various internal modules that typeshed deliberately doesn't have stubs for:
+[mypy-_abc.*,_opcode.*,_overlapped.*,_testcapi.*,_testinternalcapi.*,test.*]
+ignore_missing_imports = True
diff --git a/Lib/test/libregrtest/pgo.py b/Lib/test/libregrtest/pgo.py
new file mode 100644
index 0000000000..f762345c88
--- /dev/null
+++ b/Lib/test/libregrtest/pgo.py
@@ -0,0 +1,56 @@
+# Set of tests run by default if --pgo is specified.  The tests below were
+# chosen based on the following criteria: either they exercise a commonly used
+# C extension module or type, or they run some relatively typical Python code.
+# Long running tests should be avoided because the PGO instrumented executable
+# runs slowly.
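+#
+# Illustrative usage (invocation assumed, not part of this patch): a typical
+# PGO training run is "./python -m test --pgo" with no test arguments, which
+# falls back to this default list via setup_pgo_tests() below, while
+# "--pgo-extended" skips the list so the full test suite is used for training.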
+PGO_TESTS = [ + 'test_array', + 'test_base64', + 'test_binascii', + 'test_binop', + 'test_bisect', + 'test_bytes', + 'test_bz2', + 'test_cmath', + 'test_codecs', + 'test_collections', + 'test_complex', + 'test_dataclasses', + 'test_datetime', + 'test_decimal', + 'test_difflib', + 'test_embed', + 'test_float', + 'test_fstring', + 'test_functools', + 'test_generators', + 'test_hashlib', + 'test_heapq', + 'test_int', + 'test_itertools', + 'test_json', + 'test_long', + 'test_lzma', + 'test_math', + 'test_memoryview', + 'test_operator', + 'test_ordered_dict', + 'test_patma', + 'test_pickle', + 'test_pprint', + 'test_re', + 'test_set', + 'test_sqlite3', + 'test_statistics', + 'test_str', + 'test_struct', + 'test_tabnanny', + 'test_time', + 'test_xml_etree', + 'test_xml_etree_c', +] + +def setup_pgo_tests(cmdline_args, pgo_extended: bool) -> None: + if not cmdline_args and not pgo_extended: + # run default set of tests for PGO training + cmdline_args[:] = PGO_TESTS[:] diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py index 03747f7f75..2e49b31e25 100644 --- a/Lib/test/libregrtest/refleak.py +++ b/Lib/test/libregrtest/refleak.py @@ -1,9 +1,16 @@ import os -import re import sys import warnings from inspect import isabstract +from typing import Any + from test import support +from test.support import os_helper +from test.support import refleak_helper + +from .runtests import HuntRefleak +from .utils import clear_caches + try: from _abc import _get_dump except ImportError: @@ -17,7 +24,33 @@ def _get_dump(cls): cls._abc_negative_cache, cls._abc_negative_cache_version) -def dash_R(ns, test_name, test_func): +def save_support_xml(filename): + if support.junit_xml_list is None: + return + + import pickle + with open(filename, 'xb') as fp: + pickle.dump(support.junit_xml_list, fp) + support.junit_xml_list = None + + +def restore_support_xml(filename): + try: + fp = open(filename, 'rb') + except FileNotFoundError: + return + + import pickle + with fp: + xml_list = pickle.load(fp) + os.unlink(filename) + + support.junit_xml_list = xml_list + + +def runtest_refleak(test_name, test_func, + hunt_refleak: HuntRefleak, + quiet: bool): """Run a test multiple times, looking for reference leaks. 
Returns: @@ -39,12 +72,14 @@ def dash_R(ns, test_name, test_func): fs = warnings.filters[:] ps = copyreg.dispatch_table.copy() pic = sys.path_importer_cache.copy() + zdc: dict[str, Any] | None try: import zipimport except ImportError: zdc = None # Run unmodified on platforms without zipimport support else: - zdc = zipimport._zip_directory_cache.copy() + # private attribute that mypy doesn't know about: + zdc = zipimport._zip_directory_cache.copy() # type: ignore[attr-defined] abcs = {} for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]: if not isabstract(abc): @@ -60,9 +95,10 @@ def dash_R(ns, test_name, test_func): def get_pooled_int(value): return int_pool.setdefault(value, value) - nwarmup, ntracked, fname = ns.huntrleaks - fname = os.path.join(os_helper.SAVEDCWD, fname) - repcount = nwarmup + ntracked + warmups = hunt_refleak.warmups + runs = hunt_refleak.runs + filename = hunt_refleak.filename + repcount = warmups + runs # Pre-allocate to ensure that the loop doesn't allocate anything new rep_range = list(range(repcount)) @@ -71,45 +107,81 @@ def get_pooled_int(value): fd_deltas = [0] * repcount getallocatedblocks = sys.getallocatedblocks gettotalrefcount = sys.gettotalrefcount - fd_count = support.fd_count - + getunicodeinternedsize = sys.getunicodeinternedsize + fd_count = os_helper.fd_count # initialize variables to make pyflakes quiet - rc_before = alloc_before = fd_before = 0 - - if not ns.quiet: - print("beginning", repcount, "repetitions", file=sys.stderr) - print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr, - flush=True) - + rc_before = alloc_before = fd_before = interned_immortal_before = 0 + + if not quiet: + print("beginning", repcount, "repetitions. Showing number of leaks " + "(. for 0 or less, X for 10 or more)", + file=sys.stderr) + numbers = ("1234567890"*(repcount//10 + 1))[:repcount] + numbers = numbers[:warmups] + ':' + numbers[warmups:] + print(numbers, file=sys.stderr, flush=True) + + xml_filename = 'refleak-xml.tmp' + result = None dash_R_cleanup(fs, ps, pic, zdc, abcs) + support.gc_collect() for i in rep_range: - test_func() + current = refleak_helper._hunting_for_refleaks + refleak_helper._hunting_for_refleaks = True + try: + result = test_func() + finally: + refleak_helper._hunting_for_refleaks = current + + save_support_xml(xml_filename) dash_R_cleanup(fs, ps, pic, zdc, abcs) - - # dash_R_cleanup() ends with collecting cyclic trash: - # read memory statistics immediately after. - alloc_after = getallocatedblocks() + support.gc_collect() + + # Read memory statistics immediately after the garbage collection. + # Also, readjust the reference counts and alloc blocks by ignoring + # any strings that might have been interned during test_func. These + # strings will be deallocated at runtime shutdown + interned_immortal_after = getunicodeinternedsize( + # Use an internal-only keyword argument that mypy doesn't know yet + _only_immortal=True) # type: ignore[call-arg] + alloc_after = getallocatedblocks() - interned_immortal_after rc_after = gettotalrefcount() fd_after = fd_count() - if not ns.quiet: - print('.', end='', file=sys.stderr, flush=True) - rc_deltas[i] = get_pooled_int(rc_after - rc_before) alloc_deltas[i] = get_pooled_int(alloc_after - alloc_before) fd_deltas[i] = get_pooled_int(fd_after - fd_before) + if not quiet: + # use max, not sum, so total_leaks is one of the pooled ints + total_leaks = max(rc_deltas[i], alloc_deltas[i], fd_deltas[i]) + if total_leaks <= 0: + symbol = '.' 
+ elif total_leaks < 10: + symbol = ( + '.', '1', '2', '3', '4', '5', '6', '7', '8', '9', + )[total_leaks] + else: + symbol = 'X' + if i == warmups: + print(' ', end='', file=sys.stderr, flush=True) + print(symbol, end='', file=sys.stderr, flush=True) + del total_leaks + del symbol + alloc_before = alloc_after rc_before = rc_after fd_before = fd_after + interned_immortal_before = interned_immortal_after + + restore_support_xml(xml_filename) - if not ns.quiet: + if not quiet: print(file=sys.stderr) # These checkers return False on success, True on failure def check_rc_deltas(deltas): - # Checker for reference counters and memomry blocks. + # Checker for reference counters and memory blocks. # # bpo-30776: Try to ignore false positives: # @@ -133,16 +205,22 @@ def check_fd_deltas(deltas): (fd_deltas, 'file descriptors', check_fd_deltas) ]: # ignore warmup runs - deltas = deltas[nwarmup:] - if checker(deltas): + deltas = deltas[warmups:] + failing = checker(deltas) + suspicious = any(deltas) + if failing or suspicious: msg = '%s leaked %s %s, sum=%s' % ( test_name, deltas, item_name, sum(deltas)) - print(msg, file=sys.stderr, flush=True) - with open(fname, "a") as refrep: - print(msg, file=refrep) - refrep.flush() - failed = True - return failed + print(msg, end='', file=sys.stderr) + if failing: + print(file=sys.stderr, flush=True) + with open(filename, "a", encoding="utf-8") as refrep: + print(msg, file=refrep) + refrep.flush() + failed = True + else: + print(' (this is fine)', file=sys.stderr, flush=True) + return (failed, result) def dash_R_cleanup(fs, ps, pic, zdc, abcs): @@ -163,121 +241,29 @@ def dash_R_cleanup(fs, ps, pic, zdc, abcs): zipimport._zip_directory_cache.clear() zipimport._zip_directory_cache.update(zdc) - # clear type cache - sys._clear_type_cache() - # Clear ABC registries, restoring previously saved ABC registries. + # ignore deprecation warning for collections.abc.ByteString abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__] abs_classes = filter(isabstract, abs_classes) for abc in abs_classes: for obj in abc.__subclasses__() + [abc]: - for ref in abcs.get(obj, set()): - if ref() is not None: - obj.register(ref()) + refs = abcs.get(obj, None) + if refs is not None: + obj._abc_registry_clear() + for ref in refs: + subclass = ref() + if subclass is not None: + obj.register(subclass) obj._abc_caches_clear() + # Clear caches clear_caches() - -def clear_caches(): - # Clear the warnings registry, so they can be displayed again - for mod in sys.modules.values(): - if hasattr(mod, '__warningregistry__'): - del mod.__warningregistry__ - - # Flush standard output, so that buffered data is sent to the OS and - # associated Python objects are reclaimed. - for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__): - if stream is not None: - stream.flush() - - # Clear assorted module caches. 
- # Don't worry about resetting the cache if the module is not loaded - try: - distutils_dir_util = sys.modules['distutils.dir_util'] - except KeyError: - pass - else: - distutils_dir_util._path_created.clear() - re.purge() - - try: - _strptime = sys.modules['_strptime'] - except KeyError: - pass - else: - _strptime._regex_cache.clear() - - try: - urllib_parse = sys.modules['urllib.parse'] - except KeyError: - pass - else: - urllib_parse.clear_cache() - - try: - urllib_request = sys.modules['urllib.request'] - except KeyError: - pass - else: - urllib_request.urlcleanup() - - try: - linecache = sys.modules['linecache'] - except KeyError: - pass - else: - linecache.clearcache() - - try: - mimetypes = sys.modules['mimetypes'] - except KeyError: - pass - else: - mimetypes._default_mime_types() - - try: - filecmp = sys.modules['filecmp'] - except KeyError: - pass - else: - filecmp._cache.clear() - - try: - struct = sys.modules['struct'] - except KeyError: - pass - else: - # TODO: fix - # struct._clearcache() - pass - - try: - doctest = sys.modules['doctest'] - except KeyError: - pass - else: - doctest.master = None - - try: - ctypes = sys.modules['ctypes'] - except KeyError: - pass - else: - ctypes._reset_cache() - - try: - typing = sys.modules['typing'] - except KeyError: - pass - else: - for f in typing._cleanups: - f() - - support.gc_collect() + # Clear other caches last (previous function calls can re-populate them): + sys._clear_internal_caches() -def warm_caches(): +def warm_caches() -> None: # char cache s = bytes(range(256)) for i in range(256): diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py new file mode 100644 index 0000000000..7553efe5e8 --- /dev/null +++ b/Lib/test/libregrtest/result.py @@ -0,0 +1,225 @@ +import dataclasses +import json +from typing import Any + +from .utils import ( + StrJSON, TestName, FilterTuple, + format_duration, normalize_test_name, print_warning) + + +@dataclasses.dataclass(slots=True) +class TestStats: + tests_run: int = 0 + failures: int = 0 + skipped: int = 0 + + @staticmethod + def from_unittest(result): + return TestStats(result.testsRun, + len(result.failures), + len(result.skipped)) + + @staticmethod + def from_doctest(results): + return TestStats(results.attempted, + results.failed, + results.skipped) + + def accumulate(self, stats): + self.tests_run += stats.tests_run + self.failures += stats.failures + self.skipped += stats.skipped + + +# Avoid enum.Enum to reduce the number of imports when tests are run +class State: + PASSED = "PASSED" + FAILED = "FAILED" + SKIPPED = "SKIPPED" + UNCAUGHT_EXC = "UNCAUGHT_EXC" + REFLEAK = "REFLEAK" + ENV_CHANGED = "ENV_CHANGED" + RESOURCE_DENIED = "RESOURCE_DENIED" + INTERRUPTED = "INTERRUPTED" + WORKER_FAILED = "WORKER_FAILED" # non-zero worker process exit code + WORKER_BUG = "WORKER_BUG" # exception when running a worker + DID_NOT_RUN = "DID_NOT_RUN" + TIMEOUT = "TIMEOUT" + + @staticmethod + def is_failed(state): + return state in { + State.FAILED, + State.UNCAUGHT_EXC, + State.REFLEAK, + State.WORKER_FAILED, + State.WORKER_BUG, + State.TIMEOUT} + + @staticmethod + def has_meaningful_duration(state): + # Consider that the duration is meaningless for these cases. + # For example, if a whole test file is skipped, its duration + # is unlikely to be the duration of executing its tests, + # but just the duration to execute code which skips the test. 
+        return state not in {
+            State.SKIPPED,
+            State.RESOURCE_DENIED,
+            State.INTERRUPTED,
+            State.WORKER_FAILED,
+            State.WORKER_BUG,
+            State.DID_NOT_RUN}
+
+    @staticmethod
+    def must_stop(state):
+        return state in {
+            State.INTERRUPTED,
+            State.WORKER_BUG,
+        }
+
+
+FileName = str
+LineNo = int
+Location = tuple[FileName, LineNo]
+
+
+@dataclasses.dataclass(slots=True)
+class TestResult:
+    test_name: TestName
+    state: str | None = None
+    # Test duration in seconds
+    duration: float | None = None
+    xml_data: list[str] | None = None
+    stats: TestStats | None = None
+
+    # errors and failures copied from support.TestFailedWithDetails
+    errors: list[tuple[str, str]] | None = None
+    failures: list[tuple[str, str]] | None = None
+
+    # partial coverage in a worker run; not used by sequential in-process runs
+    covered_lines: list[Location] | None = None
+
+    def is_failed(self, fail_env_changed: bool) -> bool:
+        if self.state == State.ENV_CHANGED:
+            return fail_env_changed
+        return State.is_failed(self.state)
+
+    def _format_failed(self):
+        if self.errors and self.failures:
+            le = len(self.errors)
+            lf = len(self.failures)
+            error_s = "error" + ("s" if le > 1 else "")
+            failure_s = "failure" + ("s" if lf > 1 else "")
+            return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
+
+        if self.errors:
+            le = len(self.errors)
+            error_s = "error" + ("s" if le > 1 else "")
+            return f"{self.test_name} failed ({le} {error_s})"
+
+        if self.failures:
+            lf = len(self.failures)
+            failure_s = "failure" + ("s" if lf > 1 else "")
+            return f"{self.test_name} failed ({lf} {failure_s})"
+
+        return f"{self.test_name} failed"
+
+    def __str__(self) -> str:
+        match self.state:
+            case State.PASSED:
+                return f"{self.test_name} passed"
+            case State.FAILED:
+                return self._format_failed()
+            case State.SKIPPED:
+                return f"{self.test_name} skipped"
+            case State.UNCAUGHT_EXC:
+                return f"{self.test_name} failed (uncaught exception)"
+            case State.REFLEAK:
+                return f"{self.test_name} failed (reference leak)"
+            case State.ENV_CHANGED:
+                return f"{self.test_name} failed (env changed)"
+            case State.RESOURCE_DENIED:
+                return f"{self.test_name} skipped (resource denied)"
+            case State.INTERRUPTED:
+                return f"{self.test_name} interrupted"
+            case State.WORKER_FAILED:
+                return f"{self.test_name} worker non-zero exit code"
+            case State.WORKER_BUG:
+                return f"{self.test_name} worker bug"
+            case State.DID_NOT_RUN:
+                return f"{self.test_name} ran no tests"
+            case State.TIMEOUT:
+                assert self.duration is not None, "self.duration is None"
+                return f"{self.test_name} timed out ({format_duration(self.duration)})"
+            case _:
+                raise ValueError(f"unknown result state: {self.state!r}")
+
+    def has_meaningful_duration(self):
+        return State.has_meaningful_duration(self.state)
+
+    def set_env_changed(self):
+        if self.state is None or self.state == State.PASSED:
+            self.state = State.ENV_CHANGED
+
+    def must_stop(self, fail_fast: bool, fail_env_changed: bool) -> bool:
+        if State.must_stop(self.state):
+            return True
+        if fail_fast and self.is_failed(fail_env_changed):
+            return True
+        return False
+
+    def get_rerun_match_tests(self) -> FilterTuple | None:
+        match_tests = []
+
+        errors = self.errors or []
+        failures = self.failures or []
+        for error_list, is_error in (
+            (errors, True),
+            (failures, False),
+        ):
+            for full_name, *_ in error_list:
+                match_name = normalize_test_name(full_name, is_error=is_error)
+                if match_name is None:
+                    # 'setUpModule (test.test_sys)': don't filter tests
+                    return None
+                if not match_name:
+                    error_type = "ERROR" if is_error else
"FAIL" + print_warning(f"rerun failed to parse {error_type} test name: " + f"{full_name!r}: don't filter tests") + return None + match_tests.append(match_name) + + if not match_tests: + return None + return tuple(match_tests) + + def write_json_into(self, file) -> None: + json.dump(self, file, cls=_EncodeTestResult) + + @staticmethod + def from_json(worker_json: StrJSON) -> 'TestResult': + return json.loads(worker_json, object_hook=_decode_test_result) + + +class _EncodeTestResult(json.JSONEncoder): + def default(self, o: Any) -> dict[str, Any]: + if isinstance(o, TestResult): + result = dataclasses.asdict(o) + result["__test_result__"] = o.__class__.__name__ + return result + else: + return super().default(o) + + +def _decode_test_result(data: dict[str, Any]) -> TestResult | dict[str, Any]: + if "__test_result__" in data: + data.pop('__test_result__') + if data['stats'] is not None: + data['stats'] = TestStats(**data['stats']) + if data['covered_lines'] is not None: + data['covered_lines'] = [ + tuple(loc) for loc in data['covered_lines'] + ] + return TestResult(**data) + else: + return data diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py new file mode 100644 index 0000000000..9eda926966 --- /dev/null +++ b/Lib/test/libregrtest/results.py @@ -0,0 +1,276 @@ +import sys +import trace +from typing import TYPE_CHECKING + +from .runtests import RunTests +from .result import State, TestResult, TestStats, Location +from .utils import ( + StrPath, TestName, TestTuple, TestList, FilterDict, + printlist, count, format_duration) + +if TYPE_CHECKING: + from xml.etree.ElementTree import Element + + +# Python uses exit code 1 when an exception is not caught +# argparse.ArgumentParser.error() uses exit code 2 +EXITCODE_BAD_TEST = 2 +EXITCODE_ENV_CHANGED = 3 +EXITCODE_NO_TESTS_RAN = 4 +EXITCODE_RERUN_FAIL = 5 +EXITCODE_INTERRUPTED = 130 # 128 + signal.SIGINT=2 + + +class TestResults: + def __init__(self) -> None: + self.bad: TestList = [] + self.good: TestList = [] + self.rerun_bad: TestList = [] + self.skipped: TestList = [] + self.resource_denied: TestList = [] + self.env_changed: TestList = [] + self.run_no_tests: TestList = [] + self.rerun: TestList = [] + self.rerun_results: list[TestResult] = [] + + self.interrupted: bool = False + self.worker_bug: bool = False + self.test_times: list[tuple[float, TestName]] = [] + self.stats = TestStats() + # used by --junit-xml + self.testsuite_xml: list['Element'] = [] + # used by -T with -j + self.covered_lines: set[Location] = set() + + def is_all_good(self) -> bool: + return (not self.bad + and not self.skipped + and not self.interrupted + and not self.worker_bug) + + def get_executed(self) -> set[TestName]: + return (set(self.good) | set(self.bad) | set(self.skipped) + | set(self.resource_denied) | set(self.env_changed) + | set(self.run_no_tests)) + + def no_tests_run(self) -> bool: + return not any((self.good, self.bad, self.skipped, self.interrupted, + self.env_changed)) + + def get_state(self, fail_env_changed: bool) -> str: + state = [] + if self.bad: + state.append("FAILURE") + elif fail_env_changed and self.env_changed: + state.append("ENV CHANGED") + elif self.no_tests_run(): + state.append("NO TESTS RAN") + + if self.interrupted: + state.append("INTERRUPTED") + if self.worker_bug: + state.append("WORKER BUG") + if not state: + state.append("SUCCESS") + + return ', '.join(state) + + def get_exitcode(self, fail_env_changed: bool, fail_rerun: bool) -> int: + exitcode = 0 + if self.bad: + exitcode = EXITCODE_BAD_TEST + elif 
self.interrupted: + exitcode = EXITCODE_INTERRUPTED + elif fail_env_changed and self.env_changed: + exitcode = EXITCODE_ENV_CHANGED + elif self.no_tests_run(): + exitcode = EXITCODE_NO_TESTS_RAN + elif fail_rerun and self.rerun: + exitcode = EXITCODE_RERUN_FAIL + elif self.worker_bug: + exitcode = EXITCODE_BAD_TEST + return exitcode + + def accumulate_result(self, result: TestResult, runtests: RunTests) -> None: + test_name = result.test_name + rerun = runtests.rerun + fail_env_changed = runtests.fail_env_changed + + match result.state: + case State.PASSED: + self.good.append(test_name) + case State.ENV_CHANGED: + self.env_changed.append(test_name) + self.rerun_results.append(result) + case State.SKIPPED: + self.skipped.append(test_name) + case State.RESOURCE_DENIED: + self.resource_denied.append(test_name) + case State.INTERRUPTED: + self.interrupted = True + case State.DID_NOT_RUN: + self.run_no_tests.append(test_name) + case _: + if result.is_failed(fail_env_changed): + self.bad.append(test_name) + self.rerun_results.append(result) + else: + raise ValueError(f"invalid test state: {result.state!r}") + + if result.state == State.WORKER_BUG: + self.worker_bug = True + + if result.has_meaningful_duration() and not rerun: + if result.duration is None: + raise ValueError("result.duration is None") + self.test_times.append((result.duration, test_name)) + if result.stats is not None: + self.stats.accumulate(result.stats) + if rerun: + self.rerun.append(test_name) + if result.covered_lines: + # we don't care about trace counts so we don't have to sum them up + self.covered_lines.update(result.covered_lines) + xml_data = result.xml_data + if xml_data: + self.add_junit(xml_data) + + def get_coverage_results(self) -> trace.CoverageResults: + counts = {loc: 1 for loc in self.covered_lines} + return trace.CoverageResults(counts=counts) + + def need_rerun(self) -> bool: + return bool(self.rerun_results) + + def prepare_rerun(self, *, clear: bool = True) -> tuple[TestTuple, FilterDict]: + tests: TestList = [] + match_tests_dict = {} + for result in self.rerun_results: + tests.append(result.test_name) + + match_tests = result.get_rerun_match_tests() + # ignore empty match list + if match_tests: + match_tests_dict[result.test_name] = match_tests + + if clear: + # Clear previously failed tests + self.rerun_bad.extend(self.bad) + self.bad.clear() + self.env_changed.clear() + self.rerun_results.clear() + + return (tuple(tests), match_tests_dict) + + def add_junit(self, xml_data: list[str]) -> None: + import xml.etree.ElementTree as ET + for e in xml_data: + try: + self.testsuite_xml.append(ET.fromstring(e)) + except ET.ParseError: + print(xml_data, file=sys.__stderr__) + raise + + def write_junit(self, filename: StrPath) -> None: + if not self.testsuite_xml: + # Don't create empty XML file + return + + import xml.etree.ElementTree as ET + root = ET.Element("testsuites") + + # Manually count the totals for the overall summary + totals = {'tests': 0, 'errors': 0, 'failures': 0} + for suite in self.testsuite_xml: + root.append(suite) + for k in totals: + try: + totals[k] += int(suite.get(k, 0)) + except ValueError: + pass + + for k, v in totals.items(): + root.set(k, str(v)) + + with open(filename, 'wb') as f: + for s in ET.tostringlist(root): + f.write(s) + + def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None: + if print_slowest: + self.test_times.sort(reverse=True) + print() + print("10 slowest tests:") + for test_time, test in self.test_times[:10]: + print("- %s: %s" % 
(test, format_duration(test_time)))
+
+        all_tests = []
+        omitted = set(tests) - self.get_executed()
+
+        # less important
+        all_tests.append((sorted(omitted), "test", "{} omitted:"))
+        if not quiet:
+            all_tests.append((self.skipped, "test", "{} skipped:"))
+            all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
+            all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
+
+        # more important
+        all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
+        all_tests.append((self.rerun, "re-run test", "{}:"))
+        all_tests.append((self.bad, "test", "{} failed:"))
+
+        for tests_list, count_text, title_format in all_tests:
+            if tests_list:
+                print()
+                count_text = count(len(tests_list), count_text)
+                print(title_format.format(count_text))
+                printlist(tests_list)
+
+        if self.good and not quiet:
+            print()
+            text = count(len(self.good), "test")
+            text = f"{text} OK."
+            if (self.is_all_good() and len(self.good) > 1):
+                text = f"All {text}"
+            print(text)
+
+        if self.interrupted:
+            print()
+            print("Test suite interrupted by signal SIGINT.")
+
+    def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
+        # Total tests
+        stats = self.stats
+        text = f'run={stats.tests_run:,}'
+        if filtered:
+            text = f"{text} (filtered)"
+        report = [text]
+        if stats.failures:
+            report.append(f'failures={stats.failures:,}')
+        if stats.skipped:
+            report.append(f'skipped={stats.skipped:,}')
+        print(f"Total tests: {' '.join(report)}")
+
+        # Total test files
+        all_tests = [self.good, self.bad, self.rerun,
+                     self.skipped,
+                     self.env_changed, self.run_no_tests]
+        run = sum(map(len, all_tests))
+        text = f'run={run}'
+        if not first_runtests.forever:
+            ntest = len(first_runtests.tests)
+            text = f"{text}/{ntest}"
+        if filtered:
+            text = f"{text} (filtered)"
+        report = [text]
+        for name, tests in (
+            ('failed', self.bad),
+            ('env_changed', self.env_changed),
+            ('skipped', self.skipped),
+            ('resource_denied', self.resource_denied),
+            ('rerun', self.rerun),
+            ('run_no_tests', self.run_no_tests),
+        ):
+            if tests:
+                report.append(f'{name}={len(tests)}')
+        print(f"Total test files: {' '.join(report)}")
diff --git a/Lib/test/libregrtest/run_workers.py b/Lib/test/libregrtest/run_workers.py
new file mode 100644
index 0000000000..3c6d13215f
--- /dev/null
+++ b/Lib/test/libregrtest/run_workers.py
@@ -0,0 +1,621 @@
+import contextlib
+import dataclasses
+import faulthandler
+import os.path
+import queue
+import signal
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import traceback
+from typing import Any, Literal, TextIO
+
+from test import support
+from test.support import os_helper, MS_WINDOWS
+
+from .logger import Logger
+from .result import TestResult, State
+from .results import TestResults
+from .runtests import RunTests, WorkerRunTests, JsonFile, JsonFileType
+from .single import PROGRESS_MIN_TIME
+from .utils import (
+    StrPath, TestName,
+    format_duration, print_warning, count, plural, get_signal_name)
+from .worker import create_worker_process, USE_PROCESS_GROUP
+
+if MS_WINDOWS:
+    import locale
+    import msvcrt
+
+
+
+# Display the running tests if nothing happened in the last N seconds
+PROGRESS_UPDATE = 30.0  # seconds
+assert PROGRESS_UPDATE >= PROGRESS_MIN_TIME
+
+# Kill the main process after 5 minutes. It is supposed to write an update
+# every PROGRESS_UPDATE seconds. Tolerate 5 minutes for Python's slowest
+# buildbot workers.
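+# (The assert below keeps MAIN_PROCESS_TIMEOUT at least as long as the
+# regular PROGRESS_UPDATE interval, so the watchdog cannot fire between
+# two expected progress updates.)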
+MAIN_PROCESS_TIMEOUT = 5 * 60.0
+assert MAIN_PROCESS_TIMEOUT >= PROGRESS_UPDATE
+
+# Time to wait until a worker completes: should be immediate
+WAIT_COMPLETED_TIMEOUT = 30.0  # seconds
+
+# Time to wait for a killed process (in seconds)
+WAIT_KILLED_TIMEOUT = 60.0
+
+
+# We do not use a generator so multiple threads can call next().
+class MultiprocessIterator:
+
+    """A thread-safe iterator over tests for multiprocess mode."""
+
+    def __init__(self, tests_iter):
+        self.lock = threading.Lock()
+        self.tests_iter = tests_iter
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        with self.lock:
+            if self.tests_iter is None:
+                raise StopIteration
+            return next(self.tests_iter)
+
+    def stop(self):
+        with self.lock:
+            self.tests_iter = None
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class MultiprocessResult:
+    result: TestResult
+    # bpo-45410: stderr is written into stdout to keep messages in order
+    worker_stdout: str | None = None
+    err_msg: str | None = None
+
+
+class WorkerThreadExited:
+    """Indicates that a worker thread has exited"""
+
+ExcStr = str
+QueueOutput = tuple[Literal[False], MultiprocessResult] | tuple[Literal[True], ExcStr]
+QueueContent = QueueOutput | WorkerThreadExited
+
+
+class ExitThread(Exception):
+    pass
+
+
+class WorkerError(Exception):
+    def __init__(self,
+                 test_name: TestName,
+                 err_msg: str | None,
+                 stdout: str | None,
+                 state: str):
+        result = TestResult(test_name, state=state)
+        self.mp_result = MultiprocessResult(result, stdout, err_msg)
+        super().__init__()
+
+
+_NOT_RUNNING = ""
+
+
+class WorkerThread(threading.Thread):
+    def __init__(self, worker_id: int, runner: "RunWorkers") -> None:
+        super().__init__()
+        self.worker_id = worker_id
+        self.runtests = runner.runtests
+        self.pending = runner.pending
+        self.output = runner.output
+        self.timeout = runner.worker_timeout
+        self.log = runner.log
+        self.test_name = _NOT_RUNNING
+        self.start_time = time.monotonic()
+        self._popen: subprocess.Popen[str] | None = None
+        self._killed = False
+        self._stopped = False
+
+    def __repr__(self) -> str:
+        info = [f'WorkerThread #{self.worker_id}']
+        if self.is_alive():
+            info.append("running")
+        else:
+            info.append('stopped')
+        test = self.test_name
+        if test:
+            info.append(f'test={test}')
+        popen = self._popen
+        if popen is not None:
+            dt = time.monotonic() - self.start_time
+            info.extend((f'pid={popen.pid}',
+                         f'time={format_duration(dt)}'))
+        return '<%s>' % ' '.join(info)
+
+    def _kill(self) -> None:
+        popen = self._popen
+        if popen is None:
+            return
+
+        if self._killed:
+            return
+        self._killed = True
+
+        if USE_PROCESS_GROUP:
+            what = f"{self} process group"
+        else:
+            what = f"{self} process"
+
+        print(f"Kill {what}", file=sys.stderr, flush=True)
+        try:
+            if USE_PROCESS_GROUP:
+                os.killpg(popen.pid, signal.SIGKILL)
+            else:
+                popen.kill()
+        except ProcessLookupError:
+            # popen.kill(): the process already completed and the WorkerThread
+            # read its exit status, but Popen.send_signal() read the returncode
+            # just before Popen.wait() set it.
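+            # Nothing left to do: the worker process already exited.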
+            pass
+        except OSError as exc:
+            print_warning(f"Failed to kill {what}: {exc!r}")
+
+    def stop(self) -> None:
+        # Method called from a different thread to stop this thread
+        self._stopped = True
+        self._kill()
+
+    def _run_process(self, runtests: WorkerRunTests, output_fd: int,
+                     tmp_dir: StrPath | None = None) -> int | None:
+        popen = create_worker_process(runtests, output_fd, tmp_dir)
+        self._popen = popen
+        self._killed = False
+
+        try:
+            if self._stopped:
+                # If kill() was called before self._popen was set, the kill
+                # was a no-op and the process is still running. Call kill()
+                # again to ensure that the process is killed.
+                self._kill()
+                raise ExitThread
+
+            try:
+                # gh-94026: stdout+stderr are written to tempfile
+                retcode = popen.wait(timeout=self.timeout)
+                assert retcode is not None
+                return retcode
+            except subprocess.TimeoutExpired:
+                if self._stopped:
+                    # kill() has been called: communicate() fails on reading
+                    # closed stdout
+                    raise ExitThread
+
+                # On timeout, kill the process
+                self._kill()
+
+                # None means TIMEOUT for the caller
+                retcode = None
+                # bpo-38207: Don't attempt to call communicate() again: it
+                # can hang until all child processes using stdout pipes
+                # complete.
+            except OSError:
+                if self._stopped:
+                    # kill() has been called: communicate() fails
+                    # on reading closed stdout
+                    raise ExitThread
+                raise
+            return None
+        except:
+            self._kill()
+            raise
+        finally:
+            self._wait_completed()
+            self._popen = None
+
+    def create_stdout(self, stack: contextlib.ExitStack) -> TextIO:
+        """Create stdout temporary file (file descriptor)."""
+
+        if MS_WINDOWS:
+            # gh-95027: When stdout is not a TTY, Python uses the ANSI code
+            # page for the sys.stdout encoding. If the main process runs in a
+            # terminal, sys.stdout uses WindowsConsoleIO with UTF-8 encoding.
+            encoding = locale.getencoding()
+        else:
+            encoding = sys.stdout.encoding
+
+        # gh-94026: Write stdout+stderr to a tempfile as a workaround for
+        # non-blocking pipes on Emscripten with NodeJS.
+        # gh-109425: Use "backslashreplace" error handler: log corrupted
+        # stdout+stderr, instead of failing with a UnicodeDecodeError and not
+        # logging stdout+stderr at all.
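+        # The worker process inherits this file's descriptor (passed as
+        # output_fd to create_worker_process()); read_stdout() reads it back.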
+ stdout_file = tempfile.TemporaryFile('w+', + encoding=encoding, + errors='backslashreplace') + stack.enter_context(stdout_file) + return stdout_file + + def create_json_file(self, stack: contextlib.ExitStack) -> tuple[JsonFile, TextIO | None]: + """Create JSON file.""" + + json_file_use_stdout = self.runtests.json_file_use_stdout() + if json_file_use_stdout: + json_file = JsonFile(None, JsonFileType.STDOUT) + json_tmpfile = None + else: + json_tmpfile = tempfile.TemporaryFile('w+', encoding='utf8') + stack.enter_context(json_tmpfile) + + json_fd = json_tmpfile.fileno() + if MS_WINDOWS: + # The msvcrt module is only available on Windows; + # we run mypy with `--platform=linux` in CI + json_handle: int = msvcrt.get_osfhandle(json_fd) # type: ignore[attr-defined] + json_file = JsonFile(json_handle, + JsonFileType.WINDOWS_HANDLE) + else: + json_file = JsonFile(json_fd, JsonFileType.UNIX_FD) + return (json_file, json_tmpfile) + + def create_worker_runtests(self, test_name: TestName, json_file: JsonFile) -> WorkerRunTests: + tests = (test_name,) + if self.runtests.rerun: + match_tests = self.runtests.get_match_tests(test_name) + else: + match_tests = None + + kwargs: dict[str, Any] = {} + if match_tests: + kwargs['match_tests'] = [(test, True) for test in match_tests] + if self.runtests.output_on_failure: + kwargs['verbose'] = True + kwargs['output_on_failure'] = False + return self.runtests.create_worker_runtests( + tests=tests, + json_file=json_file, + **kwargs) + + def run_tmp_files(self, worker_runtests: WorkerRunTests, + stdout_fd: int) -> tuple[int | None, list[StrPath]]: + # gh-93353: Check for leaked temporary files in the parent process, + # since the deletion of temporary files can happen late during + # Python finalization: too late for libregrtest. + if not support.is_wasi: + # Don't check for leaked temporary files and directories if Python is + # run on WASI. WASI doesn't pass environment variables like TMPDIR to + # worker processes. 
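+            # Use a fresh private directory per run so that any file left
+            # behind can be attributed to this test and removed below.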
+ tmp_dir = tempfile.mkdtemp(prefix="test_python_") + tmp_dir = os.path.abspath(tmp_dir) + try: + retcode = self._run_process(worker_runtests, + stdout_fd, tmp_dir) + finally: + tmp_files = os.listdir(tmp_dir) + os_helper.rmtree(tmp_dir) + else: + retcode = self._run_process(worker_runtests, stdout_fd) + tmp_files = [] + + return (retcode, tmp_files) + + def read_stdout(self, stdout_file: TextIO) -> str: + stdout_file.seek(0) + try: + return stdout_file.read().strip() + except Exception as exc: + # gh-101634: Catch UnicodeDecodeError if stdout cannot be + # decoded from encoding + raise WorkerError(self.test_name, + f"Cannot read process stdout: {exc}", + stdout=None, + state=State.WORKER_BUG) + + def read_json(self, json_file: JsonFile, json_tmpfile: TextIO | None, + stdout: str) -> tuple[TestResult, str]: + try: + if json_tmpfile is not None: + json_tmpfile.seek(0) + worker_json = json_tmpfile.read() + elif json_file.file_type == JsonFileType.STDOUT: + stdout, _, worker_json = stdout.rpartition("\n") + stdout = stdout.rstrip() + else: + with json_file.open(encoding='utf8') as json_fp: + worker_json = json_fp.read() + except Exception as exc: + # gh-101634: Catch UnicodeDecodeError if stdout cannot be + # decoded from encoding + err_msg = f"Failed to read worker process JSON: {exc}" + raise WorkerError(self.test_name, err_msg, stdout, + state=State.WORKER_BUG) + + if not worker_json: + raise WorkerError(self.test_name, "empty JSON", stdout, + state=State.WORKER_BUG) + + try: + result = TestResult.from_json(worker_json) + except Exception as exc: + # gh-101634: Catch UnicodeDecodeError if stdout cannot be + # decoded from encoding + err_msg = f"Failed to parse worker process JSON: {exc}" + raise WorkerError(self.test_name, err_msg, stdout, + state=State.WORKER_BUG) + + return (result, stdout) + + def _runtest(self, test_name: TestName) -> MultiprocessResult: + with contextlib.ExitStack() as stack: + stdout_file = self.create_stdout(stack) + json_file, json_tmpfile = self.create_json_file(stack) + worker_runtests = self.create_worker_runtests(test_name, json_file) + + retcode: str | int | None + retcode, tmp_files = self.run_tmp_files(worker_runtests, + stdout_file.fileno()) + + stdout = self.read_stdout(stdout_file) + + if retcode is None: + raise WorkerError(self.test_name, stdout=stdout, + err_msg=None, + state=State.TIMEOUT) + if retcode != 0: + name = get_signal_name(retcode) + if name: + retcode = f"{retcode} ({name})" + raise WorkerError(self.test_name, f"Exit code {retcode}", stdout, + state=State.WORKER_FAILED) + + result, stdout = self.read_json(json_file, json_tmpfile, stdout) + + if tmp_files: + msg = (f'\n\n' + f'Warning -- {test_name} leaked temporary files ' + f'({len(tmp_files)}): {", ".join(sorted(tmp_files))}') + stdout += msg + result.set_env_changed() + + return MultiprocessResult(result, stdout) + + def run(self) -> None: + fail_fast = self.runtests.fail_fast + fail_env_changed = self.runtests.fail_env_changed + try: + while not self._stopped: + try: + test_name = next(self.pending) + except StopIteration: + break + + self.start_time = time.monotonic() + self.test_name = test_name + try: + mp_result = self._runtest(test_name) + except WorkerError as exc: + mp_result = exc.mp_result + finally: + self.test_name = _NOT_RUNNING + mp_result.result.duration = time.monotonic() - self.start_time + self.output.put((False, mp_result)) + + if mp_result.result.must_stop(fail_fast, fail_env_changed): + break + except ExitThread: + pass + except BaseException: + 
self.output.put((True, traceback.format_exc()))
+        finally:
+            self.output.put(WorkerThreadExited())
+
+    def _wait_completed(self) -> None:
+        popen = self._popen
+        # only needed for mypy:
+        if popen is None:
+            raise ValueError("Should never access `._popen` before calling `.run()`")
+
+        try:
+            popen.wait(WAIT_COMPLETED_TIMEOUT)
+        except (subprocess.TimeoutExpired, OSError) as exc:
+            print_warning(f"Failed to wait for {self} completion "
+                          f"(timeout={format_duration(WAIT_COMPLETED_TIMEOUT)}): "
+                          f"{exc!r}")
+
+    def wait_stopped(self, start_time: float) -> None:
+        # bpo-38207: RunWorkers.stop_workers() called self.stop()
+        # which killed the process. Sometimes, killing the process from the
+        # main thread does not interrupt popen.communicate() in the
+        # WorkerThread. This loop with a timeout is a workaround for that.
+        #
+        # Moreover, if this method fails to join the thread, it is likely
+        # that Python will hang at exit while calling threading._shutdown()
+        # which tries again to join the blocked thread. Regrtest.main()
+        # uses EXIT_TIMEOUT to work around this second bug.
+        while True:
+            # Write a message every second
+            self.join(1.0)
+            if not self.is_alive():
+                break
+            dt = time.monotonic() - start_time
+            self.log(f"Waiting for {self} thread for {format_duration(dt)}")
+            if dt > WAIT_KILLED_TIMEOUT:
+                print_warning(f"Failed to join {self} in {format_duration(dt)}")
+                break
+
+
+def get_running(workers: list[WorkerThread]) -> str | None:
+    running: list[str] = []
+    for worker in workers:
+        test_name = worker.test_name
+        if test_name == _NOT_RUNNING:
+            continue
+        dt = time.monotonic() - worker.start_time
+        if dt >= PROGRESS_MIN_TIME:
+            text = f'{test_name} ({format_duration(dt)})'
+            running.append(text)
+    if not running:
+        return None
+    return f"running ({len(running)}): {', '.join(running)}"
+
+
+class RunWorkers:
+    def __init__(self, num_workers: int, runtests: RunTests,
+                 logger: Logger, results: TestResults) -> None:
+        self.num_workers = num_workers
+        self.runtests = runtests
+        self.log = logger.log
+        self.display_progress = logger.display_progress
+        self.results: TestResults = results
+        self.live_worker_count = 0
+
+        self.output: queue.Queue[QueueContent] = queue.Queue()
+        tests_iter = runtests.iter_tests()
+        self.pending = MultiprocessIterator(tests_iter)
+        self.timeout = runtests.timeout
+        if self.timeout is not None:
+            # Rely on faulthandler to kill a worker process. This timeout is
+            # the fallback for when faulthandler fails to kill a worker
+            # process. Give faulthandler a maximum of 5 minutes to kill the
+            # worker.
+            self.worker_timeout: float | None = min(self.timeout * 1.5, self.timeout + 5 * 60)
+        else:
+            self.worker_timeout = None
+        self.workers: list[WorkerThread] = []
+
+        jobs = self.runtests.get_jobs()
+        if jobs is not None:
+            # Don't spawn more threads than the number of jobs:
+            # these worker threads would never get anything to do.
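+            # (get_jobs() returns None only in --forever mode, where the
+            # number of test runs is unbounded.)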
+            self.num_workers = min(self.num_workers, jobs)
+
+    def start_workers(self) -> None:
+        self.workers = [WorkerThread(index, self)
+                        for index in range(1, self.num_workers + 1)]
+        jobs = self.runtests.get_jobs()
+        if jobs is not None:
+            tests = count(jobs, 'test')
+        else:
+            tests = 'tests'
+        nworkers = len(self.workers)
+        processes = plural(nworkers, "process", "processes")
+        msg = (f"Run {tests} in parallel using "
+               f"{nworkers} worker {processes}")
+        if self.timeout and self.worker_timeout is not None:
+            msg += (" (timeout: %s, worker timeout: %s)"
+                    % (format_duration(self.timeout),
+                       format_duration(self.worker_timeout)))
+        self.log(msg)
+        for worker in self.workers:
+            worker.start()
+            self.live_worker_count += 1
+
+    def stop_workers(self) -> None:
+        start_time = time.monotonic()
+        for worker in self.workers:
+            worker.stop()
+        for worker in self.workers:
+            worker.wait_stopped(start_time)
+
+    def _get_result(self) -> QueueOutput | None:
+        pgo = self.runtests.pgo
+        use_faulthandler = (self.timeout is not None)
+
+        # bpo-46205: check the status of workers every iteration to avoid
+        # waiting forever on an empty queue.
+        while self.live_worker_count > 0:
+            if use_faulthandler:
+                faulthandler.dump_traceback_later(MAIN_PROCESS_TIMEOUT,
+                                                  exit=True)
+
+            # wait for a thread
+            try:
+                result = self.output.get(timeout=PROGRESS_UPDATE)
+                if isinstance(result, WorkerThreadExited):
+                    self.live_worker_count -= 1
+                    continue
+                return result
+            except queue.Empty:
+                pass
+
+            if not pgo:
+                # display progress
+                running = get_running(self.workers)
+                if running:
+                    self.log(running)
+        return None
+
+    def display_result(self, mp_result: MultiprocessResult) -> None:
+        result = mp_result.result
+        pgo = self.runtests.pgo
+
+        text = str(result)
+        if mp_result.err_msg:
+            # WORKER_BUG
+            text += ' (%s)' % mp_result.err_msg
+        elif (result.duration and result.duration >= PROGRESS_MIN_TIME and not pgo):
+            text += ' (%s)' % format_duration(result.duration)
+        if not pgo:
+            running = get_running(self.workers)
+            if running:
+                text += f' -- {running}'
+        self.display_progress(self.test_index, text)
+
+    def _process_result(self, item: QueueOutput) -> TestResult:
+        """Process a worker's queue item and return its TestResult."""
+        if item[0]:
+            # Thread got an exception
+            format_exc = item[1]
+            print_warning(f"regrtest worker thread failed: {format_exc}")
+            result = TestResult("<regrtest worker>", state=State.WORKER_BUG)
+            self.results.accumulate_result(result, self.runtests)
+            return result
+
+        self.test_index += 1
+        mp_result = item[1]
+        result = mp_result.result
+        self.results.accumulate_result(result, self.runtests)
+        self.display_result(mp_result)
+
+        # Display worker stdout
+        if not self.runtests.output_on_failure:
+            show_stdout = True
+        else:
+            # --verbose3 ignores stdout on success
+            show_stdout = (result.state != State.PASSED)
+        if show_stdout:
+            stdout = mp_result.worker_stdout
+            if stdout:
+                print(stdout, flush=True)
+
+        return result
+
+    def run(self) -> None:
+        fail_fast = self.runtests.fail_fast
+        fail_env_changed = self.runtests.fail_env_changed
+
+        self.start_workers()
+
+        self.test_index = 0
+        try:
+            while True:
+                item = self._get_result()
+                if item is None:
+                    break
+
+                result = self._process_result(item)
+                if result.must_stop(fail_fast, fail_env_changed):
+                    break
+        except KeyboardInterrupt:
+            print()
+            self.results.interrupted = True
+        finally:
+            if self.timeout is not None:
+                faulthandler.cancel_dump_traceback_later()
+
+            # Always ensure that all worker processes are no longer
+            # running when we exit this function
+            self.pending.stop()
+            
self.stop_workers() diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py deleted file mode 100644 index e2af18f349..0000000000 --- a/Lib/test/libregrtest/runtest.py +++ /dev/null @@ -1,328 +0,0 @@ -import collections -import faulthandler -import functools -# import gc -import importlib -import io -import os -import sys -import time -import traceback -import unittest - -from test import support -from test.support import os_helper, import_helper -from test.libregrtest.refleak import dash_R, clear_caches -from test.libregrtest.save_env import saved_test_environment -from test.libregrtest.utils import print_warning - - -# Test result constants. -PASSED = 1 -FAILED = 0 -ENV_CHANGED = -1 -SKIPPED = -2 -RESOURCE_DENIED = -3 -INTERRUPTED = -4 -CHILD_ERROR = -5 # error in a child process -TEST_DID_NOT_RUN = -6 # error in a child process - -_FORMAT_TEST_RESULT = { - PASSED: '%s passed', - FAILED: '%s failed', - ENV_CHANGED: '%s failed (env changed)', - SKIPPED: '%s skipped', - RESOURCE_DENIED: '%s skipped (resource denied)', - INTERRUPTED: '%s interrupted', - CHILD_ERROR: '%s crashed', - TEST_DID_NOT_RUN: '%s run no tests', -} - -# Minimum duration of a test to display its duration or to mention that -# the test is running in background -PROGRESS_MIN_TIME = 30.0 # seconds - -# small set of tests to determine if we have a basically functioning interpreter -# (i.e. if any of these fail, then anything else is likely to follow) -STDTESTS = [ - # 'test_grammar', - # 'test_opcodes', - # 'test_dict', - # 'test_builtin', - # 'test_exceptions', - # 'test_types', - # 'test_unittest', - # 'test_doctest', - # 'test_doctest2', - # 'test_support' -] - -# set of tests that we don't want to be executed when using regrtest -NOTTESTS = set() - - -# used by --findleaks, store for gc.garbage -FOUND_GARBAGE = [] - - -def format_test_result(result): - fmt = _FORMAT_TEST_RESULT.get(result.result, "%s") - return fmt % result.test_name - - -def findtestdir(path=None): - return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir - - -def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS): - """Return a list of all applicable test modules.""" - testdir = findtestdir(testdir) - names = os.listdir(testdir) - tests = [] - others = set(stdtests) | nottests - for name in names: - mod, ext = os.path.splitext(name) - if mod[:5] == "test_" and ext in (".py", "") and mod not in others: - tests.append(mod) - return stdtests + sorted(tests) - - -def get_abs_module(ns, test_name): - if test_name.startswith('test.') or ns.testdir: - return test_name - else: - # Import it from the test package - return 'test.' + test_name - - -TestResult = collections.namedtuple('TestResult', - 'test_name result test_time xml_data') - -def _runtest(ns, test_name): - # Handle faulthandler timeout, capture stdout+stderr, XML serialization - # and measure time. 
- - output_on_failure = ns.verbose3 - - use_timeout = (ns.timeout is not None) - if use_timeout: - faulthandler.dump_traceback_later(ns.timeout, exit=True) - - start_time = time.perf_counter() - try: - support.set_match_tests(ns.match_tests) - support.junit_xml_list = xml_list = [] if ns.xmlpath else None - if ns.failfast: - support.failfast = True - - if output_on_failure: - support.verbose = True - - stream = io.StringIO() - orig_stdout = sys.stdout - orig_stderr = sys.stderr - try: - sys.stdout = stream - sys.stderr = stream - result = _runtest_inner(ns, test_name, - display_failure=False) - if result != PASSED: - output = stream.getvalue() - orig_stderr.write(output) - orig_stderr.flush() - finally: - sys.stdout = orig_stdout - sys.stderr = orig_stderr - else: - # Tell tests to be moderately quiet - support.verbose = ns.verbose - - result = _runtest_inner(ns, test_name, - display_failure=not ns.verbose) - - if xml_list: - import xml.etree.ElementTree as ET - xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list] - else: - xml_data = None - - test_time = time.perf_counter() - start_time - - return TestResult(test_name, result, test_time, xml_data) - finally: - if use_timeout: - faulthandler.cancel_dump_traceback_later() - support.junit_xml_list = None - - -def runtest(ns, test_name): - """Run a single test. - - ns -- regrtest namespace of options - test_name -- the name of the test - - Returns the tuple (result, test_time, xml_data), where result is one - of the constants: - - INTERRUPTED KeyboardInterrupt - RESOURCE_DENIED test skipped because resource denied - SKIPPED test skipped for some other reason - ENV_CHANGED test failed because it changed the execution environment - FAILED test failed - PASSED test passed - EMPTY_TEST_SUITE test ran no subtests. - - If ns.xmlpath is not None, xml_data is a list containing each - generated testsuite element. - """ - try: - return _runtest(ns, test_name) - except: - if not ns.pgo: - msg = traceback.format_exc() - print(f"test {test_name} crashed -- {msg}", - file=sys.stderr, flush=True) - return TestResult(test_name, FAILED, 0.0, None) - - -def _test_module(the_module): - loader = unittest.TestLoader() - tests = loader.loadTestsFromModule(the_module) - for error in loader.errors: - print(error, file=sys.stderr) - if loader.errors: - raise Exception("errors while loading tests") - support.run_unittest(tests) - - -def _runtest_inner2(ns, test_name): - # Load the test function, run the test function, handle huntrleaks - # and findleaks to detect leaks - - abstest = get_abs_module(ns, test_name) - - # remove the module from sys.module to reload it if it was already imported - import_helper.unload(abstest) - - the_module = importlib.import_module(abstest) - - # If the test has a test_main, that will run the appropriate - # tests. If not, use normal unittest test loading. 
- test_runner = getattr(the_module, "test_main", None) - if test_runner is None: - test_runner = functools.partial(_test_module, the_module) - - try: - if ns.huntrleaks: - # Return True if the test leaked references - refleak = dash_R(ns, test_name, test_runner) - else: - test_runner() - refleak = False - finally: - cleanup_test_droppings(test_name, ns.verbose) - - support.gc_collect() - - # if gc.garbage: - # support.environment_altered = True - # print_warning(f"{test_name} created {len(gc.garbage)} " - # f"uncollectable object(s).") - - # # move the uncollectable objects somewhere, - # # so we don't see them again - # FOUND_GARBAGE.extend(gc.garbage) - # gc.garbage.clear() - - support.reap_children() - - return refleak - - -def _runtest_inner(ns, test_name, display_failure=True): - # Detect environment changes, handle exceptions. - - # Reset the environment_altered flag to detect if a test altered - # the environment - support.environment_altered = False - - if ns.pgo: - display_failure = False - - try: - clear_caches() - - # with saved_test_environment(test_name, ns.verbose, ns.quiet, pgo=ns.pgo) as environment: - refleak = _runtest_inner2(ns, test_name) - except support.ResourceDenied as msg: - if not ns.quiet and not ns.pgo: - print(f"{test_name} skipped -- {msg}", flush=True) - return RESOURCE_DENIED - except unittest.SkipTest as msg: - if not ns.quiet and not ns.pgo: - print(f"{test_name} skipped -- {msg}", flush=True) - return SKIPPED - except support.TestFailed as exc: - msg = f"test {test_name} failed" - if display_failure: - msg = f"{msg} -- {exc}" - print(msg, file=sys.stderr, flush=True) - return FAILED - except support.TestDidNotRun: - return TEST_DID_NOT_RUN - except KeyboardInterrupt: - print() - return INTERRUPTED - except: - if not ns.pgo: - msg = traceback.format_exc() - print(f"test {test_name} crashed -- {msg}", - file=sys.stderr, flush=True) - return FAILED - - if refleak: - return FAILED - # if environment.changed: - # return ENV_CHANGED - return PASSED - - -def cleanup_test_droppings(test_name, verbose): - # First kill any dangling references to open files etc. - # This can also issue some ResourceWarnings which would otherwise get - # triggered during the following test run, and possibly produce failures. - support.gc_collect() - - # Try to clean up junk commonly left behind. While tests shouldn't leave - # any files or directories behind, when a test fails that can be tedious - # for it to arrange. The consequences can be especially nasty on Windows, - # since if a test leaves a file open, it cannot be deleted by name (while - # there's nothing we can do about that here either, we can display the - # name of the offending test, which is a real help). 
- for name in (os_helper.TESTFN, - "db_home", - ): - if not os.path.exists(name): - continue - - if os.path.isdir(name): - import shutil - kind, nuker = "directory", shutil.rmtree - elif os.path.isfile(name): - kind, nuker = "file", os.unlink - else: - raise RuntimeError(f"os.path says {name!r} exists but is neither " - f"directory nor file") - - if verbose: - print_warning("%r left behind %s %r" % (test_name, kind, name)) - support.environment_altered = True - - try: - import stat - # fix possible permissions problems that might prevent cleanup - os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) - nuker(name) - except Exception as exc: - print_warning(f"{test_name} left behind {kind} {name!r} " - f"and it couldn't be removed: {exc}") diff --git a/Lib/test/libregrtest/runtest_mp.py b/Lib/test/libregrtest/runtest_mp.py deleted file mode 100644 index c2177d9995..0000000000 --- a/Lib/test/libregrtest/runtest_mp.py +++ /dev/null @@ -1,288 +0,0 @@ -import collections -import faulthandler -import json -import os -import queue -import subprocess -import sys -import threading -import time -import traceback -import types -from test import support - -from test.libregrtest.runtest import ( - runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME, - format_test_result, TestResult) -from test.libregrtest.setup import setup_tests -from test.libregrtest.utils import format_duration -from test.support import os_helper - - -# Display the running tests if nothing happened last N seconds -PROGRESS_UPDATE = 30.0 # seconds - - -def must_stop(result): - return result.result in (INTERRUPTED, CHILD_ERROR) - - -def run_test_in_subprocess(testname, ns): - ns_dict = vars(ns) - worker_args = (ns_dict, testname) - worker_args = json.dumps(worker_args) - - cmd = [sys.executable, *support.args_from_interpreter_flags(), - '-u', # Unbuffered stdout and stderr - '-m', 'test.regrtest', - '--worker-args', worker_args] - if ns.pgo: - cmd += ['--pgo'] - - # Running the child from the same working directory as regrtest's original - # invocation ensures that TEMPDIR for the child is the same when - # sysconfig.is_python_build() is true. See issue 15300. - return subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - close_fds=(os.name != 'nt'), - cwd=os_helper.SAVEDCWD) - - -def run_tests_worker(worker_args): - ns_dict, testname = json.loads(worker_args) - ns = types.SimpleNamespace(**ns_dict) - - setup_tests(ns) - - result = runtest(ns, testname) - print() # Force a newline (just in case) - print(json.dumps(result), flush=True) - sys.exit(0) - - -# We do not use a generator so multiple threads can call next(). 
-class MultiprocessIterator: - - """A thread-safe iterator over tests for multiprocess mode.""" - - def __init__(self, tests): - self.lock = threading.Lock() - self.tests = tests - - def __iter__(self): - return self - - def __next__(self): - with self.lock: - return next(self.tests) - - -MultiprocessResult = collections.namedtuple('MultiprocessResult', - 'result stdout stderr error_msg') - -class MultiprocessThread(threading.Thread): - def __init__(self, pending, output, ns): - super().__init__() - self.pending = pending - self.output = output - self.ns = ns - self.current_test_name = None - self.start_time = None - self._popen = None - - def kill(self): - if not self.is_alive(): - return - if self._popen is not None: - self._popen.kill() - - def _runtest(self, test_name): - try: - self.start_time = time.monotonic() - self.current_test_name = test_name - - popen = run_test_in_subprocess(test_name, self.ns) - self._popen = popen - with popen: - try: - stdout, stderr = popen.communicate() - except: - popen.kill() - popen.wait() - raise - - retcode = popen.wait() - finally: - self.current_test_name = None - self._popen = None - - stdout = stdout.strip() - stderr = stderr.rstrip() - - err_msg = None - if retcode != 0: - err_msg = "Exit code %s" % retcode - else: - stdout, _, result = stdout.rpartition("\n") - stdout = stdout.rstrip() - if not result: - err_msg = "Failed to parse worker stdout" - else: - try: - # deserialize run_tests_worker() output - result = json.loads(result) - result = TestResult(*result) - except Exception as exc: - err_msg = "Failed to parse worker JSON: %s" % exc - - if err_msg is not None: - test_time = time.monotonic() - self.start_time - result = TestResult(test_name, CHILD_ERROR, test_time, None) - - return MultiprocessResult(result, stdout, stderr, err_msg) - - def run(self): - while True: - try: - try: - test_name = next(self.pending) - except StopIteration: - break - - mp_result = self._runtest(test_name) - self.output.put((False, mp_result)) - - if must_stop(mp_result.result): - break - except BaseException: - self.output.put((True, traceback.format_exc())) - break - - -def get_running(workers): - running = [] - for worker in workers: - current_test_name = worker.current_test_name - if not current_test_name: - continue - dt = time.monotonic() - worker.start_time - if dt >= PROGRESS_MIN_TIME: - text = '%s (%s)' % (current_test_name, format_duration(dt)) - running.append(text) - return running - - -class MultiprocessRunner: - def __init__(self, regrtest): - self.regrtest = regrtest - self.ns = regrtest.ns - self.output = queue.Queue() - self.pending = MultiprocessIterator(self.regrtest.tests) - if self.ns.timeout is not None: - self.test_timeout = self.ns.timeout * 1.5 - else: - self.test_timeout = None - self.workers = None - - def start_workers(self): - self.workers = [MultiprocessThread(self.pending, self.output, self.ns) - for _ in range(self.ns.use_mp)] - print("Run tests in parallel using %s child processes" - % len(self.workers)) - for worker in self.workers: - worker.start() - - def wait_workers(self): - for worker in self.workers: - worker.kill() - for worker in self.workers: - worker.join() - - def _get_result(self): - if not any(worker.is_alive() for worker in self.workers): - # all worker threads are done: consume pending results - try: - return self.output.get(timeout=0) - except queue.Empty: - return None - - while True: - if self.test_timeout is not None: - faulthandler.dump_traceback_later(self.test_timeout, exit=True) - - # wait for a thread - 
timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME) - try: - return self.output.get(timeout=timeout) - except queue.Empty: - pass - - # display progress - running = get_running(self.workers) - if running and not self.ns.pgo: - print('running: %s' % ', '.join(running), flush=True) - - def display_result(self, mp_result): - result = mp_result.result - - text = format_test_result(result) - if mp_result.error_msg is not None: - # CHILD_ERROR - text += ' (%s)' % mp_result.error_msg - elif (result.test_time >= PROGRESS_MIN_TIME and not self.ns.pgo): - text += ' (%s)' % format_duration(result.test_time) - running = get_running(self.workers) - if running and not self.ns.pgo: - text += ' -- running: %s' % ', '.join(running) - self.regrtest.display_progress(self.test_index, text) - - def _process_result(self, item): - if item[0]: - # Thread got an exception - format_exc = item[1] - print(f"regrtest worker thread failed: {format_exc}", - file=sys.stderr, flush=True) - return True - - self.test_index += 1 - mp_result = item[1] - self.regrtest.accumulate_result(mp_result.result) - self.display_result(mp_result) - - if mp_result.stdout: - print(mp_result.stdout, flush=True) - if mp_result.stderr and not self.ns.pgo: - print(mp_result.stderr, file=sys.stderr, flush=True) - - if must_stop(mp_result.result): - return True - - return False - - def run_tests(self): - self.start_workers() - - self.test_index = 0 - try: - while True: - item = self._get_result() - if item is None: - break - - stop = self._process_result(item) - if stop: - break - except KeyboardInterrupt: - print() - self.regrtest.interrupted = True - finally: - if self.test_timeout is not None: - faulthandler.cancel_dump_traceback_later() - - self.wait_workers() - - -def run_tests_multiprocess(regrtest): - MultiprocessRunner(regrtest).run_tests() diff --git a/Lib/test/libregrtest/runtests.py b/Lib/test/libregrtest/runtests.py new file mode 100644 index 0000000000..7b607d4a55 --- /dev/null +++ b/Lib/test/libregrtest/runtests.py @@ -0,0 +1,222 @@ +import contextlib +import dataclasses +import json +import os +import shlex +import subprocess +import sys +from typing import Any, Iterator + +from test import support + +from .utils import ( + StrPath, StrJSON, TestTuple, TestName, TestFilter, FilterTuple, FilterDict) + + +class JsonFileType: + UNIX_FD = "UNIX_FD" + WINDOWS_HANDLE = "WINDOWS_HANDLE" + STDOUT = "STDOUT" + + +@dataclasses.dataclass(slots=True, frozen=True) +class JsonFile: + # file type depends on file_type: + # - UNIX_FD: file descriptor (int) + # - WINDOWS_HANDLE: handle (int) + # - STDOUT: use process stdout (None) + file: int | None + file_type: str + + def configure_subprocess(self, popen_kwargs: dict[str, Any]) -> None: + match self.file_type: + case JsonFileType.UNIX_FD: + # Unix file descriptor + popen_kwargs['pass_fds'] = [self.file] + case JsonFileType.WINDOWS_HANDLE: + # Windows handle + # We run mypy with `--platform=linux` so it complains about this: + startupinfo = subprocess.STARTUPINFO() # type: ignore[attr-defined] + startupinfo.lpAttributeList = {"handle_list": [self.file]} + popen_kwargs['startupinfo'] = startupinfo + + @contextlib.contextmanager + def inherit_subprocess(self) -> Iterator[None]: + if sys.platform == 'win32' and self.file_type == JsonFileType.WINDOWS_HANDLE: + os.set_handle_inheritable(self.file, True) + try: + yield + finally: + os.set_handle_inheritable(self.file, False) + else: + yield + + def open(self, mode='r', *, encoding): + if self.file_type == JsonFileType.STDOUT: + raise ValueError("for 
STDOUT file type, just use sys.stdout")
+
+        file = self.file
+        if self.file_type == JsonFileType.WINDOWS_HANDLE:
+            import msvcrt
+            # Create a file descriptor from the handle
+            file = msvcrt.open_osfhandle(file, os.O_WRONLY)
+        return open(file, mode, encoding=encoding)
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class HuntRefleak:
+    warmups: int
+    runs: int
+    filename: StrPath
+
+    def bisect_cmd_args(self) -> list[str]:
+        # Ignore filename since it can contain a colon (":"),
+        # and usually it's not used. Use the default filename.
+        return ["-R", f"{self.warmups}:{self.runs}:"]
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class RunTests:
+    tests: TestTuple
+    fail_fast: bool
+    fail_env_changed: bool
+    match_tests: TestFilter
+    match_tests_dict: FilterDict | None
+    rerun: bool
+    forever: bool
+    pgo: bool
+    pgo_extended: bool
+    output_on_failure: bool
+    timeout: float | None
+    verbose: int
+    quiet: bool
+    hunt_refleak: HuntRefleak | None
+    test_dir: StrPath | None
+    use_junit: bool
+    coverage: bool
+    memory_limit: str | None
+    gc_threshold: int | None
+    use_resources: tuple[str, ...]
+    python_cmd: tuple[str, ...] | None
+    randomize: bool
+    random_seed: int | str
+
+    def copy(self, **override) -> 'RunTests':
+        state = dataclasses.asdict(self)
+        state.update(override)
+        return RunTests(**state)
+
+    def create_worker_runtests(self, **override) -> 'WorkerRunTests':
+        state = dataclasses.asdict(self)
+        state.update(override)
+        return WorkerRunTests(**state)
+
+    def get_match_tests(self, test_name: TestName) -> FilterTuple | None:
+        if self.match_tests_dict is not None:
+            return self.match_tests_dict.get(test_name, None)
+        else:
+            return None
+
+    def get_jobs(self) -> int | None:
+        # Number of run_single_test() calls needed to run all tests.
+        # None means that there is no upper bound (--forever option).
+        if self.forever:
+            return None
+        return len(self.tests)
+
+    def iter_tests(self) -> Iterator[TestName]:
+        if self.forever:
+            while True:
+                yield from self.tests
+        else:
+            yield from self.tests
+
+    def json_file_use_stdout(self) -> bool:
+        # Use STDOUT in two cases:
+        #
+        # - If --python command line option is used;
+        # - On Emscripten and WASI.
+        #
+        # On other platforms, UNIX_FD or WINDOWS_HANDLE can be used.
+        return (
+            bool(self.python_cmd)
+            or support.is_emscripten
+            or support.is_wasi
+        )
+
+    def create_python_cmd(self) -> list[str]:
+        python_opts = support.args_from_interpreter_flags()
+        if self.python_cmd is not None:
+            executable = self.python_cmd
+            # Remove the -E option, since --python=COMMAND can set PYTHON*
+            # environment variables, such as PYTHONPATH, in the worker
+            # process.
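+            # (-E makes the child ignore PYTHON* environment variables,
+            # which would defeat that.)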
+ python_opts = [opt for opt in python_opts if opt != "-E"] + else: + executable = (sys.executable,) + cmd = [*executable, *python_opts] + if '-u' not in python_opts: + cmd.append('-u') # Unbuffered stdout and stderr + if self.coverage: + cmd.append("-Xpresite=test.cov") + return cmd + + def bisect_cmd_args(self) -> list[str]: + args = [] + if self.fail_fast: + args.append("--failfast") + if self.fail_env_changed: + args.append("--fail-env-changed") + if self.timeout: + args.append(f"--timeout={self.timeout}") + if self.hunt_refleak is not None: + args.extend(self.hunt_refleak.bisect_cmd_args()) + if self.test_dir: + args.extend(("--testdir", self.test_dir)) + if self.memory_limit: + args.extend(("--memlimit", self.memory_limit)) + if self.gc_threshold: + args.append(f"--threshold={self.gc_threshold}") + if self.use_resources: + args.extend(("-u", ','.join(self.use_resources))) + if self.python_cmd: + cmd = shlex.join(self.python_cmd) + args.extend(("--python", cmd)) + if self.randomize: + args.append(f"--randomize") + args.append(f"--randseed={self.random_seed}") + return args + + +@dataclasses.dataclass(slots=True, frozen=True) +class WorkerRunTests(RunTests): + json_file: JsonFile + + def as_json(self) -> StrJSON: + return json.dumps(self, cls=_EncodeRunTests) + + @staticmethod + def from_json(worker_json: StrJSON) -> 'WorkerRunTests': + return json.loads(worker_json, object_hook=_decode_runtests) + + +class _EncodeRunTests(json.JSONEncoder): + def default(self, o: Any) -> dict[str, Any]: + if isinstance(o, WorkerRunTests): + result = dataclasses.asdict(o) + result["__runtests__"] = True + return result + else: + return super().default(o) + + +def _decode_runtests(data: dict[str, Any]) -> RunTests | dict[str, Any]: + if "__runtests__" in data: + data.pop('__runtests__') + if data['hunt_refleak']: + data['hunt_refleak'] = HuntRefleak(**data['hunt_refleak']) + if data['json_file']: + data['json_file'] = JsonFile(**data['json_file']) + return WorkerRunTests(**data) + else: + return data diff --git a/Lib/test/libregrtest/save_env.py b/Lib/test/libregrtest/save_env.py index b9a1c0b392..b2cc381344 100644 --- a/Lib/test/libregrtest/save_env.py +++ b/Lib/test/libregrtest/save_env.py @@ -1,20 +1,17 @@ -import asyncio import builtins import locale -import logging import os -import shutil import sys -import sysconfig import threading -import warnings + from test import support from test.support import os_helper -from test.libregrtest.utils import print_warning -try: - import _multiprocessing, multiprocessing.process -except ImportError: - multiprocessing = None + +from .utils import print_warning + + +class SkipTestEnvironment(Exception): + pass # Unit tests are supposed to leave the execution environment unchanged @@ -28,21 +25,19 @@ class saved_test_environment: """Save bits of the test environment and restore them at block exit. - with saved_test_environment(testname, verbose, quiet): + with saved_test_environment(test_name, verbose, quiet): #stuff Unless quiet is True, a warning is printed to stderr if any of - the saved items was changed by the test. The attribute 'changed' - is initially False, but is set to True if a change is detected. + the saved items was changed by the test. The support.environment_altered + attribute is set to True if a change is detected. If verbose is more than 1, the before and after state of changed items is also printed. 
""" - changed = False - - def __init__(self, testname, verbose=0, quiet=False, *, pgo=False): - self.testname = testname + def __init__(self, test_name, verbose, quiet, *, pgo): + self.test_name = test_name self.verbose = verbose self.quiet = quiet self.pgo = pgo @@ -69,11 +64,39 @@ def __init__(self, testname, verbose=0, quiet=False, *, pgo=False): 'files', 'locale', 'warnings.showwarning', 'shutil_archive_formats', 'shutil_unpack_formats', 'asyncio.events._event_loop_policy', + 'urllib.requests._url_tempfiles', 'urllib.requests._opener', ) + def get_module(self, name): + # function for restore() methods + return sys.modules[name] + + def try_get_module(self, name): + # function for get() methods + try: + return self.get_module(name) + except KeyError: + raise SkipTestEnvironment + + def get_urllib_requests__url_tempfiles(self): + urllib_request = self.try_get_module('urllib.request') + return list(urllib_request._url_tempfiles) + def restore_urllib_requests__url_tempfiles(self, tempfiles): + for filename in tempfiles: + os_helper.unlink(filename) + + def get_urllib_requests__opener(self): + urllib_request = self.try_get_module('urllib.request') + return urllib_request._opener + def restore_urllib_requests__opener(self, opener): + urllib_request = self.get_module('urllib.request') + urllib_request._opener = opener + def get_asyncio_events__event_loop_policy(self): + self.try_get_module('asyncio') return support.maybe_get_event_loop_policy() def restore_asyncio_events__event_loop_policy(self, policy): + asyncio = self.get_module('asyncio') asyncio.set_event_loop_policy(policy) def get_sys_argv(self): @@ -132,39 +155,46 @@ def restore___import__(self, import_): builtins.__import__ = import_ def get_warnings_filters(self): + warnings = self.try_get_module('warnings') return id(warnings.filters), warnings.filters, warnings.filters[:] def restore_warnings_filters(self, saved_filters): + warnings = self.get_module('warnings') warnings.filters = saved_filters[1] warnings.filters[:] = saved_filters[2] def get_asyncore_socket_map(self): - asyncore = sys.modules.get('asyncore') + asyncore = sys.modules.get('test.support.asyncore') # XXX Making a copy keeps objects alive until __exit__ gets called. 
return asyncore and asyncore.socket_map.copy() or {} def restore_asyncore_socket_map(self, saved_map): - asyncore = sys.modules.get('asyncore') + asyncore = sys.modules.get('test.support.asyncore') if asyncore is not None: asyncore.close_all(ignore_all=True) asyncore.socket_map.update(saved_map) def get_shutil_archive_formats(self): + shutil = self.try_get_module('shutil') # we could call get_archives_formats() but that only returns the # registry keys; we want to check the values too (the functions that # are registered) return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy() def restore_shutil_archive_formats(self, saved): + shutil = self.get_module('shutil') shutil._ARCHIVE_FORMATS = saved[0] shutil._ARCHIVE_FORMATS.clear() shutil._ARCHIVE_FORMATS.update(saved[1]) def get_shutil_unpack_formats(self): + shutil = self.try_get_module('shutil') return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy() def restore_shutil_unpack_formats(self, saved): + shutil = self.get_module('shutil') shutil._UNPACK_FORMATS = saved[0] shutil._UNPACK_FORMATS.clear() shutil._UNPACK_FORMATS.update(saved[1]) def get_logging__handlers(self): + logging = self.try_get_module('logging') # _handlers is a WeakValueDictionary return id(logging._handlers), logging._handlers, logging._handlers.copy() def restore_logging__handlers(self, saved_handlers): @@ -172,6 +202,7 @@ def restore_logging__handlers(self, saved_handlers): pass def get_logging__handlerList(self): + logging = self.try_get_module('logging') # _handlerList is a list of weakrefs to handlers return id(logging._handlerList), logging._handlerList, logging._handlerList[:] def restore_logging__handlerList(self, saved_handlerList): @@ -195,39 +226,43 @@ def restore_threading__dangling(self, saved): # Same for Process objects def get_multiprocessing_process__dangling(self): - if not multiprocessing: - return None + multiprocessing_process = self.try_get_module('multiprocessing.process') # Unjoined process objects can survive after process exits - multiprocessing.process._cleanup() + multiprocessing_process._cleanup() # This copies the weakrefs without making any strong reference - return multiprocessing.process._dangling.copy() + return multiprocessing_process._dangling.copy() def restore_multiprocessing_process__dangling(self, saved): - if not multiprocessing: - return - multiprocessing.process._dangling.clear() - multiprocessing.process._dangling.update(saved) + multiprocessing_process = self.get_module('multiprocessing.process') + multiprocessing_process._dangling.clear() + multiprocessing_process._dangling.update(saved) def get_sysconfig__CONFIG_VARS(self): # make sure the dict is initialized + sysconfig = self.try_get_module('sysconfig') sysconfig.get_config_var('prefix') return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS, dict(sysconfig._CONFIG_VARS)) def restore_sysconfig__CONFIG_VARS(self, saved): + sysconfig = self.get_module('sysconfig') sysconfig._CONFIG_VARS = saved[1] sysconfig._CONFIG_VARS.clear() sysconfig._CONFIG_VARS.update(saved[2]) def get_sysconfig__INSTALL_SCHEMES(self): + sysconfig = self.try_get_module('sysconfig') return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES, sysconfig._INSTALL_SCHEMES.copy()) def restore_sysconfig__INSTALL_SCHEMES(self, saved): + sysconfig = self.get_module('sysconfig') sysconfig._INSTALL_SCHEMES = saved[1] sysconfig._INSTALL_SCHEMES.clear() sysconfig._INSTALL_SCHEMES.update(saved[2]) def get_files(self): + # XXX: Maybe add an allow-list here? 
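+        # Illustrative shape of the result (entries are hypothetical):
+        # directories carry a trailing slash so that a file and a directory
+        # sharing a name compare as different entries, e.g.
+        #   ['README.txt', 'build/', 'sub_dir/']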
return sorted(fn + ('/' if os.path.isdir(fn) else '') - for fn in os.listdir()) + for fn in os.listdir() + if not fn.startswith(".hypothesis")) def restore_files(self, saved_value): fn = os_helper.TESTFN if fn not in saved_value and (fn + '/') not in saved_value: @@ -251,8 +286,10 @@ def restore_locale(self, saved): locale.setlocale(lc, setting) def get_warnings_showwarning(self): + warnings = self.try_get_module('warnings') return warnings.showwarning def restore_warnings_showwarning(self, fxn): + warnings = self.get_module('warnings') warnings.showwarning = fxn def resource_info(self): @@ -263,29 +300,32 @@ def resource_info(self): yield name, getattr(self, get_name), getattr(self, restore_name) def __enter__(self): - self.saved_values = dict((name, get()) for name, get, restore - in self.resource_info()) + self.saved_values = [] + for name, get, restore in self.resource_info(): + try: + original = get() + except SkipTestEnvironment: + continue + + self.saved_values.append((name, get, restore, original)) return self def __exit__(self, exc_type, exc_val, exc_tb): saved_values = self.saved_values - del self.saved_values + self.saved_values = None # Some resources use weak references support.gc_collect() - # Read support.environment_altered, set by support helper functions - self.changed |= support.environment_altered - - for name, get, restore in self.resource_info(): + for name, get, restore, original in saved_values: current = get() - original = saved_values.pop(name) # Check for changes to the resource's value if current != original: - self.changed = True + support.environment_altered = True restore(original) if not self.quiet and not self.pgo: - print_warning(f"{name} was modified by {self.testname}") - print(f" Before: {original}\n After: {current} ", - file=sys.stderr, flush=True) + print_warning( + f"{name} was modified by {self.test_name}\n" + f" Before: {original}\n" + f" After: {current} ") return False diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py index b1a5ded525..ba57f06b48 100644 --- a/Lib/test/libregrtest/setup.py +++ b/Lib/test/libregrtest/setup.py @@ -1,17 +1,34 @@ -import atexit import faulthandler +import gc import os +import random import signal import sys import unittest from test import support -try: - import gc -except ImportError: - gc = None +from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII +from .filter import set_match_tests +from .runtests import RunTests +from .utils import ( + setup_unraisable_hook, setup_threading_excepthook, fix_umask, + adjust_rlimit_nofile) -def setup_tests(ns): + +UNICODE_GUARD_ENV = "PYTHONREGRTEST_UNICODE_GUARD" + + +def setup_test_dir(testdir: str | None) -> None: + if testdir: + # Prepend test directory to sys.path, so runtest() will be able + # to locate tests + sys.path.insert(0, os.path.abspath(testdir)) + + +def setup_process() -> None: + fix_umask() + + assert sys.__stderr__ is not None, "sys.__stderr__ is None" try: stderr_fd = sys.__stderr__.fileno() except (ValueError, AttributeError): @@ -19,7 +36,7 @@ def setup_tests(ns): # and ValueError on a closed stream. # # Catch AttributeError for stderr being None. - stderr_fd = None + pass else: # Display the Python traceback on fatal errors (e.g. 
segfault) faulthandler.enable(all_threads=True, file=stderr_fd) @@ -33,13 +50,9 @@ def setup_tests(ns): for signum in signals: faulthandler.register(signum, chain=True, file=stderr_fd) - # replace_stdout() - # support.record_original_stdout(sys.stdout) + adjust_rlimit_nofile() - if ns.testdir: - # Prepend test directory to sys.path, so runtest() will be able - # to locate tests - sys.path.insert(0, os.path.abspath(ns.testdir)) + support.record_original_stdout(sys.stdout) # Some times __path__ and __file__ are not absolute (e.g. while running from # Lib/) and, if we change the CWD to run the tests in a temporary dir, some @@ -56,79 +69,66 @@ def setup_tests(ns): for index, path in enumerate(module.__path__): module.__path__[index] = os.path.abspath(path) if getattr(module, '__file__', None): - module.__file__ = os.path.abspath(module.__file__) - - # MacOSX (a.k.a. Darwin) has a default stack size that is too small - # for deeply recursive regular expressions. We see this as crashes in - # the Python test suite when running test_re.py and test_sre.py. The - # fix is to set the stack limit to 2048. - # This approach may also be useful for other Unixy platforms that - # suffer from small default stack limits. - if sys.platform == 'darwin': - try: - import resource - except ImportError: + module.__file__ = os.path.abspath(module.__file__) # type: ignore[type-var] + + if hasattr(sys, 'addaudithook'): + # Add an auditing hook for all tests to ensure PySys_Audit is tested + def _test_audit_hook(name, args): pass - else: - soft, hard = resource.getrlimit(resource.RLIMIT_STACK) - newsoft = min(hard, max(soft, 1024*2048)) - resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard)) + sys.addaudithook(_test_audit_hook) - if ns.huntrleaks: - unittest.BaseTestSuite._cleanup = False + setup_unraisable_hook() + setup_threading_excepthook() - if ns.memlimit is not None: - support.set_memlimit(ns.memlimit) + # Ensure there's a non-ASCII character in env vars at all times to force + # tests consider this case. See BPO-44647 for details. 
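+    # Sketch of the resulting state (the value shown is illustrative, as
+    # FS_NONASCII depends on the filesystem encoding): subprocesses spawned
+    # by tests then inherit e.g. PYTHONREGRTEST_UNICODE_GUARD='\xe9', so
+    # code paths that encode the environment are always exercised.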
+ if TESTFN_UNDECODABLE and os.supports_bytes_environ: + os.environb.setdefault(UNICODE_GUARD_ENV.encode(), TESTFN_UNDECODABLE) + elif FS_NONASCII: + os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII) - if ns.threshold is not None: - gc.set_threshold(ns.threshold) - try: - import msvcrt - except ImportError: - pass +def setup_tests(runtests: RunTests) -> None: + support.verbose = runtests.verbose + support.failfast = runtests.fail_fast + support.PGO = runtests.pgo + support.PGO_EXTENDED = runtests.pgo_extended + + set_match_tests(runtests.match_tests) + + if runtests.use_junit: + support.junit_xml_list = [] + from .testresult import RegressionTestResult + RegressionTestResult.USE_XML = True else: - msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS| - msvcrt.SEM_NOALIGNMENTFAULTEXCEPT| - msvcrt.SEM_NOGPFAULTERRORBOX| - msvcrt.SEM_NOOPENFILEERRORBOX) - try: - msvcrt.CrtSetReportMode - except AttributeError: - # release build - pass - else: - for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]: - if ns.verbose and ns.verbose >= 2: - msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE) - msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR) - else: - msvcrt.CrtSetReportMode(m, 0) + support.junit_xml_list = None - support.use_resources = ns.use_resources + if runtests.memory_limit is not None: + support.set_memlimit(runtests.memory_limit) + support.suppress_msvcrt_asserts(runtests.verbose >= 2) -def replace_stdout(): - """Set stdout encoder error handler to backslashreplace (as stderr error - handler) to avoid UnicodeEncodeError when printing a traceback""" - stdout = sys.stdout - try: - fd = stdout.fileno() - except ValueError: - # On IDLE, sys.stdout has no file descriptor and is not a TextIOWrapper - # object. Leaving sys.stdout unchanged. - # - # Catch ValueError to catch io.UnsupportedOperation on TextIOBase - # and ValueError on a closed stream. 
- return - - sys.stdout = open(fd, 'w', - encoding=stdout.encoding, - errors="backslashreplace", - closefd=False, - newline='\n') - - def restore_stdout(): - sys.stdout.close() - sys.stdout = stdout - atexit.register(restore_stdout) + support.use_resources = runtests.use_resources + + timeout = runtests.timeout + if timeout is not None: + # For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT + support.LOOPBACK_TIMEOUT = max(support.LOOPBACK_TIMEOUT, timeout / 120) + # don't increase INTERNET_TIMEOUT + support.SHORT_TIMEOUT = max(support.SHORT_TIMEOUT, timeout / 40) + support.LONG_TIMEOUT = max(support.LONG_TIMEOUT, timeout / 4) + + # If --timeout is short: reduce timeouts + support.LOOPBACK_TIMEOUT = min(support.LOOPBACK_TIMEOUT, timeout) + support.INTERNET_TIMEOUT = min(support.INTERNET_TIMEOUT, timeout) + support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, timeout) + support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, timeout) + + if runtests.hunt_refleak: + # private attribute that mypy doesn't know about: + unittest.BaseTestSuite._cleanup = False # type: ignore[attr-defined] + + if runtests.gc_threshold is not None: + gc.set_threshold(runtests.gc_threshold) + + random.seed(runtests.random_seed) diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py new file mode 100644 index 0000000000..adc8f1f455 --- /dev/null +++ b/Lib/test/libregrtest/single.py @@ -0,0 +1,322 @@ +import faulthandler +import gc +import importlib +import io +import sys +import time +import traceback +import unittest + +from test import support +from test.support import threading_helper + +from .filter import match_test +from .result import State, TestResult, TestStats +from .runtests import RunTests +from .save_env import saved_test_environment +from .setup import setup_tests +from .testresult import get_test_runner +from .utils import ( + TestName, + clear_caches, remove_testfn, abs_module_name, print_warning) + + +# Minimum duration of a test to display its duration or to mention that +# the test is running in background +PROGRESS_MIN_TIME = 30.0 # seconds + + +def run_unittest(test_mod): + loader = unittest.TestLoader() + tests = loader.loadTestsFromModule(test_mod) + for error in loader.errors: + print(error, file=sys.stderr) + if loader.errors: + raise Exception("errors while loading tests") + _filter_suite(tests, match_test) + return _run_suite(tests) + +def _filter_suite(suite, pred): + """Recursively filter test cases in a suite based on a predicate.""" + newtests = [] + for test in suite._tests: + if isinstance(test, unittest.TestSuite): + _filter_suite(test, pred) + newtests.append(test) + else: + if pred(test): + newtests.append(test) + suite._tests = newtests + +def _run_suite(suite): + """Run tests from a unittest.TestSuite-derived class.""" + runner = get_test_runner(sys.stdout, + verbosity=support.verbose, + capture_output=(support.junit_xml_list is not None)) + + result = runner.run(suite) + + if support.junit_xml_list is not None: + import xml.etree.ElementTree as ET + xml_elem = result.get_xml_element() + xml_str = ET.tostring(xml_elem).decode('ascii') + support.junit_xml_list.append(xml_str) + + if not result.testsRun and not result.skipped and not result.errors: + raise support.TestDidNotRun + if not result.wasSuccessful(): + stats = TestStats.from_unittest(result) + if len(result.errors) == 1 and not result.failures: + err = result.errors[0][1] + elif len(result.failures) == 1 and not result.errors: + err = result.failures[0][1] + else: + err = "multiple errors 
occurred" + if not support.verbose: err += "; run in verbose mode for details" + errors = [(str(tc), exc_str) for tc, exc_str in result.errors] + failures = [(str(tc), exc_str) for tc, exc_str in result.failures] + raise support.TestFailedWithDetails(err, errors, failures, stats=stats) + return result + + +def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None: + # Run test_func(), collect statistics, and detect reference and memory + # leaks. + if runtests.hunt_refleak: + from .refleak import runtest_refleak + refleak, test_result = runtest_refleak(result.test_name, test_func, + runtests.hunt_refleak, + runtests.quiet) + else: + test_result = test_func() + refleak = False + + if refleak: + result.state = State.REFLEAK + + stats: TestStats | None + + match test_result: + case TestStats(): + stats = test_result + case unittest.TestResult(): + stats = TestStats.from_unittest(test_result) + case None: + print_warning(f"{result.test_name} test runner returned None: {test_func}") + stats = None + case _: + # Don't import doctest at top level since only few tests return + # a doctest.TestResult instance. + import doctest + if isinstance(test_result, doctest.TestResults): + stats = TestStats.from_doctest(test_result) + else: + print_warning(f"Unknown test result type: {type(test_result)}") + stats = None + + result.stats = stats + + +# Storage of uncollectable GC objects (gc.garbage) +GC_GARBAGE = [] + + +def _load_run_test(result: TestResult, runtests: RunTests) -> None: + # Load the test module and run the tests. + test_name = result.test_name + module_name = abs_module_name(test_name, runtests.test_dir) + test_mod = importlib.import_module(module_name) + + if hasattr(test_mod, "test_main"): + # https://github.com/python/cpython/issues/89392 + raise Exception(f"Module {test_name} defines test_main() which " + f"is no longer supported by regrtest") + def test_func(): + return run_unittest(test_mod) + + try: + regrtest_runner(result, test_func, runtests) + finally: + # First kill any dangling references to open files etc. + # This can also issue some ResourceWarnings which would otherwise get + # triggered during the following test run, and possibly produce + # failures. + support.gc_collect() + + remove_testfn(test_name, runtests.verbose) + + if gc.garbage: + support.environment_altered = True + print_warning(f"{test_name} created {len(gc.garbage)} " + f"uncollectable object(s)") + + # move the uncollectable objects somewhere, + # so we don't see them again + GC_GARBAGE.extend(gc.garbage) + gc.garbage.clear() + + support.reap_children() + + +def _runtest_env_changed_exc(result: TestResult, runtests: RunTests, + display_failure: bool = True) -> None: + # Handle exceptions, detect environment changes. 
+ + # Reset the environment_altered flag to detect if a test altered + # the environment + support.environment_altered = False + + pgo = runtests.pgo + if pgo: + display_failure = False + quiet = runtests.quiet + + test_name = result.test_name + try: + clear_caches() + support.gc_collect() + + with saved_test_environment(test_name, + runtests.verbose, quiet, pgo=pgo): + _load_run_test(result, runtests) + except support.ResourceDenied as exc: + if not quiet and not pgo: + print(f"{test_name} skipped -- {exc}", flush=True) + result.state = State.RESOURCE_DENIED + return + except unittest.SkipTest as exc: + if not quiet and not pgo: + print(f"{test_name} skipped -- {exc}", flush=True) + result.state = State.SKIPPED + return + except support.TestFailedWithDetails as exc: + msg = f"test {test_name} failed" + if display_failure: + msg = f"{msg} -- {exc}" + print(msg, file=sys.stderr, flush=True) + result.state = State.FAILED + result.errors = exc.errors + result.failures = exc.failures + result.stats = exc.stats + return + except support.TestFailed as exc: + msg = f"test {test_name} failed" + if display_failure: + msg = f"{msg} -- {exc}" + print(msg, file=sys.stderr, flush=True) + result.state = State.FAILED + result.stats = exc.stats + return + except support.TestDidNotRun: + result.state = State.DID_NOT_RUN + return + except KeyboardInterrupt: + print() + result.state = State.INTERRUPTED + return + except: + if not pgo: + msg = traceback.format_exc() + print(f"test {test_name} crashed -- {msg}", + file=sys.stderr, flush=True) + result.state = State.UNCAUGHT_EXC + return + + if support.environment_altered: + result.set_env_changed() + # Don't override the state if it was already set (REFLEAK or ENV_CHANGED) + if result.state is None: + result.state = State.PASSED + + +def _runtest(result: TestResult, runtests: RunTests) -> None: + # Capture stdout and stderr, set faulthandler timeout, + # and create JUnit XML report. + verbose = runtests.verbose + output_on_failure = runtests.output_on_failure + timeout = runtests.timeout + + if timeout is not None and threading_helper.can_start_thread: + use_timeout = True + faulthandler.dump_traceback_later(timeout, exit=True) + else: + use_timeout = False + + try: + setup_tests(runtests) + + if output_on_failure: + support.verbose = True + + stream = io.StringIO() + orig_stdout = sys.stdout + orig_stderr = sys.stderr + print_warning = support.print_warning + orig_print_warnings_stderr = print_warning.orig_stderr + + output = None + try: + sys.stdout = stream + sys.stderr = stream + # print_warning() writes into the temporary stream to preserve + # messages order. If support.environment_altered becomes true, + # warnings will be written to sys.stderr below. 
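+            # (print_warning.orig_stderr, normally initialized to
+            # sys.__stderr__, is where support.print_warning() writes;
+            # it is restored in the finally block below.)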
+ print_warning.orig_stderr = stream + + _runtest_env_changed_exc(result, runtests, display_failure=False) + # Ignore output if the test passed successfully + if result.state != State.PASSED: + output = stream.getvalue() + finally: + sys.stdout = orig_stdout + sys.stderr = orig_stderr + print_warning.orig_stderr = orig_print_warnings_stderr + + if output is not None: + sys.stderr.write(output) + sys.stderr.flush() + else: + # Tell tests to be moderately quiet + support.verbose = verbose + _runtest_env_changed_exc(result, runtests, + display_failure=not verbose) + + xml_list = support.junit_xml_list + if xml_list: + result.xml_data = xml_list + finally: + if use_timeout: + faulthandler.cancel_dump_traceback_later() + support.junit_xml_list = None + + +def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult: + """Run a single test. + + test_name -- the name of the test + + Returns a TestResult. + + If runtests.use_junit, xml_data is a list containing each generated + testsuite element. + """ + start_time = time.perf_counter() + result = TestResult(test_name) + pgo = runtests.pgo + try: + # gh-117783: don't immortalize deferred objects when tracking + # refleaks. Only releveant for the free-threaded build. + with support.suppress_immortalization(runtests.hunt_refleak): + _runtest(result, runtests) + except: + if not pgo: + msg = traceback.format_exc() + print(f"test {test_name} crashed -- {msg}", + file=sys.stderr, flush=True) + result.state = State.UNCAUGHT_EXC + + sys.stdout.flush() + sys.stderr.flush() + + result.duration = time.perf_counter() - start_time + return result diff --git a/Lib/test/libregrtest/testresult.py b/Lib/test/libregrtest/testresult.py new file mode 100644 index 0000000000..1820f35457 --- /dev/null +++ b/Lib/test/libregrtest/testresult.py @@ -0,0 +1,193 @@ +'''Test runner and result class for the regression test suite. 
+ +''' + +import functools +import io +import sys +import time +import traceback +import unittest +from test import support +from test.libregrtest.utils import sanitize_xml + +class RegressionTestResult(unittest.TextTestResult): + USE_XML = False + + def __init__(self, stream, descriptions, verbosity): + super().__init__(stream=stream, descriptions=descriptions, + verbosity=2 if verbosity else 0) + self.buffer = True + if self.USE_XML: + from xml.etree import ElementTree as ET + from datetime import datetime, UTC + self.__ET = ET + self.__suite = ET.Element('testsuite') + self.__suite.set('start', + datetime.now(UTC) + .replace(tzinfo=None) + .isoformat(' ')) + self.__e = None + self.__start_time = None + + @classmethod + def __getId(cls, test): + try: + test_id = test.id + except AttributeError: + return str(test) + try: + return test_id() + except TypeError: + return str(test_id) + return repr(test) + + def startTest(self, test): + super().startTest(test) + if self.USE_XML: + self.__e = e = self.__ET.SubElement(self.__suite, 'testcase') + self.__start_time = time.perf_counter() + + def _add_result(self, test, capture=False, **args): + if not self.USE_XML: + return + e = self.__e + self.__e = None + if e is None: + return + ET = self.__ET + + e.set('name', args.pop('name', self.__getId(test))) + e.set('status', args.pop('status', 'run')) + e.set('result', args.pop('result', 'completed')) + if self.__start_time: + e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}') + + if capture: + if self._stdout_buffer is not None: + stdout = self._stdout_buffer.getvalue().rstrip() + ET.SubElement(e, 'system-out').text = sanitize_xml(stdout) + if self._stderr_buffer is not None: + stderr = self._stderr_buffer.getvalue().rstrip() + ET.SubElement(e, 'system-err').text = sanitize_xml(stderr) + + for k, v in args.items(): + if not k or not v: + continue + + e2 = ET.SubElement(e, k) + if hasattr(v, 'items'): + for k2, v2 in v.items(): + if k2: + e2.set(k2, sanitize_xml(str(v2))) + else: + e2.text = sanitize_xml(str(v2)) + else: + e2.text = sanitize_xml(str(v)) + + @classmethod + def __makeErrorDict(cls, err_type, err_value, err_tb): + if isinstance(err_type, type): + if err_type.__module__ == 'builtins': + typename = err_type.__name__ + else: + typename = f'{err_type.__module__}.{err_type.__name__}' + else: + typename = repr(err_type) + + msg = traceback.format_exception(err_type, err_value, None) + tb = traceback.format_exception(err_type, err_value, err_tb) + + return { + 'type': typename, + 'message': ''.join(msg), + '': ''.join(tb), + } + + def addError(self, test, err): + self._add_result(test, True, error=self.__makeErrorDict(*err)) + super().addError(test, err) + + def addExpectedFailure(self, test, err): + self._add_result(test, True, output=self.__makeErrorDict(*err)) + super().addExpectedFailure(test, err) + + def addFailure(self, test, err): + self._add_result(test, True, failure=self.__makeErrorDict(*err)) + super().addFailure(test, err) + if support.failfast: + self.stop() + + def addSkip(self, test, reason): + self._add_result(test, skipped=reason) + super().addSkip(test, reason) + + def addSuccess(self, test): + self._add_result(test) + super().addSuccess(test) + + def addUnexpectedSuccess(self, test): + self._add_result(test, outcome='UNEXPECTED_SUCCESS') + super().addUnexpectedSuccess(test) + + def get_xml_element(self): + if not self.USE_XML: + raise ValueError("USE_XML is false") + e = self.__suite + e.set('tests', str(self.testsRun)) + e.set('errors', str(len(self.errors))) + 
e.set('failures', str(len(self.failures))) + return e + +class QuietRegressionTestRunner: + def __init__(self, stream, buffer=False): + self.result = RegressionTestResult(stream, None, 0) + self.result.buffer = buffer + + def run(self, test): + test(self.result) + return self.result + +def get_test_runner_class(verbosity, buffer=False): + if verbosity: + return functools.partial(unittest.TextTestRunner, + resultclass=RegressionTestResult, + buffer=buffer, + verbosity=verbosity) + return functools.partial(QuietRegressionTestRunner, buffer=buffer) + +def get_test_runner(stream, verbosity, capture_output=False): + return get_test_runner_class(verbosity, capture_output)(stream) + +if __name__ == '__main__': + import xml.etree.ElementTree as ET + RegressionTestResult.USE_XML = True + + class TestTests(unittest.TestCase): + def test_pass(self): + pass + + def test_pass_slow(self): + time.sleep(1.0) + + def test_fail(self): + print('stdout', file=sys.stdout) + print('stderr', file=sys.stderr) + self.fail('failure message') + + def test_error(self): + print('stdout', file=sys.stdout) + print('stderr', file=sys.stderr) + raise RuntimeError('error message') + + suite = unittest.TestSuite() + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestTests)) + stream = io.StringIO() + runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv)) + runner = runner_cls(sys.stdout) + result = runner.run(suite) + print('Output:', stream.getvalue()) + print('XML: ', end='') + for s in ET.tostringlist(result.get_xml_element()): + print(s.decode(), end='') + print() diff --git a/Lib/test/libregrtest/tsan.py b/Lib/test/libregrtest/tsan.py new file mode 100644 index 0000000000..822ac0f404 --- /dev/null +++ b/Lib/test/libregrtest/tsan.py @@ -0,0 +1,33 @@ +# Set of tests run by default if --tsan is specified. The tests below were +# chosen because they use threads and run in a reasonable amount of time. + +TSAN_TESTS = [ + # TODO: enable more of test_capi once bugs are fixed (GH-116908, GH-116909). + 'test_capi.test_mem', + 'test_capi.test_pyatomic', + 'test_code', + 'test_enum', + 'test_functools', + 'test_httpservers', + 'test_imaplib', + 'test_importlib', + 'test_io', + 'test_logging', + 'test_queue', + 'test_signal', + 'test_socket', + 'test_sqlite3', + 'test_ssl', + 'test_syslog', + 'test_thread', + 'test_threadedtempfile', + 'test_threading', + 'test_threading_local', + 'test_threadsignals', + 'test_weakref', +] + + +def setup_tsan_tests(cmdline_args) -> None: + if not cmdline_args: + cmdline_args[:] = TSAN_TESTS[:] diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py index fb9971a64f..9da77dd6dc 100644 --- a/Lib/test/libregrtest/utils.py +++ b/Lib/test/libregrtest/utils.py @@ -1,10 +1,64 @@ +import contextlib +import faulthandler +import locale import math import os.path +import platform +import random +import re +import shlex +import signal +import subprocess import sys +import sysconfig +import tempfile import textwrap +from collections.abc import Callable, Iterable +from test import support +from test.support import os_helper +from test.support import threading_helper -def format_duration(seconds): + +# All temporary files and temporary directories created by libregrtest should +# use TMP_PREFIX so cleanup_temp_dir() can remove them all. +TMP_PREFIX = 'test_python_' +WORK_DIR_PREFIX = TMP_PREFIX +WORKER_WORK_DIR_PREFIX = WORK_DIR_PREFIX + 'worker_' + +# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()). 
+# Used to protect against threading._shutdown() hang. +# Must be smaller than buildbot "1200 seconds without output" limit. +EXIT_TIMEOUT = 120.0 + + +ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network', + 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime') + +# Other resources excluded from --use=all: +# +# - extralagefile (ex: test_zipfile64): really too slow to be enabled +# "by default" +# - tzdata: while needed to validate fully test_datetime, it makes +# test_datetime too slow (15-20 min on some buildbots) and so is disabled by +# default (see bpo-30822). +RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata') + + +# Types for types hints +StrPath = str +TestName = str +StrJSON = str +TestTuple = tuple[TestName, ...] +TestList = list[TestName] +# --match and --ignore options: list of patterns +# ('*' joker character can be used) +TestFilter = list[tuple[TestName, bool]] +FilterTuple = tuple[TestName, ...] +FilterDict = dict[TestName, FilterTuple] + + +def format_duration(seconds: float) -> str: ms = math.ceil(seconds * 1e3) seconds, ms = divmod(ms, 1000) minutes, seconds = divmod(seconds, 60) @@ -16,17 +70,20 @@ def format_duration(seconds): if minutes: parts.append('%s min' % minutes) if seconds: - parts.append('%s sec' % seconds) - if ms: - parts.append('%s ms' % ms) + if parts: + # 2 min 1 sec + parts.append('%s sec' % seconds) + else: + # 1.0 sec + parts.append('%.1f sec' % (seconds + ms / 1000)) if not parts: - return '0 ms' + return '%s ms' % ms parts = parts[:2] return ' '.join(parts) -def removepy(names): +def strip_py_suffix(names: list[str] | None) -> None: if not names: return for idx, name in enumerate(names): @@ -35,11 +92,20 @@ def removepy(names): names[idx] = basename -def count(n, word): +def plural(n: int, singular: str, plural: str | None = None) -> str: if n == 1: - return "%d %s" % (n, word) + return singular + elif plural is not None: + return plural else: - return "%d %ss" % (n, word) + return singular + 's' + + +def count(n: int, word: str) -> str: + if n == 1: + return f"{n} {word}" + else: + return f"{n} {word}s" def printlist(x, width=70, indent=4, file=None): @@ -57,5 +123,677 @@ def printlist(x, width=70, indent=4, file=None): file=file) -def print_warning(msg): - print(f"Warning -- {msg}", file=sys.stderr, flush=True) +def print_warning(msg: str) -> None: + support.print_warning(msg) + + +orig_unraisablehook = None + + +def regrtest_unraisable_hook(unraisable) -> None: + global orig_unraisablehook + support.environment_altered = True + support.print_warning("Unraisable exception") + old_stderr = sys.stderr + try: + support.flush_std_streams() + sys.stderr = support.print_warning.orig_stderr + assert orig_unraisablehook is not None, "orig_unraisablehook not set" + orig_unraisablehook(unraisable) + sys.stderr.flush() + finally: + sys.stderr = old_stderr + + +def setup_unraisable_hook() -> None: + global orig_unraisablehook + orig_unraisablehook = sys.unraisablehook + sys.unraisablehook = regrtest_unraisable_hook + + +orig_threading_excepthook = None + + +def regrtest_threading_excepthook(args) -> None: + global orig_threading_excepthook + support.environment_altered = True + support.print_warning(f"Uncaught thread exception: {args.exc_type.__name__}") + old_stderr = sys.stderr + try: + support.flush_std_streams() + sys.stderr = support.print_warning.orig_stderr + assert orig_threading_excepthook is not None, "orig_threading_excepthook not set" + orig_threading_excepthook(args) + sys.stderr.flush() + finally: + sys.stderr = 
old_stderr + + +def setup_threading_excepthook() -> None: + global orig_threading_excepthook + import threading + orig_threading_excepthook = threading.excepthook + threading.excepthook = regrtest_threading_excepthook + + +def clear_caches(): + # Clear the warnings registry, so they can be displayed again + for mod in sys.modules.values(): + if hasattr(mod, '__warningregistry__'): + del mod.__warningregistry__ + + # Flush standard output, so that buffered data is sent to the OS and + # associated Python objects are reclaimed. + for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__): + if stream is not None: + stream.flush() + + try: + re = sys.modules['re'] + except KeyError: + pass + else: + re.purge() + + try: + _strptime = sys.modules['_strptime'] + except KeyError: + pass + else: + _strptime._regex_cache.clear() + + try: + urllib_parse = sys.modules['urllib.parse'] + except KeyError: + pass + else: + urllib_parse.clear_cache() + + try: + urllib_request = sys.modules['urllib.request'] + except KeyError: + pass + else: + urllib_request.urlcleanup() + + try: + linecache = sys.modules['linecache'] + except KeyError: + pass + else: + linecache.clearcache() + + try: + mimetypes = sys.modules['mimetypes'] + except KeyError: + pass + else: + mimetypes._default_mime_types() + + try: + filecmp = sys.modules['filecmp'] + except KeyError: + pass + else: + filecmp._cache.clear() + + try: + struct = sys.modules['struct'] + except KeyError: + pass + else: + struct._clearcache() + + try: + doctest = sys.modules['doctest'] + except KeyError: + pass + else: + doctest.master = None + + try: + ctypes = sys.modules['ctypes'] + except KeyError: + pass + else: + ctypes._reset_cache() + + try: + typing = sys.modules['typing'] + except KeyError: + pass + else: + for f in typing._cleanups: + f() + + import inspect + abs_classes = filter(inspect.isabstract, typing.__dict__.values()) + for abc in abs_classes: + for obj in abc.__subclasses__() + [abc]: + obj._abc_caches_clear() + + try: + fractions = sys.modules['fractions'] + except KeyError: + pass + else: + fractions._hash_algorithm.cache_clear() + + try: + inspect = sys.modules['inspect'] + except KeyError: + pass + else: + inspect._shadowed_dict_from_weakref_mro_tuple.cache_clear() + inspect._filesbymodname.clear() + inspect.modulesbyfile.clear() + + try: + importlib_metadata = sys.modules['importlib.metadata'] + except KeyError: + pass + else: + importlib_metadata.FastPath.__new__.cache_clear() + + +def get_build_info(): + # Get most important configure and build options as a list of strings. + # Example: ['debug', 'ASAN+MSAN'] or ['release', 'LTO+PGO']. 
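+    # The list is consumed by display_header(), which joins it with spaces
+    # into a line such as (illustrative): == Python build: debug ASAN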
+ + config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' + cflags = sysconfig.get_config_var('PY_CFLAGS') or '' + cflags += ' ' + (sysconfig.get_config_var('PY_CFLAGS_NODIST') or '') + ldflags_nodist = sysconfig.get_config_var('PY_LDFLAGS_NODIST') or '' + + build = [] + + # --disable-gil + if sysconfig.get_config_var('Py_GIL_DISABLED'): + if not sys.flags.ignore_environment: + PYTHON_GIL = os.environ.get('PYTHON_GIL', None) + if PYTHON_GIL: + PYTHON_GIL = (PYTHON_GIL == '1') + else: + PYTHON_GIL = None + + free_threading = "free_threading" + if PYTHON_GIL is not None: + free_threading = f"{free_threading} GIL={int(PYTHON_GIL)}" + build.append(free_threading) + + if hasattr(sys, 'gettotalrefcount'): + # --with-pydebug + build.append('debug') + + if '-DNDEBUG' in cflags: + build.append('without_assert') + else: + build.append('release') + + if '--with-assertions' in config_args: + build.append('with_assert') + elif '-DNDEBUG' not in cflags: + build.append('with_assert') + + # --enable-experimental-jit + tier2 = re.search('-D_Py_TIER2=([0-9]+)', cflags) + if tier2: + tier2 = int(tier2.group(1)) + + if not sys.flags.ignore_environment: + PYTHON_JIT = os.environ.get('PYTHON_JIT', None) + if PYTHON_JIT: + PYTHON_JIT = (PYTHON_JIT != '0') + else: + PYTHON_JIT = None + + if tier2 == 1: # =yes + if PYTHON_JIT == False: + jit = 'JIT=off' + else: + jit = 'JIT' + elif tier2 == 3: # =yes-off + if PYTHON_JIT: + jit = 'JIT' + else: + jit = 'JIT=off' + elif tier2 == 4: # =interpreter + if PYTHON_JIT == False: + jit = 'JIT-interpreter=off' + else: + jit = 'JIT-interpreter' + elif tier2 == 6: # =interpreter-off (Secret option!) + if PYTHON_JIT: + jit = 'JIT-interpreter' + else: + jit = 'JIT-interpreter=off' + elif '-D_Py_JIT' in cflags: + jit = 'JIT' + else: + jit = None + if jit: + build.append(jit) + + # --enable-framework=name + framework = sysconfig.get_config_var('PYTHONFRAMEWORK') + if framework: + build.append(f'framework={framework}') + + # --enable-shared + shared = int(sysconfig.get_config_var('PY_ENABLE_SHARED') or '0') + if shared: + build.append('shared') + + # --with-lto + optimizations = [] + if '-flto=thin' in ldflags_nodist: + optimizations.append('ThinLTO') + elif '-flto' in ldflags_nodist: + optimizations.append('LTO') + + if support.check_cflags_pgo(): + # PGO (--enable-optimizations) + optimizations.append('PGO') + + if support.check_bolt_optimized(): + # BOLT (--enable-bolt) + optimizations.append('BOLT') + + if optimizations: + build.append('+'.join(optimizations)) + + # --with-address-sanitizer + sanitizers = [] + if support.check_sanitizer(address=True): + sanitizers.append("ASAN") + # --with-memory-sanitizer + if support.check_sanitizer(memory=True): + sanitizers.append("MSAN") + # --with-undefined-behavior-sanitizer + if support.check_sanitizer(ub=True): + sanitizers.append("UBSAN") + # --with-thread-sanitizer + if support.check_sanitizer(thread=True): + sanitizers.append("TSAN") + if sanitizers: + build.append('+'.join(sanitizers)) + + # --with-trace-refs + if hasattr(sys, 'getobjects'): + build.append("TraceRefs") + # --enable-pystats + if hasattr(sys, '_stats_on'): + build.append("pystats") + # --with-valgrind + if sysconfig.get_config_var('WITH_VALGRIND'): + build.append("valgrind") + # --with-dtrace + if sysconfig.get_config_var('WITH_DTRACE'): + build.append("dtrace") + + return build + + +def get_temp_dir(tmp_dir: StrPath | None = None) -> StrPath: + if tmp_dir: + tmp_dir = os.path.expanduser(tmp_dir) + else: + # When tests are run from the Python build directory, it 
is best practice + # to keep the test files in a subfolder. This eases the cleanup of leftover + # files using the "make distclean" command. + if sysconfig.is_python_build(): + if not support.is_wasi: + tmp_dir = sysconfig.get_config_var('abs_builddir') + if tmp_dir is None: + tmp_dir = sysconfig.get_config_var('abs_srcdir') + if not tmp_dir: + # gh-74470: On Windows, only srcdir is available. Using + # abs_builddir mostly matters on UNIX when building + # Python out of the source tree, especially when the + # source tree is read only. + tmp_dir = sysconfig.get_config_var('srcdir') + if not tmp_dir: + raise RuntimeError( + "Could not determine the correct value for tmp_dir" + ) + tmp_dir = os.path.join(tmp_dir, 'build') + else: + # WASI platform + tmp_dir = sysconfig.get_config_var('projectbase') + if not tmp_dir: + raise RuntimeError( + "sysconfig.get_config_var('projectbase') " + f"unexpectedly returned {tmp_dir!r} on WASI" + ) + tmp_dir = os.path.join(tmp_dir, 'build') + + # When get_temp_dir() is called in a worker process, + # get_temp_dir() path is different than in the parent process + # which is not a WASI process. So the parent does not create + # the same "tmp_dir" than the test worker process. + os.makedirs(tmp_dir, exist_ok=True) + else: + tmp_dir = tempfile.gettempdir() + + return os.path.abspath(tmp_dir) + + +def fix_umask() -> None: + if support.is_emscripten: + # Emscripten has default umask 0o777, which breaks some tests. + # see https://github.com/emscripten-core/emscripten/issues/17269 + old_mask = os.umask(0) + if old_mask == 0o777: + os.umask(0o027) + else: + os.umask(old_mask) + + +def get_work_dir(parent_dir: StrPath, worker: bool = False) -> StrPath: + # Define a writable temp dir that will be used as cwd while running + # the tests. The name of the dir includes the pid to allow parallel + # testing (see the -j option). + # Emscripten and WASI have stubbed getpid(), Emscripten has only + # millisecond clock resolution. Use randint() instead. + if support.is_emscripten or support.is_wasi: + nounce = random.randint(0, 1_000_000) + else: + nounce = os.getpid() + + if worker: + work_dir = WORK_DIR_PREFIX + str(nounce) + else: + work_dir = WORKER_WORK_DIR_PREFIX + str(nounce) + work_dir += os_helper.FS_NONASCII + work_dir = os.path.join(parent_dir, work_dir) + return work_dir + + +@contextlib.contextmanager +def exit_timeout(): + try: + yield + except SystemExit as exc: + # bpo-38203: Python can hang at exit in Py_Finalize(), especially + # on threading._shutdown() call: put a timeout + if threading_helper.can_start_thread: + faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True) + sys.exit(exc.code) + + +def remove_testfn(test_name: TestName, verbose: int) -> None: + # Try to clean up os_helper.TESTFN if left behind. + # + # While tests shouldn't leave any files or directories behind, when a test + # fails that can be tedious for it to arrange. The consequences can be + # especially nasty on Windows, since if a test leaves a file open, it + # cannot be deleted by name (while there's nothing we can do about that + # here either, we can display the name of the offending test, which is a + # real help). 
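+    #
+    # A leaked file typically produces a warning such as (illustrative,
+    # TESTFN embeds the current pid):
+    #   Warning -- test_foo left behind file '@test_1234_tmp'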
+ name = os_helper.TESTFN + if not os.path.exists(name): + return + + nuker: Callable[[str], None] + if os.path.isdir(name): + import shutil + kind, nuker = "directory", shutil.rmtree + elif os.path.isfile(name): + kind, nuker = "file", os.unlink + else: + raise RuntimeError(f"os.path says {name!r} exists but is neither " + f"directory nor file") + + if verbose: + print_warning(f"{test_name} left behind {kind} {name!r}") + support.environment_altered = True + + try: + import stat + # fix possible permissions problems that might prevent cleanup + os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + nuker(name) + except Exception as exc: + print_warning(f"{test_name} left behind {kind} {name!r} " + f"and it couldn't be removed: {exc}") + + +def abs_module_name(test_name: TestName, test_dir: StrPath | None) -> TestName: + if test_name.startswith('test.') or test_dir: + return test_name + else: + # Import it from the test package + return 'test.' + test_name + + +# gh-90681: When rerunning tests, we might need to rerun the whole +# class or module suite if some its life-cycle hooks fail. +# Test level hooks are not affected. +_TEST_LIFECYCLE_HOOKS = frozenset(( + 'setUpClass', 'tearDownClass', + 'setUpModule', 'tearDownModule', +)) + +def normalize_test_name(test_full_name: str, *, + is_error: bool = False) -> str | None: + short_name = test_full_name.split(" ")[0] + if is_error and short_name in _TEST_LIFECYCLE_HOOKS: + if test_full_name.startswith(('setUpModule (', 'tearDownModule (')): + # if setUpModule() or tearDownModule() failed, don't filter + # tests with the test file name, don't use use filters. + return None + + # This means that we have a failure in a life-cycle hook, + # we need to rerun the whole module or class suite. + # Basically the error looks like this: + # ERROR: setUpClass (test.test_reg_ex.RegTest) + # or + # ERROR: setUpModule (test.test_reg_ex) + # So, we need to parse the class / module name. + lpar = test_full_name.index('(') + rpar = test_full_name.index(')') + return test_full_name[lpar + 1: rpar].split('.')[-1] + return short_name + + +def adjust_rlimit_nofile() -> None: + """ + On macOS the default fd limit (RLIMIT_NOFILE) is sometimes too low (256) + for our test suite to succeed. Raise it to something more reasonable. 1024 + is a common Linux default. 
+ """ + try: + import resource + except ImportError: + return + + fd_limit, max_fds = resource.getrlimit(resource.RLIMIT_NOFILE) + + desired_fds = 1024 + + if fd_limit < desired_fds and fd_limit < max_fds: + new_fd_limit = min(desired_fds, max_fds) + try: + resource.setrlimit(resource.RLIMIT_NOFILE, + (new_fd_limit, max_fds)) + print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}") + except (ValueError, OSError) as err: + print_warning(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to " + f"{new_fd_limit}: {err}.") + + +def get_host_runner() -> str: + if (hostrunner := os.environ.get("_PYTHON_HOSTRUNNER")) is None: + hostrunner = sysconfig.get_config_var("HOSTRUNNER") + return hostrunner + + +def is_cross_compiled() -> bool: + return ('_PYTHON_HOST_PLATFORM' in os.environ) + + +def format_resources(use_resources: Iterable[str]) -> str: + use_resources = set(use_resources) + all_resources = set(ALL_RESOURCES) + + # Express resources relative to "all" + relative_all = ['all'] + for name in sorted(all_resources - use_resources): + relative_all.append(f'-{name}') + for name in sorted(use_resources - all_resources): + relative_all.append(f'{name}') + all_text = ','.join(relative_all) + all_text = f"resources: {all_text}" + + # List of enabled resources + text = ','.join(sorted(use_resources)) + text = f"resources ({len(use_resources)}): {text}" + + # Pick the shortest string (prefer relative to all if lengths are equal) + if len(all_text) <= len(text): + return all_text + else: + return text + + +def display_header(use_resources: tuple[str, ...], + python_cmd: tuple[str, ...] | None) -> None: + # Print basic platform information + print("==", platform.python_implementation(), *sys.version.split()) + print("==", platform.platform(aliased=True), + "%s-endian" % sys.byteorder) + print("== Python build:", ' '.join(get_build_info())) + print("== cwd:", os.getcwd()) + + cpu_count: object = os.cpu_count() + if cpu_count: + # The function is new in Python 3.13; mypy doesn't know about it yet: + process_cpu_count = os.process_cpu_count() # type: ignore[attr-defined] + if process_cpu_count and process_cpu_count != cpu_count: + cpu_count = f"{process_cpu_count} (process) / {cpu_count} (system)" + print("== CPU count:", cpu_count) + print("== encodings: locale=%s FS=%s" + % (locale.getencoding(), sys.getfilesystemencoding())) + + if use_resources: + text = format_resources(use_resources) + print(f"== {text}") + else: + print("== resources: all test resources are disabled, " + "use -u option to unskip tests") + + cross_compile = is_cross_compiled() + if cross_compile: + print("== cross compiled: Yes") + if python_cmd: + cmd = shlex.join(python_cmd) + print(f"== host python: {cmd}") + + get_cmd = [*python_cmd, '-m', 'platform'] + proc = subprocess.run( + get_cmd, + stdout=subprocess.PIPE, + text=True, + cwd=os_helper.SAVEDCWD) + stdout = proc.stdout.replace('\n', ' ').strip() + if stdout: + print(f"== host platform: {stdout}") + elif proc.returncode: + print(f"== host platform: ") + else: + hostrunner = get_host_runner() + if hostrunner: + print(f"== host runner: {hostrunner}") + + # This makes it easier to remember what to set in your local + # environment when trying to reproduce a sanitizer failure. 
+ asan = support.check_sanitizer(address=True) + msan = support.check_sanitizer(memory=True) + ubsan = support.check_sanitizer(ub=True) + tsan = support.check_sanitizer(thread=True) + sanitizers = [] + if asan: + sanitizers.append("address") + if msan: + sanitizers.append("memory") + if ubsan: + sanitizers.append("undefined behavior") + if tsan: + sanitizers.append("thread") + if sanitizers: + print(f"== sanitizers: {', '.join(sanitizers)}") + for sanitizer, env_var in ( + (asan, "ASAN_OPTIONS"), + (msan, "MSAN_OPTIONS"), + (ubsan, "UBSAN_OPTIONS"), + (tsan, "TSAN_OPTIONS"), + ): + options= os.environ.get(env_var) + if sanitizer and options is not None: + print(f"== {env_var}={options!r}") + + print(flush=True) + + +def cleanup_temp_dir(tmp_dir: StrPath) -> None: + import glob + + path = os.path.join(glob.escape(tmp_dir), TMP_PREFIX + '*') + print("Cleanup %s directory" % tmp_dir) + for name in glob.glob(path): + if os.path.isdir(name): + print("Remove directory: %s" % name) + os_helper.rmtree(name) + else: + print("Remove file: %s" % name) + os_helper.unlink(name) + +WINDOWS_STATUS = { + 0xC0000005: "STATUS_ACCESS_VIOLATION", + 0xC00000FD: "STATUS_STACK_OVERFLOW", + 0xC000013A: "STATUS_CONTROL_C_EXIT", +} + +def get_signal_name(exitcode): + if exitcode < 0: + signum = -exitcode + try: + return signal.Signals(signum).name + except ValueError: + pass + + # Shell exit code (ex: WASI build) + if 128 < exitcode < 256: + signum = exitcode - 128 + try: + return signal.Signals(signum).name + except ValueError: + pass + + try: + return WINDOWS_STATUS[exitcode] + except KeyError: + pass + + return None + + +ILLEGAL_XML_CHARS_RE = re.compile( + '[' + # Control characters; newline (\x0A and \x0D) and TAB (\x09) are legal + '\x00-\x08\x0B\x0C\x0E-\x1F' + # Surrogate characters + '\uD800-\uDFFF' + # Special Unicode characters + '\uFFFE' + '\uFFFF' + # Match multiple sequential invalid characters for better efficiency + ']+') + +def _sanitize_xml_replace(regs): + text = regs[0] + return ''.join(f'\\x{ord(ch):02x}' if ch <= '\xff' else ascii(ch)[1:-1] + for ch in text) + +def sanitize_xml(text: str) -> str: + return ILLEGAL_XML_CHARS_RE.sub(_sanitize_xml_replace, text) diff --git a/Lib/test/libregrtest/win_utils.py b/Lib/test/libregrtest/win_utils.py index 95db3def36..b51fde0af5 100644 --- a/Lib/test/libregrtest/win_utils.py +++ b/Lib/test/libregrtest/win_utils.py @@ -1,105 +1,128 @@ +import _overlapped +import _thread import _winapi -import msvcrt -import os -import subprocess -import uuid -from test import support +import math +import struct +import winreg -# Max size of asynchronous reads -BUFSIZE = 8192 -# Exponential damping factor (see below) -LOAD_FACTOR_1 = 0.9200444146293232478931553241 # Seconds per measurement -SAMPLING_INTERVAL = 5 -COUNTER_NAME = r'\System\Processor Queue Length' +SAMPLING_INTERVAL = 1 +# Exponential damping factor to compute exponentially weighted moving average +# on 1 minute (60 seconds) +LOAD_FACTOR_1 = 1 / math.exp(SAMPLING_INTERVAL / 60) +# Initialize the load using the arithmetic mean of the first NVALUE values +# of the Processor Queue Length +NVALUE = 5 class WindowsLoadTracker(): """ - This class asynchronously interacts with the `typeperf` command to read - the system load on Windows. Mulitprocessing and threads can't be used - here because they interfere with the test suite's cases for those - modules. + This class asynchronously reads the performance counters to calculate + the system load on Windows. 
A "raw" thread is used here to prevent + interference with the test suite's cases for the threading module. """ def __init__(self): - self.load = 0.0 - self.start() - - def start(self): - # Create a named pipe which allows for asynchronous IO in Windows - pipe_name = r'\\.\pipe\typeperf_output_' + str(uuid.uuid4()) - - open_mode = _winapi.PIPE_ACCESS_INBOUND - open_mode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE - open_mode |= _winapi.FILE_FLAG_OVERLAPPED - - # This is the read end of the pipe, where we will be grabbing output - self.pipe = _winapi.CreateNamedPipe( - pipe_name, open_mode, _winapi.PIPE_WAIT, - 1, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL - ) - # The write end of the pipe which is passed to the created process - pipe_write_end = _winapi.CreateFile( - pipe_name, _winapi.GENERIC_WRITE, 0, _winapi.NULL, - _winapi.OPEN_EXISTING, 0, _winapi.NULL - ) - # Open up the handle as a python file object so we can pass it to - # subprocess - command_stdout = msvcrt.open_osfhandle(pipe_write_end, 0) - - # Connect to the read end of the pipe in overlap/async mode - overlap = _winapi.ConnectNamedPipe(self.pipe, overlapped=True) - overlap.GetOverlappedResult(True) - - # Spawn off the load monitor - command = ['typeperf', COUNTER_NAME, '-si', str(SAMPLING_INTERVAL)] - self.p = subprocess.Popen(command, stdout=command_stdout, cwd=os_helper.SAVEDCWD) - - # Close our copy of the write end of the pipe - os.close(command_stdout) - - def close(self): - if self.p is None: + # make __del__ not fail if pre-flight test fails + self._running = None + self._stopped = None + + # Pre-flight test for access to the performance data; + # `PermissionError` will be raised if not allowed + winreg.QueryInfoKey(winreg.HKEY_PERFORMANCE_DATA) + + self._values = [] + self._load = None + self._running = _overlapped.CreateEvent(None, True, False, None) + self._stopped = _overlapped.CreateEvent(None, True, False, None) + + _thread.start_new_thread(self._update_load, (), {}) + + def _update_load(self, + # localize module access to prevent shutdown errors + _wait=_winapi.WaitForSingleObject, + _signal=_overlapped.SetEvent): + # run until signaled to stop + while _wait(self._running, 1000): + self._calculate_load() + # notify stopped + _signal(self._stopped) + + def _calculate_load(self, + # localize module access to prevent shutdown errors + _query=winreg.QueryValueEx, + _hkey=winreg.HKEY_PERFORMANCE_DATA, + _unpack=struct.unpack_from): + # get the 'System' object + data, _ = _query(_hkey, '2') + # PERF_DATA_BLOCK { + # WCHAR Signature[4] 8 + + # DWOWD LittleEndian 4 + + # DWORD Version 4 + + # DWORD Revision 4 + + # DWORD TotalByteLength 4 + + # DWORD HeaderLength = 24 byte offset + # ... + # } + obj_start, = _unpack('L', data, 24) + # PERF_OBJECT_TYPE { + # DWORD TotalByteLength + # DWORD DefinitionLength + # DWORD HeaderLength + # ... + # } + data_start, defn_start = _unpack('4xLL', data, obj_start) + data_base = obj_start + data_start + defn_base = obj_start + defn_start + # find the 'Processor Queue Length' counter (index=44) + while defn_base < data_base: + # PERF_COUNTER_DEFINITION { + # DWORD ByteLength + # DWORD CounterNameTitleIndex + # ... 
[7 DWORDs/28 bytes] + # DWORD CounterOffset + # } + size, idx, offset = _unpack('LL28xL', data, defn_base) + defn_base += size + if idx == 44: + counter_offset = data_base + offset + # the counter is known to be PERF_COUNTER_RAWCOUNT (DWORD) + processor_queue_length, = _unpack('L', data, counter_offset) + break + else: return - self.p.kill() - self.p.wait() - self.p = None - def __del__(self): - self.close() - - def read_output(self): - import _winapi - - overlapped, _ = _winapi.ReadFile(self.pipe, BUFSIZE, True) - bytes_read, res = overlapped.GetOverlappedResult(False) - if res != 0: - return - - return overlapped.getbuffer().decode() + # We use an exponentially weighted moving average, imitating the + # load calculation on Unix systems. + # https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation + # https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average + if self._load is not None: + self._load = (self._load * LOAD_FACTOR_1 + + processor_queue_length * (1.0 - LOAD_FACTOR_1)) + elif len(self._values) < NVALUE: + self._values.append(processor_queue_length) + else: + self._load = sum(self._values) / len(self._values) + + def close(self, kill=True): + self.__del__() + return + + def __del__(self, + # localize module access to prevent shutdown errors + _wait=_winapi.WaitForSingleObject, + _close=_winapi.CloseHandle, + _signal=_overlapped.SetEvent): + if self._running is not None: + # tell the update thread to quit + _signal(self._running) + # wait for the update thread to signal done + _wait(self._stopped, -1) + # cleanup events + _close(self._running) + _close(self._stopped) + self._running = self._stopped = None def getloadavg(self): - typeperf_output = self.read_output() - # Nothing to update, just return the current load - if not typeperf_output: - return self.load - - # Process the backlog of load values - for line in typeperf_output.splitlines(): - # typeperf outputs in a CSV format like this: - # "07/19/2018 01:32:26.605","3.000000" - toks = line.split(',') - # Ignore blank lines and the initial header - if line.strip() == '' or (COUNTER_NAME in line) or len(toks) != 2: - continue - - load = float(toks[1].replace('"', '')) - # We use an exponentially weighted moving average, imitating the - # load calculation on Unix systems. 
- # https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation - new_load = self.load * LOAD_FACTOR_1 + load * (1.0 - LOAD_FACTOR_1) - self.load = new_load - - return self.load + return self._load diff --git a/Lib/test/libregrtest/worker.py b/Lib/test/libregrtest/worker.py new file mode 100644 index 0000000000..d232ea6948 --- /dev/null +++ b/Lib/test/libregrtest/worker.py @@ -0,0 +1,116 @@ +import subprocess +import sys +import os +from typing import Any, NoReturn + +from test.support import os_helper, Py_DEBUG + +from .setup import setup_process, setup_test_dir +from .runtests import WorkerRunTests, JsonFile, JsonFileType +from .single import run_single_test +from .utils import ( + StrPath, StrJSON, TestFilter, + get_temp_dir, get_work_dir, exit_timeout) + + +USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg")) + + +def create_worker_process(runtests: WorkerRunTests, output_fd: int, + tmp_dir: StrPath | None = None) -> subprocess.Popen[str]: + worker_json = runtests.as_json() + + cmd = runtests.create_python_cmd() + cmd.extend(['-m', 'test.libregrtest.worker', worker_json]) + + env = dict(os.environ) + if tmp_dir is not None: + env['TMPDIR'] = tmp_dir + env['TEMP'] = tmp_dir + env['TMP'] = tmp_dir + + # Running the child from the same working directory as regrtest's original + # invocation ensures that TEMPDIR for the child is the same when + # sysconfig.is_python_build() is true. See issue 15300. + # + # Emscripten and WASI Python must start in the Python source code directory + # to get 'python.js' or 'python.wasm' file. Then worker_process() changes + # to a temporary directory created to run tests. + work_dir = os_helper.SAVEDCWD + + kwargs: dict[str, Any] = dict( + env=env, + stdout=output_fd, + # bpo-45410: Write stderr into stdout to keep messages order + stderr=output_fd, + text=True, + close_fds=True, + cwd=work_dir, + ) + if USE_PROCESS_GROUP: + kwargs['start_new_session'] = True + + # Pass json_file to the worker process + json_file = runtests.json_file + json_file.configure_subprocess(kwargs) + + with json_file.inherit_subprocess(): + return subprocess.Popen(cmd, **kwargs) + + +def worker_process(worker_json: StrJSON) -> NoReturn: + runtests = WorkerRunTests.from_json(worker_json) + test_name = runtests.tests[0] + match_tests: TestFilter = runtests.match_tests + json_file: JsonFile = runtests.json_file + + setup_test_dir(runtests.test_dir) + setup_process() + + if runtests.rerun: + if match_tests: + matching = "matching: " + ", ".join(pattern for pattern, result in match_tests if result) + print(f"Re-running {test_name} in verbose mode ({matching})", flush=True) + else: + print(f"Re-running {test_name} in verbose mode", flush=True) + + result = run_single_test(test_name, runtests) + if runtests.coverage: + if "test.cov" in sys.modules: # imported by -Xpresite= + result.covered_lines = list(sys.modules["test.cov"].coverage) + elif not Py_DEBUG: + print( + "Gathering coverage in worker processes requires --with-pydebug", + flush=True, + ) + else: + raise LookupError( + "`test.cov` not found in sys.modules but coverage wanted" + ) + + if json_file.file_type == JsonFileType.STDOUT: + print() + result.write_json_into(sys.stdout) + else: + with json_file.open('w', encoding='utf-8') as json_fp: + result.write_json_into(json_fp) + + sys.exit(0) + + +def main() -> NoReturn: + if len(sys.argv) != 2: + print("usage: python -m test.libregrtest.worker JSON") + sys.exit(1) + worker_json = sys.argv[1] + + tmp_dir = get_temp_dir() + work_dir = 
get_work_dir(tmp_dir, worker=True) + + with exit_timeout(): + with os_helper.temp_cwd(work_dir, quiet=True): + worker_process(worker_json) + + +if __name__ == "__main__": + main() diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py new file mode 100644 index 0000000000..9c3d0c7cf4 --- /dev/null +++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py @@ -0,0 +1,11 @@ +import sys +import unittest +import test_regrtest_b.util + +class Test(unittest.TestCase): + def test(self): + test_regrtest_b.util # does not fail + self.assertIn('test_regrtest_a', sys.modules) + self.assertIs(sys.modules['test_regrtest_b'], test_regrtest_b) + self.assertIs(sys.modules['test_regrtest_b.util'], test_regrtest_b.util) + self.assertNotIn('test_regrtest_c', sys.modules) diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py new file mode 100644 index 0000000000..3dfba25345 --- /dev/null +++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py @@ -0,0 +1,9 @@ +import sys +import unittest + +class Test(unittest.TestCase): + def test(self): + self.assertNotIn('test_regrtest_a', sys.modules) + self.assertIn('test_regrtest_b', sys.modules) + self.assertNotIn('test_regrtest_b.util', sys.modules) + self.assertNotIn('test_regrtest_c', sys.modules) diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py new file mode 100644 index 0000000000..de80769118 --- /dev/null +++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py @@ -0,0 +1,11 @@ +import sys +import unittest +import test_regrtest_b.util + +class Test(unittest.TestCase): + def test(self): + test_regrtest_b.util # does not fail + self.assertNotIn('test_regrtest_a', sys.modules) + self.assertIs(sys.modules['test_regrtest_b'], test_regrtest_b) + self.assertIs(sys.modules['test_regrtest_b.util'], test_regrtest_b.util) + self.assertIn('test_regrtest_c', sys.modules) From e0f50eabf7d93974b5bf34a8f4103abaa2547d1d Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Mon, 24 Feb 2025 16:33:50 -0800 Subject: [PATCH 04/15] running regrtest Signed-off-by: Ashwin Naren --- Lib/inspect.py | 18 +++++++ Lib/test/__main__.py | 2 +- Lib/test/libregrtest/logger.py | 2 +- Lib/test/libregrtest/result.py | 55 +++++++++++---------- Lib/test/libregrtest/results.py | 39 +++++++-------- Lib/test/libregrtest/runtests.py | 19 ++++---- Lib/test/libregrtest/single.py | 50 +++++++++---------- Lib/test/libregrtest/utils.py | 2 +- Lib/test/libregrtest/win_utils.py | 81 +++---------------------------- Lib/trace.py | 2 +- 10 files changed, 109 insertions(+), 161 deletions(-) diff --git a/Lib/inspect.py b/Lib/inspect.py index a84f3346b3..22e1b9deb8 100644 --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -3096,6 +3096,24 @@ def signature(obj, *, follow_wrapped=True): return Signature.from_callable(obj, follow_wrapped=follow_wrapped) +@functools.lru_cache() +def _shadowed_dict_from_weakref_mro_tuple(*weakref_mro): + for weakref_entry in weakref_mro: + # Normally we'd have to check whether the result of weakref_entry() + # is None here, in case the object the weakref is pointing to has died. 
+ # In this specific case, however, we know that the only caller of this + # function is `_shadowed_dict()`, and that therefore this weakref is + # guaranteed to point to an object that is still alive. + entry = weakref_entry() + dunder_dict = _get_dunder_dict_of_class(entry) + if '__dict__' in dunder_dict: + class_dict = dunder_dict['__dict__'] + if not (type(class_dict) is types.GetSetDescriptorType and + class_dict.__name__ == "__dict__" and + class_dict.__objclass__ is entry): + return class_dict + return _sentinel + def _main(): """ Logic for inspecting an object given at command line """ import argparse diff --git a/Lib/test/__main__.py b/Lib/test/__main__.py index 19a6b2b890..647f9f29af 100644 --- a/Lib/test/__main__.py +++ b/Lib/test/__main__.py @@ -1,2 +1,2 @@ from test.libregrtest import main -main() +main.main() diff --git a/Lib/test/libregrtest/logger.py b/Lib/test/libregrtest/logger.py index fa1d4d575c..f0b4d34e77 100644 --- a/Lib/test/libregrtest/logger.py +++ b/Lib/test/libregrtest/logger.py @@ -76,7 +76,7 @@ def start_load_tracker(self) -> None: return try: - self.win_load_tracker = WindowsLoadTracker() + self.win_load_tracker = None except PermissionError as error: # Standard accounts may not have access to the performance # counters. diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py index 7553efe5e8..83b2b60834 100644 --- a/Lib/test/libregrtest/result.py +++ b/Lib/test/libregrtest/result.py @@ -125,34 +125,33 @@ def _format_failed(self): return f"{self.test_name} failed" def __str__(self) -> str: - match self.state: - case State.PASSED: - return f"{self.test_name} passed" - case State.FAILED: - return self._format_failed() - case State.SKIPPED: - return f"{self.test_name} skipped" - case State.UNCAUGHT_EXC: - return f"{self.test_name} failed (uncaught exception)" - case State.REFLEAK: - return f"{self.test_name} failed (reference leak)" - case State.ENV_CHANGED: - return f"{self.test_name} failed (env changed)" - case State.RESOURCE_DENIED: - return f"{self.test_name} skipped (resource denied)" - case State.INTERRUPTED: - return f"{self.test_name} interrupted" - case State.WORKER_FAILED: - return f"{self.test_name} worker non-zero exit code" - case State.WORKER_BUG: - return f"{self.test_name} worker bug" - case State.DID_NOT_RUN: - return f"{self.test_name} ran no tests" - case State.TIMEOUT: - assert self.duration is not None, "self.duration is None" - return f"{self.test_name} timed out ({format_duration(self.duration)})" - case _: - raise ValueError("unknown result state: {state!r}") + if self.state == State.PASSED: + return f"{self.test_name} passed" + elif self.state == State.FAILED: + return self._format_failed() + elif self.state == State.SKIPPED: + return f"{self.test_name} skipped" + elif self.state == State.UNCAUGHT_EXC: + return f"{self.test_name} failed (uncaught exception)" + elif self.state == State.REFLEAK: + return f"{self.test_name} failed (reference leak)" + elif self.state == State.ENV_CHANGED: + return f"{self.test_name} failed (env changed)" + elif self.state == State.RESOURCE_DENIED: + return f"{self.test_name} skipped (resource denied)" + elif self.state == State.INTERRUPTED: + return f"{self.test_name} interrupted" + elif self.state == State.WORKER_FAILED: + return f"{self.test_name} worker non-zero exit code" + elif self.state == State.WORKER_BUG: + return f"{self.test_name} worker bug" + elif self.state == State.DID_NOT_RUN: + return f"{self.test_name} ran no tests" + elif self.state == State.TIMEOUT: + assert 
self.duration is not None, "self.duration is None" + return f"{self.test_name} timed out ({format_duration(self.duration)})" + else: + raise ValueError(f"unknown result state: {self.state!r}") def has_meaningful_duration(self): return State.has_meaningful_duration(self.state) diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py index 9eda926966..98361fcf06 100644 --- a/Lib/test/libregrtest/results.py +++ b/Lib/test/libregrtest/results.py @@ -96,26 +96,27 @@ def accumulate_result(self, result: TestResult, runtests: RunTests) -> None: rerun = runtests.rerun fail_env_changed = runtests.fail_env_changed - match result.state: - case State.PASSED: - self.good.append(test_name) - case State.ENV_CHANGED: - self.env_changed.append(test_name) + if result.state == State.PASSED: + self.good.append(test_name) + elif result.state == State.ENV_CHANGED: + self.env_changed.append(test_name) + self.rerun_results.append(result) + elif result.state == State.SKIPPED: + self.skipped.append(test_name) + elif result.state == State.RESOURCE_DENIED: + self.resource_denied.append(test_name) + elif result.state == State.INTERRUPTED: + self.interrupted = True + elif result.state == State.DID_NOT_RUN: + self.run_no_tests.append(test_name) + elif result.state == State.WORKER_BUG: + self.worker_bug = True + else: + if result.is_failed(fail_env_changed): + self.bad.append(test_name) self.rerun_results.append(result) - case State.SKIPPED: - self.skipped.append(test_name) - case State.RESOURCE_DENIED: - self.resource_denied.append(test_name) - case State.INTERRUPTED: - self.interrupted = True - case State.DID_NOT_RUN: - self.run_no_tests.append(test_name) - case _: - if result.is_failed(fail_env_changed): - self.bad.append(test_name) - self.rerun_results.append(result) - else: - raise ValueError(f"invalid test state: {result.state!r}") + else: + raise ValueError(f"invalid test state: {result.state!r}") if result.state == State.WORKER_BUG: self.worker_bug = True diff --git a/Lib/test/libregrtest/runtests.py b/Lib/test/libregrtest/runtests.py index 7b607d4a55..553d4e0d5c 100644 --- a/Lib/test/libregrtest/runtests.py +++ b/Lib/test/libregrtest/runtests.py @@ -29,16 +29,15 @@ class JsonFile: file_type: str def configure_subprocess(self, popen_kwargs: dict[str, Any]) -> None: - match self.file_type: - case JsonFileType.UNIX_FD: - # Unix file descriptor - popen_kwargs['pass_fds'] = [self.file] - case JsonFileType.WINDOWS_HANDLE: - # Windows handle - # We run mypy with `--platform=linux` so it complains about this: - startupinfo = subprocess.STARTUPINFO() # type: ignore[attr-defined] - startupinfo.lpAttributeList = {"handle_list": [self.file]} - popen_kwargs['startupinfo'] = startupinfo + if self.file_type == JsonFileType.UNIX_FD: + # Unix file descriptor + popen_kwargs['pass_fds'] = [self.file] + elif self.file_type == JsonFileType.WINDOWS_HANDLE: + # Windows handle + # We run mypy with `--platform=linux` so it complains about this: + startupinfo = subprocess.STARTUPINFO() # type: ignore[attr-defined] + startupinfo.lpAttributeList = {"handle_list": [self.file]} + popen_kwargs['startupinfo'] = startupinfo @contextlib.contextmanager def inherit_subprocess(self) -> Iterator[None]: diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py index adc8f1f455..9d3c55def6 100644 --- a/Lib/test/libregrtest/single.py +++ b/Lib/test/libregrtest/single.py @@ -96,23 +96,22 @@ def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None: stats: TestStats | None - match test_result: 
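    # (Note on the rewrite below and throughout this series: each `match`
    # statement is converted into an equivalent `if`/`elif` chain,
    # presumably because the target interpreter does not yet support
    # structural pattern matching. The two forms are interchangeable here,
    # since every arm only tests a value or an isinstance() of the subject.)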
-        case TestStats():
-            stats = test_result
-        case unittest.TestResult():
-            stats = TestStats.from_unittest(test_result)
-        case None:
-            print_warning(f"{result.test_name} test runner returned None: {test_func}")
+    if isinstance(test_result, TestStats):
+        stats = test_result
+    elif isinstance(test_result, unittest.TestResult):
+        stats = TestStats.from_unittest(test_result)
+    elif test_result is None:
+        print_warning(f"{result.test_name} test runner returned None: {test_func}")
+        stats = None
+    else:
+        # Don't import doctest at top level since only few tests return
+        # a doctest.TestResult instance.
+        import doctest
+        if isinstance(test_result, doctest.TestResults):
+            stats = TestStats.from_doctest(test_result)
+        else:
+            print_warning(f"Unknown test result type: {type(test_result)}")
             stats = None
-        case _:
-            # Don't import doctest at top level since only few tests return
-            # a doctest.TestResult instance.
-            import doctest
-            if isinstance(test_result, doctest.TestResults):
-                stats = TestStats.from_doctest(test_result)
-            else:
-                print_warning(f"Unknown test result type: {type(test_result)}")
-                stats = None

     result.stats = stats

@@ -145,15 +144,16 @@ def test_func():

     remove_testfn(test_name, runtests.verbose)

-    if gc.garbage:
-        support.environment_altered = True
-        print_warning(f"{test_name} created {len(gc.garbage)} "
-                      f"uncollectable object(s)")
-
-        # move the uncollectable objects somewhere,
-        # so we don't see them again
-        GC_GARBAGE.extend(gc.garbage)
-        gc.garbage.clear()
+    # TODO: RUSTPYTHON
+    # if gc.garbage:
+    #     support.environment_altered = True
+    #     print_warning(f"{test_name} created {len(gc.garbage)} "
+    #                   f"uncollectable object(s)")
+    #
+    #     # move the uncollectable objects somewhere,
+    #     # so we don't see them again
+    #     GC_GARBAGE.extend(gc.garbage)
+    #     gc.garbage.clear()

     support.reap_children()

diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py
index 9da77dd6dc..425875b2d4 100644
--- a/Lib/test/libregrtest/utils.py
+++ b/Lib/test/libregrtest/utils.py
@@ -668,7 +668,7 @@ def display_header(use_resources: tuple[str, ...],
     cpu_count: object = os.cpu_count()
     if cpu_count:
-        # The function is new in Python 3.13; mypy doesn't know about it yet:
-        process_cpu_count = os.process_cpu_count()  # type: ignore[attr-defined]
+        # os.process_cpu_count() (new in Python 3.13) is not available on
+        # RustPython yet; fall back to os.cpu_count().
+        process_cpu_count = os.cpu_count()
         if process_cpu_count and process_cpu_count != cpu_count:
             cpu_count = f"{process_cpu_count} (process) / {cpu_count} (system)"
         print("== CPU count:", cpu_count)
diff --git a/Lib/test/libregrtest/win_utils.py b/Lib/test/libregrtest/win_utils.py
index b51fde0af5..38ce70bb38 100644
--- a/Lib/test/libregrtest/win_utils.py
+++ b/Lib/test/libregrtest/win_utils.py
@@ -33,96 +33,27 @@ def __init__(self):
         winreg.QueryInfoKey(winreg.HKEY_PERFORMANCE_DATA)

         self._values = []
-        self._load = None
-        self._running = _overlapped.CreateEvent(None, True, False, None)
-        self._stopped = _overlapped.CreateEvent(None, True, False, None)

         _thread.start_new_thread(self._update_load, (), {})

     def _update_load(self,
                      # localize module access to prevent shutdown errors
-                     _wait=_winapi.WaitForSingleObject,
-                     _signal=_overlapped.SetEvent):
-        # run until signaled to stop
-        while _wait(self._running, 1000):
-            self._calculate_load()
-        # notify stopped
-        _signal(self._stopped)
+                     # (keep defaults so the no-argument call from
+                     # _thread.start_new_thread() above still works)
+                     _wait=None, _signal=None):
+        pass

     def _calculate_load(self,
                         # localize module access to prevent shutdown errors
                         _query=winreg.QueryValueEx,
                         _hkey=winreg.HKEY_PERFORMANCE_DATA,
                         _unpack=struct.unpack_from):
-        # get the 'System' object
-        data, _ = _query(_hkey, '2')
-        # PERF_DATA_BLOCK {
-        #     WCHAR Signature[4]      8 +
-        #     DWORD LittleEndian      4 +
-        #     DWORD Version           4 +
-        #     DWORD Revision          4 +
-        #     DWORD TotalByteLength   4 +
-        #     DWORD HeaderLength      = 24 byte offset
-        #     ...
-        # }
-        obj_start, = _unpack('L', data, 24)
-        # PERF_OBJECT_TYPE {
-        #     DWORD TotalByteLength
-        #     DWORD DefinitionLength
-        #     DWORD HeaderLength
-        #     ...
-        # }
-        data_start, defn_start = _unpack('4xLL', data, obj_start)
-        data_base = obj_start + data_start
-        defn_base = obj_start + defn_start
-        # find the 'Processor Queue Length' counter (index=44)
-        while defn_base < data_base:
-            # PERF_COUNTER_DEFINITION {
-            #     DWORD ByteLength
-            #     DWORD CounterNameTitleIndex
-            #     ... [7 DWORDs/28 bytes]
-            #     DWORD CounterOffset
-            # }
-            size, idx, offset = _unpack('LL28xL', data, defn_base)
-            defn_base += size
-            if idx == 44:
-                counter_offset = data_base + offset
-                # the counter is known to be PERF_COUNTER_RAWCOUNT (DWORD)
-                processor_queue_length, = _unpack('L', data, counter_offset)
-                break
-        else:
-            return
-
-        # We use an exponentially weighted moving average, imitating the
-        # load calculation on Unix systems.
-        # https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation
-        # https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
-        if self._load is not None:
-            self._load = (self._load * LOAD_FACTOR_1
-                          + processor_queue_length * (1.0 - LOAD_FACTOR_1))
-        elif len(self._values) < NVALUE:
-            self._values.append(processor_queue_length)
-        else:
-            self._load = sum(self._values) / len(self._values)
+        pass

     def close(self, kill=True):
         self.__del__()
         return

-    def __del__(self,
-                # localize module access to prevent shutdown errors
-                _wait=_winapi.WaitForSingleObject,
-                _close=_winapi.CloseHandle,
-                _signal=_overlapped.SetEvent):
-        if self._running is not None:
-            # tell the update thread to quit
-            _signal(self._running)
-            # wait for the update thread to signal done
-            _wait(self._stopped, -1)
-            # cleanup events
-            _close(self._running)
-            _close(self._stopped)
-            self._running = self._stopped = None
+    # No events are created any more, so there is nothing to clean up;
+    # take no extra parameters so that close() and the garbage collector
+    # can invoke this without arguments.
+    def __del__(self):
+        pass

     def getloadavg(self):
-        return self._load
+        return None
diff --git a/Lib/trace.py b/Lib/trace.py
index 213e46517d..5b7f9291b5 100755
--- a/Lib/trace.py
+++ b/Lib/trace.py
@@ -201,7 +201,7 @@ def update(self, other):
             for key in other_callers:
                 callers[key] = 1

-    def write_results(self, show_missing=True, summary=False, coverdir=None):
+    def write_results(self, show_missing=True, summary=False, coverdir=None,
+                      ignore_missing_files=False):
         """
         Write the coverage results.
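+
+        If *ignore_missing_files* is true -- a sketch of the intended
+        semantics, not stated in this patch -- results for source files
+        that no longer exist on disk are skipped instead of raising, e.g.::
+
+            results.write_results(show_missing=True, coverdir='cov',
+                                  ignore_missing_files=True)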
From d5a74dba7560543502c57017ad99e8eb0ca23850 Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Mon, 24 Feb 2025 16:47:56 -0800 Subject: [PATCH 05/15] disable check_free_after_iterating Signed-off-by: Ashwin Naren --- Lib/test/support/__init__.py | 41 +++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py index 869053e0dd..8d1cb03048 100644 --- a/Lib/test/support/__init__.py +++ b/Lib/test/support/__init__.py @@ -1859,26 +1859,29 @@ def _check_tracemalloc(): "memory allocations") +# TODO: RUSTPYTHON +# def check_free_after_iterating(test, iter, cls, args=()): +# done = False +# def wrapper(): +# class A(cls): +# def __del__(self): +# nonlocal done +# done = True +# try: +# next(it) +# except StopIteration: +# pass +# +# it = iter(A(*args)) +# # Issue 26494: Shouldn't crash +# test.assertRaises(StopIteration, next, it) +# +# wrapper() +# # The sequence should be deallocated just after the end of iterating +# gc_collect() +# test.assertTrue(done) def check_free_after_iterating(test, iter, cls, args=()): - done = False - def wrapper(): - class A(cls): - def __del__(self): - nonlocal done - done = True - try: - next(it) - except StopIteration: - pass - - it = iter(A(*args)) - # Issue 26494: Shouldn't crash - test.assertRaises(StopIteration, next, it) - - wrapper() - # The sequence should be deallocated just after the end of iterating - gc_collect() - test.assertTrue(done) + raise NotImplementedError("check_free_after_iterating() is not implemented") def missing_compiler_executable(cmd_names=[]): From 60a4c4aba4ffa45842999c73d506c4cf382d79ce Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Mon, 24 Feb 2025 16:49:55 -0800 Subject: [PATCH 06/15] remove requires_legacy_unicode_capi tests Signed-off-by: Ashwin Naren --- Lib/test/test_csv.py | 12 ------------ Lib/test/test_decimal.py | 31 +------------------------------ Lib/test/test_unicode.py | 30 ------------------------------ 3 files changed, 1 insertion(+), 72 deletions(-) diff --git a/Lib/test/test_csv.py b/Lib/test/test_csv.py index 2646be086c..9a1743da6d 100644 --- a/Lib/test/test_csv.py +++ b/Lib/test/test_csv.py @@ -291,18 +291,6 @@ def test_writerows_errors(self): self.assertRaises(TypeError, writer.writerows, None) self.assertRaises(OSError, writer.writerows, BadIterable()) - @support.cpython_only - @support.requires_legacy_unicode_capi() - @warnings_helper.ignore_warnings(category=DeprecationWarning) - def test_writerows_legacy_strings(self): - import _testcapi - c = _testcapi.unicode_legacy_string('a') - with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj: - writer = csv.writer(fileobj) - writer.writerows([[c]]) - fileobj.seek(0) - self.assertEqual(fileobj.read(), "a\r\n") - def _read_test(self, input, expect, **kwargs): reader = csv.reader(input, **kwargs) result = list(reader) diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py index 04f4fc4a01..eac2dc1ab3 100644 --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -34,7 +34,7 @@ import locale from test.support import (is_resource_enabled, requires_IEEE_754, requires_docstrings, - requires_legacy_unicode_capi, check_sanitizer) + check_sanitizer) from test.support import (TestFailed, run_with_locale, cpython_only, darwin_malloc_err_warning, is_emscripten) @@ -586,18 +586,6 @@ def test_explicit_from_string(self): # underscores don't prevent errors self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003") - @cpython_only - 
@requires_legacy_unicode_capi() - @warnings_helper.ignore_warnings(category=DeprecationWarning) - def test_from_legacy_strings(self): - import _testcapi - Decimal = self.decimal.Decimal - context = self.decimal.Context() - - s = _testcapi.unicode_legacy_string('9.999999') - self.assertEqual(str(Decimal(s)), '9.999999') - self.assertEqual(str(context.create_decimal(s)), '9.999999') - def test_explicit_from_tuples(self): Decimal = self.decimal.Decimal @@ -2928,23 +2916,6 @@ def test_none_args(self): assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero, Overflow]) - @cpython_only - @requires_legacy_unicode_capi() - @warnings_helper.ignore_warnings(category=DeprecationWarning) - def test_from_legacy_strings(self): - import _testcapi - c = self.decimal.Context() - - for rnd in RoundingModes: - c.rounding = _testcapi.unicode_legacy_string(rnd) - self.assertEqual(c.rounding, rnd) - - s = _testcapi.unicode_legacy_string('') - self.assertRaises(TypeError, setattr, c, 'rounding', s) - - s = _testcapi.unicode_legacy_string('ROUND_\x00UP') - self.assertRaises(TypeError, setattr, c, 'rounding', s) - def test_pickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py index 17c9f01cd8..3b5902a85e 100644 --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -822,16 +822,6 @@ def test_isidentifier(self): self.assertFalse("©".isidentifier()) self.assertFalse("0".isidentifier()) - @support.cpython_only - @support.requires_legacy_unicode_capi() - @unittest.skipIf(_testcapi is None, 'need _testcapi module') - def test_isidentifier_legacy(self): - u = '𝖀𝖓𝖎𝖈𝖔𝖉𝖊' - self.assertTrue(u.isidentifier()) - with warnings_helper.check_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - self.assertTrue(_testcapi.unicode_legacy_string(u).isidentifier()) - # TODO: RUSTPYTHON @unittest.expectedFailure def test_isprintable(self): @@ -2532,26 +2522,6 @@ def test_getnewargs(self): self.assertEqual(args[0], text) self.assertEqual(len(args), 1) - @support.cpython_only - @support.requires_legacy_unicode_capi() - @unittest.skipIf(_testcapi is None, 'need _testcapi module') - def test_resize(self): - for length in range(1, 100, 7): - # generate a fresh string (refcount=1) - text = 'a' * length + 'b' - - # fill wstr internal field - with self.assertWarns(DeprecationWarning): - abc = _testcapi.getargs_u(text) - self.assertEqual(abc, text) - - # resize text: wstr field must be cleared and then recomputed - text += 'c' - with self.assertWarns(DeprecationWarning): - abcdef = _testcapi.getargs_u(text) - self.assertNotEqual(abc, abcdef) - self.assertEqual(abcdef, text) - def test_compare(self): # Issue #17615 N = 10 From d8962bd347f8a2433b8efd28cc3b90a91445c90f Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Mon, 24 Feb 2025 18:35:36 -0800 Subject: [PATCH 07/15] disable support.infinite_recursion Signed-off-by: Ashwin Naren --- Lib/test/support/__init__.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py index 8d1cb03048..d87b4f4afc 100644 --- a/Lib/test/support/__init__.py +++ b/Lib/test/support/__init__.py @@ -2287,13 +2287,14 @@ def set_recursion_limit(limit): finally: sys.setrecursionlimit(original_limit) -def infinite_recursion(max_depth=None): - if max_depth is None: - # Pick a number large enough to cause problems - # but not take too long for code that can handle - # very deep recursion. 
-        max_depth = 20_000
-    elif max_depth < 3:
+# TODO: RUSTPYTHON
+def infinite_recursion(max_depth=100):
+    """Set a lower limit for tests that interact with infinite recursion
+    (e.g. test_ast.ASTHelpers_Test.test_recursion_direct), since on some
+    debug Windows builds, due to too few functions being inlined, the
+    stack might not handle the default recursion limit (1000). See
+    bpo-11105 for details."""
+    if max_depth < 3:
-        raise ValueError("max_depth must be at least 3, got {max_depth}")
+        raise ValueError(f"max_depth must be at least 3, got {max_depth}")
     depth = get_recursion_depth()
     depth = max(depth - 1, 1)  # Ignore infinite_recursion() frame.

From 4a96193500752593fdd15bf0f191ef8f71b40fa2 Mon Sep 17 00:00:00 2001
From: Ashwin Naren
Date: Mon, 24 Feb 2025 21:17:43 -0800
Subject: [PATCH 08/15] formatting

Signed-off-by: Ashwin Naren
---
 vm/src/stdlib/mod.rs    | 2 +-
 vm/src/stdlib/opcode.rs | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/vm/src/stdlib/mod.rs b/vm/src/stdlib/mod.rs
index 91e81b010a..e18ebc1e53 100644
--- a/vm/src/stdlib/mod.rs
+++ b/vm/src/stdlib/mod.rs
@@ -41,6 +41,7 @@ pub mod posix;
 mod ctypes;
 #[cfg(windows)]
 pub(crate) mod msvcrt;
+mod opcode;
 #[cfg(all(unix, not(any(target_os = "android", target_os = "redox"))))]
 mod pwd;
 pub(crate) mod signal;
@@ -49,7 +50,6 @@ pub mod sys;
 mod winapi;
 #[cfg(windows)]
 mod winreg;
-mod opcode;

 use crate::{builtins::PyModule, PyRef, VirtualMachine};
 use std::{borrow::Cow, collections::HashMap};
diff --git a/vm/src/stdlib/opcode.rs b/vm/src/stdlib/opcode.rs
index b187e0fcd2..835463041d 100644
--- a/vm/src/stdlib/opcode.rs
+++ b/vm/src/stdlib/opcode.rs
@@ -1,6 +1,4 @@
 pub(crate) use _opcode::make_module;

 #[pymodule]
-mod _opcode {
-
-}
\ No newline at end of file
+mod _opcode {}

From ea88e7cdf7cc4429cd59b96569e6de021fd56861 Mon Sep 17 00:00:00 2001
From: Ashwin Naren
Date: Mon, 24 Feb 2025 23:19:46 -0800
Subject: [PATCH 09/15] fix support.disable_gc

Signed-off-by: Ashwin Naren
---
 Lib/test/support/__init__.py | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index d87b4f4afc..d7eccda12f 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -814,14 +814,8 @@ def gc_collect():

 @contextlib.contextmanager
 def disable_gc():
-    import gc
-    have_gc = gc.isenabled()
-    gc.disable()
-    try:
-        yield
-    finally:
-        if have_gc:
-            gc.enable()
+    # TODO: RUSTPYTHON doesn't have a gc, so there is nothing to disable.
+    # @contextlib.contextmanager still requires a generator, though: a bare
+    # `pass` would make every `with disable_gc():` block raise at runtime,
+    # so yield exactly once instead.
+    yield

 @contextlib.contextmanager
 def gc_threshold(*args):

From 5dff9ba5d9ad53b4eac7412547d956f6a10f429a Mon Sep 17 00:00:00 2001
From: Ashwin Naren
Date: Mon, 24 Feb 2025 23:21:47 -0800
Subject: [PATCH 10/15] disable worker process mode for CI

Signed-off-by: Ashwin Naren
---
 .github/workflows/ci.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 23f47b574c..e522277cf3 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -275,17 +275,17 @@ jobs:
       - if: runner.os == 'Linux'
         name: run cpython platform-independent tests
         run:
-          target/release/rustpython -m test -j 1 -u all --slowest --fail-env-changed -v ${{ env.PLATFORM_INDEPENDENT_TESTS }}
+          target/release/rustpython -m test -u all --slowest --fail-env-changed -v ${{ env.PLATFORM_INDEPENDENT_TESTS }}
       - if: runner.os == 'Linux'
         name: run cpython platform-dependent tests (Linux)
-        run: target/release/rustpython -m test -j 1 -u all --slowest --fail-env-changed -v -x ${{ env.PLATFORM_INDEPENDENT_TESTS }}
+        run: target/release/rustpython -m test -u all --slowest
--fail-env-changed -v -x ${{ env.PLATFORM_INDEPENDENT_TESTS }} - if: runner.os == 'macOS' name: run cpython platform-dependent tests (MacOS) - run: target/release/rustpython -m test -j 1 --slowest --fail-env-changed -v -x ${{ env.PLATFORM_INDEPENDENT_TESTS }} ${{ env.MACOS_SKIPS }} + run: target/release/rustpython -m test --slowest --fail-env-changed -v -x ${{ env.PLATFORM_INDEPENDENT_TESTS }} ${{ env.MACOS_SKIPS }} - if: runner.os == 'Windows' name: run cpython platform-dependent tests (windows partial - fixme) run: - target/release/rustpython -m test -j 1 --slowest --fail-env-changed -v -x ${{ env.PLATFORM_INDEPENDENT_TESTS }} ${{ env.WINDOWS_SKIPS }} + target/release/rustpython -m test --slowest --fail-env-changed -v -x ${{ env.PLATFORM_INDEPENDENT_TESTS }} ${{ env.WINDOWS_SKIPS }} - if: runner.os != 'Windows' name: check that --install-pip succeeds run: | From b0cbb10f81cddbc605d1f3a065f09678530c48fe Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Mon, 24 Feb 2025 23:42:08 -0800 Subject: [PATCH 11/15] fix tests using removed test.support unittest running function Signed-off-by: Ashwin Naren --- Lib/test/test_generators.py | 16 ++-- Lib/test/test_http_cookies.py | 11 +-- Lib/test/test_resource.py | 6 +- Lib/test/test_shelve.py | 150 +++++++++++++++---------------- Lib/test/test_threading_local.py | 18 ++-- Lib/test/test_weakref.py | 17 +--- Lib/test/test_xml_etree.py | 59 +++--------- 7 files changed, 113 insertions(+), 164 deletions(-) diff --git a/Lib/test/test_generators.py b/Lib/test/test_generators.py index 5ed0986150..45274fb6b1 100644 --- a/Lib/test/test_generators.py +++ b/Lib/test/test_generators.py @@ -2393,16 +2393,12 @@ def printsolution(self, x): "refleaks": refleaks_tests, } -# Magic test name that regrtest.py invokes *after* importing this module. -# This worms around a bootstrap problem. -# Note that doctest and regrtest both look in sys.argv for a "-v" argument, -# so this works as expected in both ways of running regrtest. -def test_main(verbose=None): - from test import support, test_generators - support.run_unittest(__name__) + +def load_tests(loader, tests, pattern): # TODO: RUSTPYTHON - # support.run_doctest(test_generators, verbose) + # tests.addTest(doctest.DocTestSuite()) + return tests + -# This part isn't needed for regrtest, but for running the test directly. 
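+# (Background note, assuming standard unittest behavior: `load_tests` is
+# the stdlib protocol that replaces the old `test_main` entry points in
+# this series. unittest invokes it as
+#     load_tests(loader, standard_tests, pattern)
+# after collecting a module's tests, so once RustPython can run them the
+# doctests can be re-enabled by importing doctest and uncommenting
+#     tests.addTest(doctest.DocTestSuite())
+# above.)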
if __name__ == "__main__": - test_main(1) + unittest.main() \ No newline at end of file diff --git a/Lib/test/test_http_cookies.py b/Lib/test/test_http_cookies.py index 6072c7e15e..f4bb23d059 100644 --- a/Lib/test/test_http_cookies.py +++ b/Lib/test/test_http_cookies.py @@ -1,7 +1,6 @@ # Simple test suite for http/cookies.py import copy -from test.support import run_unittest, run_doctest import unittest from http import cookies import pickle @@ -479,9 +478,11 @@ def test_repr(self): r'Set-Cookie: key=coded_val; ' r'expires=\w+, \d+ \w+ \d+ \d+:\d+:\d+ \w+') -def test_main(): - run_unittest(CookieTests, MorselTests) - run_doctest(cookies) +def load_tests(loader, tests, pattern): + # TODO: RUSTPYTHON + # tests.addTest(doctest.DocTestSuite(cookies)) + return tests + if __name__ == '__main__': - test_main() + unittest.main() diff --git a/Lib/test/test_resource.py b/Lib/test/test_resource.py index 103d0199d0..293878d5c4 100644 --- a/Lib/test/test_resource.py +++ b/Lib/test/test_resource.py @@ -176,9 +176,5 @@ def __getitem__(self, key): self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS, BadSeq()), limits) - -def test_main(verbose=None): - support.run_unittest(ResourceTest) - if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Lib/test/test_shelve.py b/Lib/test/test_shelve.py index ac25eee2e5..bef16fa40d 100644 --- a/Lib/test/test_shelve.py +++ b/Lib/test/test_shelve.py @@ -1,7 +1,9 @@ import unittest +import dbm import shelve -import glob -from test import support +import pickle +import os + from test.support import os_helper from collections.abc import MutableMapping from test.test_dbm import dbm_iterator @@ -41,12 +43,8 @@ def copy(self): class TestCase(unittest.TestCase): - - fn = "shelftemp.db" - - def tearDown(self): - for f in glob.glob(self.fn+"*"): - os_helper.unlink(f) + dirname = os_helper.TESTFN + fn = os.path.join(os_helper.TESTFN, "shelftemp.db") def test_close(self): d1 = {} @@ -63,29 +61,40 @@ def test_close(self): else: self.fail('Closed shelf should not find a key') - def test_ascii_file_shelf(self): - s = shelve.open(self.fn, protocol=0) + def test_open_template(self, filename=None, protocol=None): + os.mkdir(self.dirname) + self.addCleanup(os_helper.rmtree, self.dirname) + s = shelve.open(filename=filename if filename is not None else self.fn, + protocol=protocol) try: s['key1'] = (1,2,3,4) self.assertEqual(s['key1'], (1,2,3,4)) finally: s.close() + def test_ascii_file_shelf(self): + self.test_open_template(protocol=0) + def test_binary_file_shelf(self): - s = shelve.open(self.fn, protocol=1) - try: - s['key1'] = (1,2,3,4) - self.assertEqual(s['key1'], (1,2,3,4)) - finally: - s.close() + self.test_open_template(protocol=1) def test_proto2_file_shelf(self): - s = shelve.open(self.fn, protocol=2) - try: - s['key1'] = (1,2,3,4) - self.assertEqual(s['key1'], (1,2,3,4)) - finally: - s.close() + self.test_open_template(protocol=2) + + # TODO: RUSTPYTHON + @unittest.expectedFailure + def test_pathlib_path_file_shelf(self): + self.test_open_template(filename=os_helper.FakePath(self.fn)) + + # TODO: RUSTPYTHON + @unittest.expectedFailure + def test_bytes_path_file_shelf(self): + self.test_open_template(filename=os.fsencode(self.fn)) + + # TODO: RUSTPYTHON + @unittest.expectedFailure + def test_pathlib_bytes_path_file_shelf(self): + self.test_open_template(filename=os_helper.FakePath(os.fsencode(self.fn))) def test_in_memory_shelf(self): d1 = byteskeydict() @@ -158,67 +167,58 @@ def test_with(self): else: self.fail('Closed shelf should not find a key') + # 
TODO: RUSTPYTHON + @unittest.expectedFailure def test_default_protocol(self): with shelve.Shelf({}) as s: - self.assertEqual(s._protocol, 3) + self.assertEqual(s._protocol, pickle.DEFAULT_PROTOCOL) -from test import mapping_tests -class TestShelveBase(mapping_tests.BasicTestMappingProtocol): - fn = "shelftemp.db" - counter = 0 - def __init__(self, *args, **kw): - self._db = [] - mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw) +class TestShelveBase: type2test = shelve.Shelf + def _reference(self): return {"key1":"value1", "key2":2, "key3":(1,2,3)} + + +class TestShelveInMemBase(TestShelveBase): def _empty_mapping(self): - if self._in_mem: - x= shelve.Shelf(byteskeydict(), **self._args) - else: - self.counter+=1 - x= shelve.open(self.fn+str(self.counter), **self._args) - self._db.append(x) + return shelve.Shelf(byteskeydict(), **self._args) + + +class TestShelveFileBase(TestShelveBase): + counter = 0 + + def _empty_mapping(self): + self.counter += 1 + x = shelve.open(self.base_path + str(self.counter), **self._args) + self.addCleanup(x.close) return x - def tearDown(self): - for db in self._db: - db.close() - self._db = [] - if not self._in_mem: - for f in glob.glob(self.fn+"*"): - os_helper.unlink(f) - -class TestAsciiFileShelve(TestShelveBase): - _args={'protocol':0} - _in_mem = False -class TestBinaryFileShelve(TestShelveBase): - _args={'protocol':1} - _in_mem = False -class TestProto2FileShelve(TestShelveBase): - _args={'protocol':2} - _in_mem = False -class TestAsciiMemShelve(TestShelveBase): - _args={'protocol':0} - _in_mem = True -class TestBinaryMemShelve(TestShelveBase): - _args={'protocol':1} - _in_mem = True -class TestProto2MemShelve(TestShelveBase): - _args={'protocol':2} - _in_mem = True - -def test_main(): - for module in dbm_iterator(): - support.run_unittest( - TestAsciiFileShelve, - TestBinaryFileShelve, - TestProto2FileShelve, - TestAsciiMemShelve, - TestBinaryMemShelve, - TestProto2MemShelve, - TestCase - ) + + def setUp(self): + dirname = os_helper.TESTFN + os.mkdir(dirname) + self.addCleanup(os_helper.rmtree, dirname) + self.base_path = os.path.join(dirname, "shelftemp.db") + self.addCleanup(setattr, dbm, '_defaultmod', dbm._defaultmod) + dbm._defaultmod = self.dbm_mod + + +from test import mapping_tests + +for proto in range(pickle.HIGHEST_PROTOCOL + 1): + bases = (TestShelveInMemBase, mapping_tests.BasicTestMappingProtocol) + name = f'TestProto{proto}MemShelve' + globals()[name] = type(name, bases, + {'_args': {'protocol': proto}}) + bases = (TestShelveFileBase, mapping_tests.BasicTestMappingProtocol) + for dbm_mod in dbm_iterator(): + assert dbm_mod.__name__.startswith('dbm.') + suffix = dbm_mod.__name__[4:] + name = f'TestProto{proto}File_{suffix}Shelve' + globals()[name] = type(name, bases, + {'dbm_mod': dbm_mod, '_args': {'protocol': proto}}) + if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Lib/test/test_threading_local.py b/Lib/test/test_threading_local.py index 3443e3875d..aa0f88b21a 100644 --- a/Lib/test/test_threading_local.py +++ b/Lib/test/test_threading_local.py @@ -210,22 +210,20 @@ class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest): _local = _threading_local.local -def test_main(): - suite = unittest.TestSuite() - suite.addTest(DocTestSuite('_threading_local')) - suite.addTest(unittest.makeSuite(ThreadLocalTest)) - suite.addTest(unittest.makeSuite(PyThreadingLocalTest)) + +def load_tests(loader, tests, pattern): + tests.addTest(DocTestSuite('_threading_local')) local_orig = _threading_local.local def 
setUp(test): _threading_local.local = _thread._local def tearDown(test): _threading_local.local = local_orig - suite.addTest(DocTestSuite('_threading_local', - setUp=setUp, tearDown=tearDown) - ) + tests.addTests(DocTestSuite('_threading_local', + setUp=setUp, tearDown=tearDown) + ) + return tests - support.run_unittest(suite) if __name__ == '__main__': - test_main() + unittest.main() diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index 751002bfc9..2b3678c124 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -2282,19 +2282,10 @@ def test_atexit(self): __test__ = {'libreftest' : libreftest} -def test_main(): - support.run_unittest( - ReferencesTestCase, - WeakMethodTestCase, - MappingTestCase, - WeakValueDictionaryTestCase, - WeakKeyDictionaryTestCase, - SubclassableWeakrefTestCase, - FinalizeTestCase, - ) - # TODO: RUSTPYTHON - # support.run_doctest(sys.modules[__name__]) +def load_tests(loader, tests, pattern): + tests.addTest(doctest.DocTestSuite()) + return tests if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py index 1a681d5a7c..19718b2a89 100644 --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -4456,8 +4456,7 @@ def get_option(config, option_name, default=None): # -------------------------------------------------------------------- - -def test_main(module=None): +def setUpModule(module=None): # When invoked without a module, runs the Python ET tests by loading pyET. # Otherwise, uses the given module as the ET. global pyET @@ -4469,62 +4468,30 @@ def test_main(module=None): global ET ET = module - test_classes = [ - ModuleTest, - ElementSlicingTest, - BasicElementTest, - BadElementTest, - BadElementPathTest, - ElementTreeTest, - IOTest, - ParseErrorTest, - XIncludeTest, - ElementTreeTypeTest, - ElementFindTest, - ElementIterTest, - TreeBuilderTest, - XMLParserTest, - XMLPullParserTest, - BugsTest, - KeywordArgsTest, - C14NTest, - ] - - # These tests will only run for the pure-Python version that doesn't import - # _elementtree. We can't use skipUnless here, because pyET is filled in only - # after the module is loaded. - if pyET is not ET: - test_classes.extend([ - NoAcceleratorTest, - ]) + # don't interfere with subsequent tests + def cleanup(): + global ET, pyET + ET = pyET = None + unittest.addModuleCleanup(cleanup) # Provide default namespace mapping and path cache. from xml.etree import ElementPath nsmap = ET.register_namespace._namespace_map # Copy the default namespace mapping nsmap_copy = nsmap.copy() + unittest.addModuleCleanup(nsmap.update, nsmap_copy) + unittest.addModuleCleanup(nsmap.clear) + # Copy the path cache (should be empty) path_cache = ElementPath._cache + unittest.addModuleCleanup(setattr, ElementPath, "_cache", path_cache) ElementPath._cache = path_cache.copy() + # Align the Comment/PI factories. 
if hasattr(ET, '_set_factories'): old_factories = ET._set_factories(ET.Comment, ET.PI) - else: - old_factories = None - - try: - support.run_unittest(*test_classes) - finally: - from xml.etree import ElementPath - # Restore mapping and path cache - nsmap.clear() - nsmap.update(nsmap_copy) - ElementPath._cache = path_cache - if old_factories is not None: - ET._set_factories(*old_factories) - # don't interfere with subsequent tests - ET = pyET = None + unittest.addModuleCleanup(ET._set_factories, *old_factories) if __name__ == '__main__': - test_main() + unittest.main() From a900fcb1bb008ea5e4fd2a893f9b8e91818c404b Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Tue, 25 Feb 2025 00:00:03 -0800 Subject: [PATCH 12/15] fix tests Signed-off-by: Ashwin Naren --- Lib/test/test_fstring.py | 2 ++ Lib/test/test_netrc.py | 7 ++----- Lib/test/test_setcomps.py | 28 ++++++++++++---------------- 3 files changed, 16 insertions(+), 21 deletions(-) diff --git a/Lib/test/test_fstring.py b/Lib/test/test_fstring.py index b3a38dd652..cc65dc68dd 100644 --- a/Lib/test/test_fstring.py +++ b/Lib/test/test_fstring.py @@ -1660,6 +1660,8 @@ def test_errors(self): ], ) + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_filename_in_syntaxerror(self): # see issue 38964 with temp_cwd() as cwd: diff --git a/Lib/test/test_netrc.py b/Lib/test/test_netrc.py index 573d636de9..36cc701a21 100644 --- a/Lib/test/test_netrc.py +++ b/Lib/test/test_netrc.py @@ -1,5 +1,5 @@ import netrc, os, unittest, sys, textwrap -from test.support import os_helper, run_unittest +from test.support import os_helper try: import pwd @@ -308,8 +308,5 @@ def test_security(self): self.assertEqual(nrc.hosts['foo.domain.com'], ('anonymous', '', 'pass')) -def test_main(): - run_unittest(NetrcTestCase) - if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Lib/test/test_setcomps.py b/Lib/test/test_setcomps.py index ecc4fffec0..a2accdf423 100644 --- a/Lib/test/test_setcomps.py +++ b/Lib/test/test_setcomps.py @@ -1,3 +1,9 @@ +# import doctest +import traceback +import unittest + +from test.support import BrokenIter + doctests = """ ########### Tests mostly copied from test_listcomps.py ############ @@ -147,21 +153,11 @@ __test__ = {'doctests' : doctests} -def test_main(verbose=None): - import sys - from test import support - from test import test_setcomps - support.run_doctest(test_setcomps, verbose) - - # verify reference counting - if verbose and hasattr(sys, "gettotalrefcount"): - import gc - counts = [None] * 5 - for i in range(len(counts)): - support.run_doctest(test_setcomps, verbose) - gc.collect() - counts[i] = sys.gettotalrefcount() - print(counts) +def load_tests(loader, tests, pattern): + # TODO: RUSTPYTHON + # tests.addTest(doctest.DocTestSuite()) + return tests + if __name__ == "__main__": - test_main(verbose=True) + unittest.main() From 4f3aff496dd36f592f729178fe53535f4e56ac76 Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Tue, 25 Feb 2025 00:23:15 -0800 Subject: [PATCH 13/15] finish initial step Signed-off-by: Ashwin Naren --- Lib/test/test_regrtest.py | 1721 +++++++++++++++++++++++++++++------ Lib/test/test_support.py | 263 +++--- Lib/test/test_trace.py | 13 +- Lib/test/test_weakref.py | 9 +- Lib/test/test_xml_etree.py | 2 - Lib/test/test_yield_from.py | 2 + 6 files changed, 1593 insertions(+), 417 deletions(-) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py index aab2d9f7ae..14ac044e34 100644 --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -5,29 +5,47 @@ """ import contextlib 
-import faulthandler +import dataclasses import glob import io +import locale import os.path import platform +import random import re +import shlex +import signal import subprocess import sys import sysconfig import tempfile import textwrap import unittest -from test import libregrtest +from xml.etree import ElementTree + from test import support -from test.support import os_helper +from test.support import os_helper, without_optimizer +from test.libregrtest import cmdline +from test.libregrtest import main +from test.libregrtest import setup from test.libregrtest import utils +from test.libregrtest.filter import get_match_tests, set_match_tests, match_test +from test.libregrtest.result import TestStats +from test.libregrtest.utils import normalize_test_name +if not support.has_subprocess_support: + raise unittest.SkipTest("test module requires subprocess") -Py_DEBUG = hasattr(sys, 'gettotalrefcount') ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..') ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR)) LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?' +EXITCODE_BAD_TEST = 2 +EXITCODE_ENV_CHANGED = 3 +EXITCODE_NO_TESTS_RAN = 4 +EXITCODE_RERUN_FAIL = 5 +EXITCODE_INTERRUPTED = 130 + TEST_INTERRUPTED = textwrap.dedent(""" from signal import SIGINT, raise_signal try: @@ -43,95 +61,115 @@ class ParseArgsTestCase(unittest.TestCase): Test regrtest's argument parsing, function _parse_args(). """ + @staticmethod + def parse_args(args): + return cmdline._parse_args(args) + def checkError(self, args, msg): with support.captured_stderr() as err, self.assertRaises(SystemExit): - libregrtest._parse_args(args) + self.parse_args(args) self.assertIn(msg, err.getvalue()) def test_help(self): for opt in '-h', '--help': with self.subTest(opt=opt): with support.captured_stdout() as out, \ - self.assertRaises(SystemExit): - libregrtest._parse_args([opt]) + self.assertRaises(SystemExit): + self.parse_args([opt]) self.assertIn('Run Python regression tests.', out.getvalue()) - @unittest.skipUnless(hasattr(faulthandler, 'dump_traceback_later'), - "faulthandler.dump_traceback_later() required") def test_timeout(self): - ns = libregrtest._parse_args(['--timeout', '4.2']) + ns = self.parse_args(['--timeout', '4.2']) self.assertEqual(ns.timeout, 4.2) + + # negative, zero and empty string are treated as "no timeout" + for value in ('-1', '0', ''): + with self.subTest(value=value): + ns = self.parse_args([f'--timeout={value}']) + self.assertEqual(ns.timeout, None) + self.checkError(['--timeout'], 'expected one argument') - self.checkError(['--timeout', 'foo'], 'invalid float value') + self.checkError(['--timeout', 'foo'], 'invalid timeout value:') def test_wait(self): - ns = libregrtest._parse_args(['--wait']) + ns = self.parse_args(['--wait']) self.assertTrue(ns.wait) - def test_worker_args(self): - ns = libregrtest._parse_args(['--worker-args', '[[], {}]']) - self.assertEqual(ns.worker_args, '[[], {}]') - self.checkError(['--worker-args'], 'expected one argument') - def test_start(self): for opt in '-S', '--start': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, 'foo']) + ns = self.parse_args([opt, 'foo']) self.assertEqual(ns.start, 'foo') self.checkError([opt], 'expected one argument') def test_verbose(self): - ns = libregrtest._parse_args(['-v']) + ns = self.parse_args(['-v']) self.assertEqual(ns.verbose, 1) - ns = libregrtest._parse_args(['-vvv']) + ns = self.parse_args(['-vvv']) self.assertEqual(ns.verbose, 3) - ns = libregrtest._parse_args(['--verbose']) + ns = 
self.parse_args(['--verbose']) self.assertEqual(ns.verbose, 1) - ns = libregrtest._parse_args(['--verbose'] * 3) + ns = self.parse_args(['--verbose'] * 3) self.assertEqual(ns.verbose, 3) - ns = libregrtest._parse_args([]) + ns = self.parse_args([]) self.assertEqual(ns.verbose, 0) - def test_verbose2(self): - for opt in '-w', '--verbose2': + def test_rerun(self): + for opt in '-w', '--rerun', '--verbose2': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) - self.assertTrue(ns.verbose2) + ns = self.parse_args([opt]) + self.assertTrue(ns.rerun) def test_verbose3(self): for opt in '-W', '--verbose3': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertTrue(ns.verbose3) def test_quiet(self): for opt in '-q', '--quiet': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertTrue(ns.quiet) self.assertEqual(ns.verbose, 0) def test_slowest(self): for opt in '-o', '--slowest': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertTrue(ns.print_slow) def test_header(self): - ns = libregrtest._parse_args(['--header']) + ns = self.parse_args(['--header']) self.assertTrue(ns.header) - ns = libregrtest._parse_args(['--verbose']) + ns = self.parse_args(['--verbose']) self.assertTrue(ns.header) def test_randomize(self): - for opt in '-r', '--randomize': + for opt in ('-r', '--randomize'): with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertTrue(ns.randomize) + with os_helper.EnvironmentVarGuard() as env: + # with SOURCE_DATE_EPOCH + env['SOURCE_DATE_EPOCH'] = '1697839080' + ns = self.parse_args(['--randomize']) + regrtest = main.Regrtest(ns) + self.assertFalse(regrtest.randomize) + self.assertIsInstance(regrtest.random_seed, str) + self.assertEqual(regrtest.random_seed, '1697839080') + + # without SOURCE_DATE_EPOCH + del env['SOURCE_DATE_EPOCH'] + ns = self.parse_args(['--randomize']) + regrtest = main.Regrtest(ns) + self.assertTrue(regrtest.randomize) + self.assertIsInstance(regrtest.random_seed, int) + def test_randseed(self): - ns = libregrtest._parse_args(['--randseed', '12345']) + ns = self.parse_args(['--randseed', '12345']) self.assertEqual(ns.random_seed, 12345) self.assertTrue(ns.randomize) self.checkError(['--randseed'], 'expected one argument') @@ -140,7 +178,7 @@ def test_randseed(self): def test_fromfile(self): for opt in '-f', '--fromfile': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, 'foo']) + ns = self.parse_args([opt, 'foo']) self.assertEqual(ns.fromfile, 'foo') self.checkError([opt], 'expected one argument') self.checkError([opt, 'foo', '-s'], "don't go together") @@ -148,46 +186,37 @@ def test_fromfile(self): def test_exclude(self): for opt in '-x', '--exclude': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertTrue(ns.exclude) def test_single(self): for opt in '-s', '--single': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertTrue(ns.single) self.checkError([opt, '-f', 'foo'], "don't go together") - # TODO: RUSTPYTHON - @unittest.expectedFailure - def test_ignore(self): - for opt in '-i', '--ignore': + def test_match(self): + for opt in '-m', '--match': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, 'pattern']) - self.assertEqual(ns.ignore_tests, ['pattern']) + ns = self.parse_args([opt, 'pattern']) + 
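+                # (Note, inferred from the assertions in this test:
+                # match_tests stores (pattern, include) pairs, so
+                # -m/--match records (pattern, True) and -i/--ignore
+                # records (pattern, False), preserving command-line order.)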
self.assertEqual(ns.match_tests, [('pattern', True)]) self.checkError([opt], 'expected one argument') - self.addCleanup(os_helper.unlink, os_helper.TESTFN) - with open(os_helper.TESTFN, "w") as fp: - print('matchfile1', file=fp) - print('matchfile2', file=fp) - - filename = os.path.abspath(os_helper.TESTFN) - ns = libregrtest._parse_args(['-m', 'match', - '--ignorefile', filename]) - self.assertEqual(ns.ignore_tests, - ['matchfile1', 'matchfile2']) - - def test_match(self): - for opt in '-m', '--match': + for opt in '-i', '--ignore': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, 'pattern']) - self.assertEqual(ns.match_tests, ['pattern']) + ns = self.parse_args([opt, 'pattern']) + self.assertEqual(ns.match_tests, [('pattern', False)]) self.checkError([opt], 'expected one argument') - ns = libregrtest._parse_args(['-m', 'pattern1', - '-m', 'pattern2']) - self.assertEqual(ns.match_tests, ['pattern1', 'pattern2']) + ns = self.parse_args(['-m', 'pattern1', '-m', 'pattern2']) + self.assertEqual(ns.match_tests, [('pattern1', True), ('pattern2', True)]) + + ns = self.parse_args(['-m', 'pattern1', '-i', 'pattern2']) + self.assertEqual(ns.match_tests, [('pattern1', True), ('pattern2', False)]) + + ns = self.parse_args(['-i', 'pattern1', '-m', 'pattern2']) + self.assertEqual(ns.match_tests, [('pattern1', False), ('pattern2', True)]) self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "w") as fp: @@ -195,73 +224,76 @@ def test_match(self): print('matchfile2', file=fp) filename = os.path.abspath(os_helper.TESTFN) - ns = libregrtest._parse_args(['-m', 'match', - '--matchfile', filename]) + ns = self.parse_args(['-m', 'match', '--matchfile', filename]) self.assertEqual(ns.match_tests, - ['match', 'matchfile1', 'matchfile2']) + [('match', True), ('matchfile1', True), ('matchfile2', True)]) + + ns = self.parse_args(['-i', 'match', '--ignorefile', filename]) + self.assertEqual(ns.match_tests, + [('match', False), ('matchfile1', False), ('matchfile2', False)]) def test_failfast(self): for opt in '-G', '--failfast': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, '-v']) + ns = self.parse_args([opt, '-v']) self.assertTrue(ns.failfast) - ns = libregrtest._parse_args([opt, '-W']) + ns = self.parse_args([opt, '-W']) self.assertTrue(ns.failfast) self.checkError([opt], '-G/--failfast needs either -v or -W') def test_use(self): for opt in '-u', '--use': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, 'gui,network']) + ns = self.parse_args([opt, 'gui,network']) self.assertEqual(ns.use_resources, ['gui', 'network']) - ns = libregrtest._parse_args([opt, 'gui,none,network']) + ns = self.parse_args([opt, 'gui,none,network']) self.assertEqual(ns.use_resources, ['network']) - expected = list(libregrtest.ALL_RESOURCES) + expected = list(cmdline.ALL_RESOURCES) expected.remove('gui') - ns = libregrtest._parse_args([opt, 'all,-gui']) + ns = self.parse_args([opt, 'all,-gui']) self.assertEqual(ns.use_resources, expected) self.checkError([opt], 'expected one argument') self.checkError([opt, 'foo'], 'invalid resource') # all + a resource not part of "all" - ns = libregrtest._parse_args([opt, 'all,tzdata']) + ns = self.parse_args([opt, 'all,tzdata']) self.assertEqual(ns.use_resources, - list(libregrtest.ALL_RESOURCES) + ['tzdata']) + list(cmdline.ALL_RESOURCES) + ['tzdata']) # test another resource which is not part of "all" - ns = libregrtest._parse_args([opt, 'extralargefile']) + ns = self.parse_args([opt, 'extralargefile']) 
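+                # (Note, inferred from the assertions around this call:
+                # 'all' expands to cmdline.ALL_RESOURCES only, a '-name'
+                # entry subtracts from that expansion, and resources not in
+                # ALL_RESOURCES, such as 'tzdata' or 'extralargefile', must
+                # be requested individually.)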
self.assertEqual(ns.use_resources, ['extralargefile']) def test_memlimit(self): for opt in '-M', '--memlimit': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, '4G']) + ns = self.parse_args([opt, '4G']) self.assertEqual(ns.memlimit, '4G') self.checkError([opt], 'expected one argument') def test_testdir(self): - ns = libregrtest._parse_args(['--testdir', 'foo']) + ns = self.parse_args(['--testdir', 'foo']) self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo')) self.checkError(['--testdir'], 'expected one argument') def test_runleaks(self): for opt in '-L', '--runleaks': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertTrue(ns.runleaks) def test_huntrleaks(self): for opt in '-R', '--huntrleaks': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, ':']) + ns = self.parse_args([opt, ':']) self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt')) - ns = libregrtest._parse_args([opt, '6:']) + ns = self.parse_args([opt, '6:']) self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt')) - ns = libregrtest._parse_args([opt, ':3']) + ns = self.parse_args([opt, ':3']) self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt')) - ns = libregrtest._parse_args([opt, '6:3:leaks.log']) + ns = self.parse_args([opt, '6:3:leaks.log']) self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log')) self.checkError([opt], 'expected one argument') self.checkError([opt, '6'], @@ -272,23 +304,33 @@ def test_huntrleaks(self): def test_multiprocess(self): for opt in '-j', '--multiprocess': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, '2']) + ns = self.parse_args([opt, '2']) self.assertEqual(ns.use_mp, 2) self.checkError([opt], 'expected one argument') self.checkError([opt, 'foo'], 'invalid int value') - self.checkError([opt, '2', '-T'], "don't go together") - self.checkError([opt, '0', '-T'], "don't go together") - def test_coverage(self): + def test_coverage_sequential(self): for opt in '-T', '--coverage': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + with support.captured_stderr() as stderr: + ns = self.parse_args([opt]) + self.assertTrue(ns.trace) + self.assertIn( + "collecting coverage without -j is imprecise", + stderr.getvalue(), + ) + + @unittest.skipUnless(support.Py_DEBUG, 'need a debug build') + def test_coverage_mp(self): + for opt in '-T', '--coverage': + with self.subTest(opt=opt): + ns = self.parse_args([opt, '-j1']) self.assertTrue(ns.trace) def test_coverdir(self): for opt in '-D', '--coverdir': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, 'foo']) + ns = self.parse_args([opt, 'foo']) self.assertEqual(ns.coverdir, os.path.join(os_helper.SAVEDCWD, 'foo')) self.checkError([opt], 'expected one argument') @@ -296,13 +338,13 @@ def test_coverdir(self): def test_nocoverdir(self): for opt in '-N', '--nocoverdir': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) self.assertIsNone(ns.coverdir) def test_threshold(self): for opt in '-t', '--threshold': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt, '1000']) + ns = self.parse_args([opt, '1000']) self.assertEqual(ns.threshold, 1000) self.checkError([opt], 'expected one argument') self.checkError([opt, 'foo'], 'invalid int value') @@ -311,7 +353,7 @@ def test_nowindows(self): for opt in '-n', '--nowindows': with self.subTest(opt=opt): with contextlib.redirect_stderr(io.StringIO()) as stderr: - ns = libregrtest._parse_args([opt]) + ns = self.parse_args([opt]) 
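+                # (Note: -n/--nowindows is accepted only for backward
+                # compatibility; parsing succeeds but emits the deprecation
+                # warning checked on stderr below.)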
@@ -319,39 +361,39 @@ def test_forever(self):
         for opt in '-F', '--forever':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.forever)
 
     def test_unrecognized_argument(self):
         self.checkError(['--xxx'], 'usage:')
 
     def test_long_option__partial(self):
-        ns = libregrtest._parse_args(['--qui'])
+        ns = self.parse_args(['--qui'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
 
     def test_two_options(self):
-        ns = libregrtest._parse_args(['--quiet', '--exclude'])
+        ns = self.parse_args(['--quiet', '--exclude'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
         self.assertTrue(ns.exclude)
 
     def test_option_with_empty_string_value(self):
-        ns = libregrtest._parse_args(['--start', ''])
+        ns = self.parse_args(['--start', ''])
         self.assertEqual(ns.start, '')
 
     def test_arg(self):
-        ns = libregrtest._parse_args(['foo'])
+        ns = self.parse_args(['foo'])
         self.assertEqual(ns.args, ['foo'])
 
     def test_option_and_arg(self):
-        ns = libregrtest._parse_args(['--quiet', 'foo'])
+        ns = self.parse_args(['--quiet', 'foo'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
         self.assertEqual(ns.args, ['foo'])
 
     def test_arg_option_arg(self):
-        ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
+        ns = self.parse_args(['test_unaryop', '-v', 'test_binop'])
         self.assertEqual(ns.verbose, 1)
         self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])
 
@@ -359,6 +401,100 @@ def test_unknown_option(self):
         self.checkError(['--unknown-option'],
                         'unrecognized arguments: --unknown-option')
 
+    def create_regrtest(self, args):
+        ns = cmdline._parse_args(args)
+
+        # Check Regrtest attributes which are more reliable than Namespace
+        # which has an unclear API
+        with os_helper.EnvironmentVarGuard() as env:
+            # Ignore SOURCE_DATE_EPOCH env var if it's set
+            if 'SOURCE_DATE_EPOCH' in env:
+                del env['SOURCE_DATE_EPOCH']
+
+            regrtest = main.Regrtest(ns)
+
+        return regrtest
+
+    def check_ci_mode(self, args, use_resources, rerun=True):
+        regrtest = self.create_regrtest(args)
+        self.assertEqual(regrtest.num_workers, -1)
+        self.assertEqual(regrtest.want_rerun, rerun)
+        self.assertTrue(regrtest.randomize)
+        self.assertIsInstance(regrtest.random_seed, int)
+        self.assertTrue(regrtest.fail_env_changed)
+        self.assertTrue(regrtest.print_slowest)
+        self.assertTrue(regrtest.output_on_failure)
+        self.assertEqual(sorted(regrtest.use_resources), sorted(use_resources))
+        return regrtest
+
+    def test_fast_ci(self):
+        args = ['--fast-ci']
+        use_resources = sorted(cmdline.ALL_RESOURCES)
+        use_resources.remove('cpu')
+        regrtest = self.check_ci_mode(args, use_resources)
+        self.assertEqual(regrtest.timeout, 10 * 60)
+
+    def test_fast_ci_python_cmd(self):
+        args = ['--fast-ci', '--python', 'python -X dev']
+        use_resources = sorted(cmdline.ALL_RESOURCES)
+        use_resources.remove('cpu')
+        regrtest = self.check_ci_mode(args, use_resources, rerun=False)
+        self.assertEqual(regrtest.timeout, 10 * 60)
+        self.assertEqual(regrtest.python_cmd, ('python', '-X', 'dev'))
+
+    def test_fast_ci_resource(self):
+        # it should be possible to override resources individually
+        args = ['--fast-ci', '-u-network']
+        use_resources = sorted(cmdline.ALL_RESOURCES)
+        use_resources.remove('cpu')
+        use_resources.remove('network')
+        self.check_ci_mode(args, use_resources)
+
+    def test_slow_ci(self):
+        args = ['--slow-ci']
+        use_resources = sorted(cmdline.ALL_RESOURCES)
+        regrtest = self.check_ci_mode(args, use_resources)
+        self.assertEqual(regrtest.timeout, 20 * 60)
+
+    def test_dont_add_python_opts(self):
+        args = ['--dont-add-python-opts']
+        ns = cmdline._parse_args(args)
+        self.assertFalse(ns._add_python_opts)
+
+    def test_bisect(self):
+        args = ['--bisect']
+        regrtest = self.create_regrtest(args)
+        self.assertTrue(regrtest.want_bisect)
+
+    def test_verbose3_huntrleaks(self):
+        args = ['-R', '3:10', '--verbose3']
+        with support.captured_stderr():
+            regrtest = self.create_regrtest(args)
+        self.assertIsNotNone(regrtest.hunt_refleak)
+        self.assertEqual(regrtest.hunt_refleak.warmups, 3)
+        self.assertEqual(regrtest.hunt_refleak.runs, 10)
+        self.assertFalse(regrtest.output_on_failure)
+
+    def test_single_process(self):
+        args = ['-j2', '--single-process']
+        with support.captured_stderr():
+            regrtest = self.create_regrtest(args)
+        self.assertEqual(regrtest.num_workers, 0)
+        self.assertTrue(regrtest.single_process)
+
+        args = ['--fast-ci', '--single-process']
+        with support.captured_stderr():
+            regrtest = self.create_regrtest(args)
+        self.assertEqual(regrtest.num_workers, 0)
+        self.assertTrue(regrtest.single_process)
+
+
+@dataclasses.dataclass(slots=True)
+class Rerun:
+    name: str
+    match: str | None
+    success: bool
+
 
 class BaseTestCase(unittest.TestCase):
     TEST_UNIQUE_ID = 1
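The --fast-ci/--slow-ci assertions above pin down the bundled defaults one attribute at a time. Summarized as data (values read off the checks; an illustration, not the implementation):

    FAST_CI = {
        'timeout': 10 * 60,           # seconds
        'use_resources': 'all,-cpu',  # every resource except "cpu"
        'num_workers': -1,            # auto-detect the worker count
        'want_rerun': True,           # disabled when --python is given
        'randomize': True,
    }
    SLOW_CI = dict(FAST_CI, timeout=20 * 60, use_resources='all')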
@@ -407,8 +543,12 @@ def regex_search(self, regex, output):
             self.fail("%r not found in %r" % (regex, output))
         return match
 
-    def check_line(self, output, regex):
-        regex = re.compile(r'^' + regex, re.MULTILINE)
+    def check_line(self, output, pattern, full=False, regex=True):
+        if not regex:
+            pattern = re.escape(pattern)
+        if full:
+            pattern += '\n'
+        regex = re.compile(r'^' + pattern, re.MULTILINE)
         self.assertRegex(output, regex)
 
     def parse_executed_tests(self, output):
@@ -417,31 +557,47 @@ def parse_executed_tests(self, output):
         parser = re.finditer(regex, output, re.MULTILINE)
         return list(match.group(1) for match in parser)
 
-    def check_executed_tests(self, output, tests, skipped=(), failed=(),
+    def check_executed_tests(self, output, tests, *, stats,
+                             skipped=(), failed=(),
                              env_changed=(), omitted=(),
-                             rerun=(), no_test_ran=(),
-                             randomize=False, interrupted=False,
-                             fail_env_changed=False):
+                             rerun=None, run_no_tests=(),
+                             resource_denied=(),
+                             randomize=False, parallel=False, interrupted=False,
+                             fail_env_changed=False,
+                             forever=False, filtered=False):
         if isinstance(tests, str):
             tests = [tests]
         if isinstance(skipped, str):
             skipped = [skipped]
+        if isinstance(resource_denied, str):
+            resource_denied = [resource_denied]
         if isinstance(failed, str):
             failed = [failed]
         if isinstance(env_changed, str):
             env_changed = [env_changed]
         if isinstance(omitted, str):
             omitted = [omitted]
-        if isinstance(rerun, str):
-            rerun = [rerun]
-        if isinstance(no_test_ran, str):
-            no_test_ran = [no_test_ran]
+        if isinstance(run_no_tests, str):
+            run_no_tests = [run_no_tests]
+        if isinstance(stats, int):
+            stats = TestStats(stats)
+        if parallel:
+            randomize = True
+
+        rerun_failed = []
+        if rerun is not None and not env_changed:
+            failed = [rerun.name]
+            if not rerun.success:
+                rerun_failed.append(rerun.name)
 
         executed = self.parse_executed_tests(output)
+        total_tests = list(tests)
+        if rerun is not None:
+            total_tests.append(rerun.name)
         if randomize:
-            self.assertEqual(set(executed), set(tests), output)
+            self.assertEqual(set(executed), set(total_tests), output)
         else:
-            self.assertEqual(executed, tests, output)
+            self.assertEqual(executed, total_tests, output)
 
         def plural(count):
             return 's' if count != 1 else ''
 
@@ -457,12 +613,17 @@ def list_regex(line_format, tests):
             regex = list_regex('%s test%s skipped', skipped)
             self.check_line(output, regex)
 
+        if resource_denied:
+            regex = list_regex(r'%s test%s skipped \(resource denied\)', resource_denied)
+            self.check_line(output, regex)
+
         if failed:
             regex = list_regex('%s test%s failed', failed)
             self.check_line(output, regex)
 
         if env_changed:
-            regex = list_regex('%s test%s altered the execution environment',
+            regex = list_regex(r'%s test%s altered the execution environment '
+                               r'\(env changed\)',
                                env_changed)
             self.check_line(output, regex)
 
@@ -470,73 +631,120 @@ def list_regex(line_format, tests):
         if omitted:
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)
 
-        if rerun:
-            regex = list_regex('%s re-run test%s', rerun)
+        if rerun is not None:
+            regex = list_regex('%s re-run test%s', [rerun.name])
             self.check_line(output, regex)
-            regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
+            regex = LOG_PREFIX + r"Re-running 1 failed tests in verbose mode"
+            self.check_line(output, regex)
+            regex = fr"Re-running {rerun.name} in verbose mode"
+            if rerun.match:
+                regex = fr"{regex} \(matching: {rerun.match}\)"
             self.check_line(output, regex)
-            for test_name in rerun:
-                regex = LOG_PREFIX + f"Re-running {test_name} in verbose mode"
-                self.check_line(output, regex)
 
-        if no_test_ran:
-            regex = list_regex('%s test%s run no tests', no_test_ran)
+        if run_no_tests:
+            regex = list_regex('%s test%s run no tests', run_no_tests)
             self.check_line(output, regex)
 
-        good = (len(tests) - len(skipped) - len(failed)
-                - len(omitted) - len(env_changed) - len(no_test_ran))
+        good = (len(tests) - len(skipped) - len(resource_denied) - len(failed)
+                - len(omitted) - len(env_changed) - len(run_no_tests))
         if good:
-            regex = r'%s test%s OK\.$' % (good, plural(good))
-            if not skipped and not failed and good > 1:
+            regex = r'%s test%s OK\.' % (good, plural(good))
+            if not skipped and not failed and (rerun is None or rerun.success) and good > 1:
                 regex = 'All %s' % regex
-            self.check_line(output, regex)
+            self.check_line(output, regex, full=True)
 
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')
 
-        result = []
+        # Total tests
+        text = f'run={stats.tests_run:,}'
+        if filtered:
+            text = fr'{text} \(filtered\)'
+        parts = [text]
+        if stats.failures:
+            parts.append(f'failures={stats.failures:,}')
+        if stats.skipped:
+            parts.append(f'skipped={stats.skipped:,}')
+        line = fr'Total tests: {" ".join(parts)}'
+        self.check_line(output, line, full=True)
+
+        # Total test files
+        run = len(total_tests) - len(resource_denied)
+        if rerun is not None:
+            total_failed = len(rerun_failed)
+            total_rerun = 1
+        else:
+            total_failed = len(failed)
+            total_rerun = 0
+        if interrupted:
+            run = 0
+        text = f'run={run}'
+        if not forever:
+            text = f'{text}/{len(tests)}'
+        if filtered:
+            text = fr'{text} \(filtered\)'
+        report = [text]
+        for name, ntest in (
+            ('failed', total_failed),
+            ('env_changed', len(env_changed)),
+            ('skipped', len(skipped)),
+            ('resource_denied', len(resource_denied)),
+            ('rerun', total_rerun),
+            ('run_no_tests', len(run_no_tests)),
+        ):
+            if ntest:
+                report.append(f'{name}={ntest}')
+        line = fr'Total test files: {" ".join(report)}'
+        self.check_line(output, line, full=True)
+
+        # Result
+        state = []
         if failed:
-            result.append('FAILURE')
+            state.append('FAILURE')
         elif fail_env_changed and env_changed:
-            result.append('ENV CHANGED')
+            state.append('ENV CHANGED')
         if interrupted:
-            result.append('INTERRUPTED')
-        if not any((good, result, failed, interrupted, skipped,
+            state.append('INTERRUPTED')
+        if not any((good, failed, interrupted, skipped,
                     env_changed, fail_env_changed)):
-            result.append("NO TEST RUN")
-        elif not result:
-            result.append('SUCCESS')
-        result = ', '.join(result)
-        if rerun:
-            self.check_line(output, 'Tests result: FAILURE')
-            result = 'FAILURE then %s' % result
-
-        self.check_line(output, 'Tests result: %s' % result)
-
-    def parse_random_seed(self, output):
-        match = self.regex_search(r'Using random seed ([0-9]+)', output)
-        randseed = int(match.group(1))
-        self.assertTrue(0 <= randseed <= 10000000, randseed)
-        return randseed
+            state.append("NO TESTS RAN")
+        elif not state:
+            state.append('SUCCESS')
+        state = ', '.join(state)
+        if rerun is not None:
+            new_state = 'SUCCESS' if rerun.success else 'FAILURE'
+            state = f'{state} then {new_state}'
+        self.check_line(output, f'Result: {state}', full=True)
+
+    def parse_random_seed(self, output: str) -> str:
+        match = self.regex_search(r'Using random seed: (.*)', output)
+        return match.group(1)
 
     def run_command(self, args, input=None, exitcode=0, **kw):
         if not input:
             input = ''
         if 'stderr' not in kw:
-            kw['stderr'] = subprocess.PIPE
+            kw['stderr'] = subprocess.STDOUT
+
+        env = kw.pop('env', None)
+        if env is None:
+            env = dict(os.environ)
+            env.pop('SOURCE_DATE_EPOCH', None)
+
         proc = subprocess.run(args,
-                              universal_newlines=True,
+                              text=True,
                               input=input,
                               stdout=subprocess.PIPE,
+                              env=env,
                               **kw)
         if proc.returncode != exitcode:
-            msg = ("Command %s failed with exit code %s\n"
+            msg = ("Command %s failed with exit code %s, but exit code %s expected!\n"
                    "\n"
                    "stdout:\n"
                    "---\n"
                    "%s\n"
                    "---\n"
-                   % (str(args), proc.returncode, proc.stdout))
+                   % (str(args), proc.returncode, exitcode, proc.stdout))
             if proc.stderr:
                 msg += ("\n"
                         "stderr:\n"
                         "---\n"
                         "%s\n"
                        "---\n"
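check_executed_tests() above rebuilds, piece by piece, the summary lines that regrtest prints at the end of a run. A tiny formatter mirroring the "Total test files: run=3/3 failed=1" shape the code matches (a hypothetical helper, for illustration only):

    def total_files_line(run, total, **counts):
        # Only non-zero counters are reported, in the order given.
        parts = [f'run={run}/{total}']
        parts += [f'{name}={n}' for name, n in counts.items() if n]
        return 'Total test files: ' + ' '.join(parts)

    assert total_files_line(3, 3, failed=1) == 'Total test files: run=3/3 failed=1'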
@@ -548,17 +756,20 @@ def run_command(self, args, input=None, exitcode=0, **kw):
         return proc
 
     def run_python(self, args, **kw):
-        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
+        extraargs = []
+        if 'uops' in sys._xoptions:
+            # Pass -X uops along
+            extraargs.extend(['-X', 'uops'])
+        args = [sys.executable, *extraargs, '-X', 'faulthandler', '-I', *args]
         proc = self.run_command(args, **kw)
         return proc.stdout
 
 
 class CheckActualTests(BaseTestCase):
-    """
-    Check that regrtest appears to find the expected set of tests.
-    """
-
     def test_finds_expected_number_of_tests(self):
+        """
+        Check that regrtest appears to find the expected set of tests.
+        """
         args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
         output = self.run_python(args)
         rough_number_of_tests_found = len(output.splitlines())
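run_python() in the hunk above forwards -X uops to child interpreters by inspecting sys._xoptions, the CPython-specific mapping of -X options. A standalone illustration (the 'dev' entry is included here only to show the shape, it is not part of the patch):

    import sys

    def python_cmd(*args):
        # Propagate selected -X options from this interpreter to a child one
        extra = []
        for opt in ('uops', 'dev'):
            if opt in sys._xoptions:
                extra.extend(['-X', opt])
        return [sys.executable, *extra, '-I', *args]

    print(python_cmd('-c', 'pass'))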
@@ -575,9 +786,10 @@ def test_finds_expected_number_of_tests(self):
         self.assertGreater(rough_number_of_tests_found,
                            rough_counted_test_py_files*9//10,
                            msg='Unexpectedly low number of tests found in:\n'
-                           f'{", ".join(output.splitlines())}')
+                               f'{", ".join(output.splitlines())}')
 
 
+@support.force_not_colorized_test_class
 class ProgramsTestCase(BaseTestCase):
     """
     Test various ways to run the Python test suite. Use options close
@@ -595,17 +807,19 @@ def setUp(self):
         self.python_args = ['-Wd', '-E', '-bb']
         self.regrtest_args = ['-uall', '-rwW',
                               '--testdir=%s' % self.tmptestdir]
-        if hasattr(faulthandler, 'dump_traceback_later'):
-            self.regrtest_args.extend(('--timeout', '3600', '-j4'))
+        self.regrtest_args.extend(('--timeout', '3600', '-j4'))
         if sys.platform == 'win32':
             self.regrtest_args.append('-n')
 
     def check_output(self, output):
-        self.parse_random_seed(output)
-        self.check_executed_tests(output, self.tests, randomize=True)
+        randseed = self.parse_random_seed(output)
+        self.assertTrue(randseed.isdigit(), randseed)
 
-    def run_tests(self, args):
-        output = self.run_python(args)
+        self.check_executed_tests(output, self.tests,
+                                  randomize=True, stats=len(self.tests))
+
+    def run_tests(self, args, env=None):
+        output = self.run_python(args, env=env)
         self.check_output(output)
 
     def test_script_regrtest(self):
@@ -627,16 +841,12 @@ def test_module_regrtest(self):
                 *self.regrtest_args, *self.tests]
         self.run_tests(args)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_module_autotest(self):
         # -m test.autotest
         args = [*self.python_args, '-m', 'test.autotest',
                 *self.regrtest_args, *self.tests]
         self.run_tests(args)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_module_from_test_autotest(self):
         # from test import autotest
         code = 'from test import autotest'
@@ -644,22 +854,12 @@ def test_module_from_test_autotest(self):
                 *self.regrtest_args, *self.tests]
         self.run_tests(args)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_script_autotest(self):
         # Lib/test/autotest.py
         script = os.path.join(self.testdir, 'autotest.py')
         args = [*self.python_args, script, *self.regrtest_args, *self.tests]
         self.run_tests(args)
 
-    @unittest.skipUnless(sysconfig.is_python_build(),
-                         'run_tests.py script is not installed')
-    def test_tools_script_run_tests(self):
-        # Tools/scripts/run_tests.py
-        script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
-        args = [script, *self.regrtest_args, *self.tests]
-        self.run_tests(args)
-
     def run_batch(self, *args):
         proc = self.run_command(args)
         self.check_output(proc.stdout)
@@ -673,10 +873,14 @@ def test_tools_buildbot_test(self):
         test_args = ['--testdir=%s' % self.tmptestdir]
         if platform.machine() == 'ARM64':
             test_args.append('-arm64') # ARM 64-bit build
+        elif platform.machine() == 'ARM':
+            test_args.append('-arm32') # 32-bit ARM build
         elif platform.architecture()[0] == '64bit':
             test_args.append('-x64')   # 64-bit build
-        if not Py_DEBUG:
+        if not support.Py_DEBUG:
             test_args.append('+d')     # Release build, use python.exe
+        if sysconfig.get_config_var("Py_GIL_DISABLED"):
+            test_args.append('--disable-gil')
         self.run_batch(script, *test_args, *self.tests)
 
     @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
@@ -688,13 +892,18 @@ def test_pcbuild_rt(self):
         rt_args = ["-q"]             # Quick, don't run tests twice
         if platform.machine() == 'ARM64':
             rt_args.append('-arm64') # ARM 64-bit build
+        elif platform.machine() == 'ARM':
+            rt_args.append('-arm32') # 32-bit ARM build
         elif platform.architecture()[0] == '64bit':
             rt_args.append('-x64')   # 64-bit build
-        if Py_DEBUG:
+        if support.Py_DEBUG:
             rt_args.append('-d')     # Debug build, use python_d.exe
+        if sysconfig.get_config_var("Py_GIL_DISABLED"):
+            rt_args.append('--disable-gil')
         self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)
 
 
+@support.force_not_colorized_test_class
 class ArgsTestCase(BaseTestCase):
     """
     Test arguments of the Python test suite.
@@ -704,6 +913,40 @@ def run_tests(self, *testargs, **kw):
         cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
         return self.run_python(cmdargs, **kw)
 
+    def test_success(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class PassingTests(unittest.TestCase):
+                def test_test1(self):
+                    pass
+
+                def test_test2(self):
+                    pass
+
+                def test_test3(self):
+                    pass
+        """)
+        tests = [self.create_test(f'ok{i}', code=code) for i in range(1, 6)]
+
+        output = self.run_tests(*tests)
+        self.check_executed_tests(output, tests,
+                                  stats=3 * len(tests))
+
+    def test_skip(self):
+        code = textwrap.dedent("""
+            import unittest
+            raise unittest.SkipTest("nope")
+        """)
+        test_ok = self.create_test('ok')
+        test_skip = self.create_test('skip', code=code)
+        tests = [test_ok, test_skip]
+
+        output = self.run_tests(*tests)
+        self.check_executed_tests(output, tests,
+                                  skipped=[test_skip],
+                                  stats=1)
+
     def test_failing_test(self):
         # test a failing test
         code = textwrap.dedent("""
@@ -717,8 +960,9 @@ def test_failing(self):
         test_failing = self.create_test('failing', code=code)
         tests = [test_ok, test_failing]
 
-        output = self.run_tests(*tests, exitcode=2)
-        self.check_executed_tests(output, tests, failed=test_failing)
+        output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, tests, failed=test_failing,
+                                  stats=TestStats(2, 1))
 
     def test_resources(self):
         # test -u command line option
@@ -737,17 +981,19 @@ def test_pass(self):
 
         # -u all: 2 resources enabled
         output = self.run_tests('-u', 'all', *test_names)
-        self.check_executed_tests(output, test_names)
+        self.check_executed_tests(output, test_names, stats=2)
 
         # -u audio: 1 resource enabled
         output = self.run_tests('-uaudio', *test_names)
         self.check_executed_tests(output, test_names,
-                                  skipped=tests['network'])
+                                  resource_denied=tests['network'],
+                                  stats=1)
 
         # no option: 0 resources enabled
-        output = self.run_tests(*test_names)
+        output = self.run_tests(*test_names, exitcode=EXITCODE_NO_TESTS_RAN)
         self.check_executed_tests(output, test_names,
-                                  skipped=test_names)
+                                  resource_denied=test_names,
+                                  stats=0)
 
     def test_random(self):
         # test -r and --randseed command line option
@@ -758,13 +1004,14 @@ def test_random(self):
         test = self.create_test('random', code)
 
         # first run to get the output with the random seed
-        output = self.run_tests('-r', test)
+        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN)
         randseed = self.parse_random_seed(output)
         match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
         test_random = int(match.group(1))
 
         # try to reproduce with the random seed
-        output = self.run_tests('-r', '--randseed=%s' % randseed, test)
+        output = self.run_tests('-r', f'--randseed={randseed}', test,
+                                exitcode=EXITCODE_NO_TESTS_RAN)
         randseed2 = self.parse_random_seed(output)
         self.assertEqual(randseed2, randseed)
 
@@ -772,6 +1019,35 @@ def test_random(self):
         test_random2 = int(match.group(1))
         self.assertEqual(test_random2, test_random)
 
+        # check that random.seed is used by default
+        output = self.run_tests(test, exitcode=EXITCODE_NO_TESTS_RAN)
+        randseed = self.parse_random_seed(output)
+        self.assertTrue(randseed.isdigit(), randseed)
+
+        # check SOURCE_DATE_EPOCH (integer)
+        timestamp = '1697839080'
+        env = dict(os.environ, SOURCE_DATE_EPOCH=timestamp)
+        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN,
+                                env=env)
+        randseed = self.parse_random_seed(output)
+        self.assertEqual(randseed, timestamp)
+        self.check_line(output, 'TESTRANDOM: 520')
+
+        # check SOURCE_DATE_EPOCH (string)
+        env = dict(os.environ, SOURCE_DATE_EPOCH='XYZ')
+        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN,
+                                env=env)
+        randseed = self.parse_random_seed(output)
+        self.assertEqual(randseed, 'XYZ')
+        self.check_line(output, 'TESTRANDOM: 22')
+
+        # check SOURCE_DATE_EPOCH (empty string): ignore the env var
+        env = dict(os.environ, SOURCE_DATE_EPOCH='')
+        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN,
+                                env=env)
+        randseed = self.parse_random_seed(output)
+        self.assertTrue(randseed.isdigit(), randseed)
+
     def test_fromfile(self):
         # test --fromfile
         tests = [self.create_test() for index in range(5)]
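The SOURCE_DATE_EPOCH checks in the hunk above work because random.seed() accepts strings and hashes them deterministically, so any fixed env var value reproduces the same sequence; the 520 and 22 constants asserted there come from exactly this kind of seeding. A quick demonstration of the property:

    import random

    def first_randint(seed):
        random.seed(seed)  # str seeds are hashed deterministically
        return random.randint(0, 1000)

    # The same seed always reproduces the same first draw.
    assert first_randint('1697839080') == first_randint('1697839080')
    assert first_randint('XYZ') == first_randint('XYZ')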
@@ -794,7 +1070,8 @@ def test_fromfile(self):
             previous = name
 
         output = self.run_tests('--fromfile', filename)
-        self.check_executed_tests(output, tests)
+        stats = len(tests)
+        self.check_executed_tests(output, tests, stats=stats)
 
         # test format '[2/7] test_opcodes'
         with open(filename, "w") as fp:
@@ -802,7 +1079,7 @@ def test_fromfile(self):
                 print("[%s/%s] %s" % (index, len(tests), name), file=fp)
 
         output = self.run_tests('--fromfile', filename)
-        self.check_executed_tests(output, tests)
+        self.check_executed_tests(output, tests, stats=stats)
 
         # test format 'test_opcodes'
         with open(filename, "w") as fp:
@@ -810,7 +1087,7 @@ def test_fromfile(self):
                 print(name, file=fp)
 
         output = self.run_tests('--fromfile', filename)
-        self.check_executed_tests(output, tests)
+        self.check_executed_tests(output, tests, stats=stats)
 
         # test format 'Lib/test/test_opcodes.py'
         with open(filename, "w") as fp:
@@ -818,29 +1095,25 @@ def test_fromfile(self):
                 print('Lib/test/%s.py' % name, file=fp)
 
         output = self.run_tests('--fromfile', filename)
-        self.check_executed_tests(output, tests)
+        self.check_executed_tests(output, tests, stats=stats)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
-        output = self.run_tests(test, exitcode=130)
+        output = self.run_tests(test, exitcode=EXITCODE_INTERRUPTED)
         self.check_executed_tests(output, test, omitted=test,
-                                  interrupted=True)
+                                  interrupted=True, stats=0)
 
     def test_slowest(self):
         # test --slowest
         tests = [self.create_test() for index in range(3)]
         output = self.run_tests("--slowest", *tests)
-        self.check_executed_tests(output, tests)
+        self.check_executed_tests(output, tests, stats=len(tests))
         regex = ('10 slowest tests:\n'
                  '(?:- %s: .*\n){%s}'
                  % (self.TESTNAME_REGEX, len(tests)))
         self.check_line(output, regex)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_slowest_interrupted(self):
         # Issue #25373: test --slowest with an interrupted test
         code = TEST_INTERRUPTED
@@ -852,9 +1125,10 @@ def test_slowest_interrupted(self):
             args = ("--slowest", "-j2", test)
         else:
             args = ("--slowest", test)
-        output = self.run_tests(*args, exitcode=130)
+        output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED)
         self.check_executed_tests(output, test,
-                                  omitted=test, interrupted=True)
+                                  omitted=test, interrupted=True,
+                                  stats=0)
 
         regex = ('10 slowest tests:\n')
         self.check_line(output, regex)
@@ -865,7 +1139,7 @@ def test_coverage(self):
         # test --coverage
         test = self.create_test('coverage')
         output = self.run_tests("--coverage", test)
-        self.check_executed_tests(output, [test])
+        self.check_executed_tests(output, [test], stats=1)
         regex = (r'lines +cov% +module +\(path\)\n'
                  r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
         self.check_line(output, regex)
@@ -894,21 +1168,39 @@ def test_run(self):
                 builtins.__dict__['RUN'] = 1
         """)
         test = self.create_test('forever', code=code)
-        output = self.run_tests('--forever', test, exitcode=2)
-        self.check_executed_tests(output, [test]*3, failed=test)
 
-    def check_leak(self, code, what):
+        # --forever
+        output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, [test]*3, failed=test,
+                                  stats=TestStats(3, 1),
+                                  forever=True)
+
+        # --forever --rerun
+        output = self.run_tests('--forever', '--rerun', test, exitcode=0)
+        self.check_executed_tests(output, [test]*3,
+                                  rerun=Rerun(test,
+                                              match='test_run',
+                                              success=True),
+                                  stats=TestStats(4, 1),
+                                  forever=True)
+
+    @without_optimizer
+    def check_leak(self, code, what, *, run_workers=False):
         test = self.create_test('huntrleaks', code=code)
 
         filename = 'reflog.txt'
         self.addCleanup(os_helper.unlink, filename)
-        output = self.run_tests('--huntrleaks', '3:3:', test,
-                                exitcode=2,
+        cmd = ['--huntrleaks', '3:3:']
+        if run_workers:
+            cmd.append('-j1')
+        cmd.append(test)
+        output = self.run_tests(*cmd,
+                                exitcode=EXITCODE_BAD_TEST,
                                 stderr=subprocess.STDOUT)
-        self.check_executed_tests(output, [test], failed=test)
+        self.check_executed_tests(output, [test], failed=test, stats=1)
 
-        line = 'beginning 6 repetitions\n123456\n......\n'
-        self.check_line(output, re.escape(line))
+        line = r'beginning 6 repetitions. .*\n123:456\n[.0-9X]{3} 111\n'
+        self.check_line(output, line)
 
         line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
         self.assertIn(line2, output)
@@ -917,8 +1209,8 @@ def check_leak(self, code, what):
             reflog = fp.read()
             self.assertIn(line2, reflog)
 
-    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
-    def test_huntrleaks(self):
+    @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
+    def check_huntrleaks(self, *, run_workers: bool):
         # test --huntrleaks
         code = textwrap.dedent("""
@@ -929,9 +1221,56 @@ class RefLeakTest(unittest.TestCase):
                 def test_leak(self):
                     GLOBAL_LIST.append(object())
         """)
-        self.check_leak(code, 'references')
+        self.check_leak(code, 'references', run_workers=run_workers)
+
+    def test_huntrleaks(self):
+        self.check_huntrleaks(run_workers=False)
+
+    def test_huntrleaks_mp(self):
+        self.check_huntrleaks(run_workers=True)
+
+    @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
+    def test_huntrleaks_bisect(self):
+        # test --huntrleaks --bisect
+        code = textwrap.dedent("""
+            import unittest
+
+            GLOBAL_LIST = []
+
+            class RefLeakTest(unittest.TestCase):
+                def test1(self):
+                    pass
+
+                def test2(self):
+                    pass
+
+                def test3(self):
+                    GLOBAL_LIST.append(object())
+
+                def test4(self):
+                    pass
+        """)
+
+        test = self.create_test('huntrleaks', code=code)
+
+        filename = 'reflog.txt'
+        self.addCleanup(os_helper.unlink, filename)
+        cmd = ['--huntrleaks', '3:3:', '--bisect', test]
+        output = self.run_tests(*cmd,
+                                exitcode=EXITCODE_BAD_TEST,
+                                stderr=subprocess.STDOUT)
+
+        self.assertIn(f"Bisect {test}", output)
+        self.assertIn(f"Bisect {test}: exit code 0", output)
 
-    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+        # test3 is the one which leaks
+        self.assertIn("Bisection completed in", output)
+        self.assertIn(
+            "Tests (1):\n"
+            f"* {test}.RefLeakTest.test3\n",
+            output)
+
+    @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
     def test_huntrleaks_fd_leak(self):
         # test --huntrleaks for file descriptor leak
         code = textwrap.dedent("""
@@ -945,7 +1284,6 @@ def test_leak(self):
         """)
         self.check_leak(code, 'file descriptors')
 
-    @unittest.expectedFailureIfWindows('TODO: RUSTPYTHON Windows')
     def test_list_tests(self):
         # test --list-tests
         tests = [self.create_test() for i in range(5)]
@@ -953,7 +1291,6 @@ def test_list_tests(self):
         self.assertEqual(output.rstrip().splitlines(),
                          tests)
 
-    @unittest.expectedFailureIfWindows('TODO: RUSTPYTHON Windows')
     def test_list_cases(self):
         # test --list-cases
         code = textwrap.dedent("""
@@ -987,16 +1324,14 @@ def test_crashed(self):
         crash_test = self.create_test(name="crash", code=code)
 
         tests = [crash_test]
-        output = self.run_tests("-j2", *tests, exitcode=2)
+        output = self.run_tests("-j2", *tests, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, tests, failed=crash_test,
-                                  randomize=True)
+                                  parallel=True, stats=0)
 
     def parse_methods(self, output):
         regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
         return [match.group(1) for match in regex.finditer(output)]
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_ignorefile(self):
         code = textwrap.dedent("""
@@ -1011,8 +1346,6 @@ def test_method3(self):
             def test_method4(self):
                 pass
        """)
-        all_methods = ['test_method1', 'test_method2',
-                       'test_method3', 'test_method4']
         testname = self.create_test(code=code)
 
         # only run a subset
@@ -1074,8 +1407,6 @@ def test_method4(self):
         subset = ['test_method1', 'test_method3']
         self.assertEqual(methods, subset)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_env_changed(self):
         code = textwrap.dedent("""
            import unittest
@@ -1088,52 +1419,265 @@ def test_env_changed(self):
         # don't fail by default
         output = self.run_tests(testname)
-        self.check_executed_tests(output, [testname], env_changed=testname)
+        self.check_executed_tests(output, [testname],
+                                  env_changed=testname, stats=1)
 
         # fail with --fail-env-changed
-        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        output = self.run_tests("--fail-env-changed", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
         self.check_executed_tests(output, [testname], env_changed=testname,
-                                  fail_env_changed=True)
+                                  fail_env_changed=True, stats=1)
+
+        # rerun
+        output = self.run_tests("--rerun", testname)
+        self.check_executed_tests(output, [testname],
+                                  env_changed=testname,
+                                  rerun=Rerun(testname,
+                                              match=None,
+                                              success=True),
+                                  stats=2)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_rerun_fail(self):
         # FAILURE then FAILURE
         code = textwrap.dedent("""
             import unittest
 
             class Tests(unittest.TestCase):
-                def test_bug(self):
-                    # test always fail
+                def test_succeed(self):
+                    return
+
+                def test_fail_always(self):
+                    # test that always fails
                     self.fail("bug")
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=2)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [testname],
-                                  failed=testname, rerun=testname)
+                                  rerun=Rerun(testname,
+                                              "test_fail_always",
+                                              success=False),
+                                  stats=TestStats(3, 2))
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_rerun_success(self):
         # FAILURE then SUCCESS
-        code = textwrap.dedent("""
-            import builtins
+        marker_filename = os.path.abspath("regrtest_marker_filename")
+        self.addCleanup(os_helper.unlink, marker_filename)
+        self.assertFalse(os.path.exists(marker_filename))
+
+        code = textwrap.dedent(f"""
+            import os.path
             import unittest
 
+            marker_filename = {marker_filename!r}
+
             class Tests(unittest.TestCase):
-                failed = False
+                def test_succeed(self):
+                    return
 
                 def test_fail_once(self):
-                    if not hasattr(builtins, '_test_failed'):
-                        builtins._test_failed = True
+                    if not os.path.exists(marker_filename):
+                        open(marker_filename, "w").close()
                         self.fail("bug")
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=0)
+        # FAILURE then SUCCESS => exit code 0
+        output = self.run_tests("--rerun", testname, exitcode=0)
+        self.check_executed_tests(output, [testname],
+                                  rerun=Rerun(testname,
+                                              match="test_fail_once",
+                                              success=True),
+                                  stats=TestStats(3, 1))
+        os_helper.unlink(marker_filename)
+
+        # with --fail-rerun, exit with code EXITCODE_RERUN_FAIL
+        # on the "FAILURE then SUCCESS" state.
+        output = self.run_tests("--rerun", "--fail-rerun", testname,
+                                exitcode=EXITCODE_RERUN_FAIL)
+        self.check_executed_tests(output, [testname],
+                                  rerun=Rerun(testname,
+                                              match="test_fail_once",
+                                              success=True),
+                                  stats=TestStats(3, 1))
+        os_helper.unlink(marker_filename)
+
+    def test_rerun_setup_class_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                @classmethod
+                def setUpClass(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun=Rerun(testname,
+                                              match="ExampleTests",
+                                              success=False),
+                                  stats=0)
+
+    def test_rerun_teardown_class_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                @classmethod
+                def tearDownClass(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun=Rerun(testname,
+                                              match="ExampleTests",
+                                              success=False),
+                                  stats=2)
+
+    def test_rerun_setup_module_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            def setUpModule():
+                raise RuntimeError('Fail')
+
+            class ExampleTests(unittest.TestCase):
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun=Rerun(testname,
+                                              match=None,
+                                              success=False),
+                                  stats=0)
+
+    def test_rerun_teardown_module_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            def tearDownModule():
+                raise RuntimeError('Fail')
+
+            class ExampleTests(unittest.TestCase):
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [testname],
-                                  rerun=testname)
+                                  failed=[testname],
+                                  rerun=Rerun(testname,
+                                              match=None,
+                                              success=False),
+                                  stats=2)
+
+    def test_rerun_setup_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                def setUp(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
+
+    def test_rerun_teardown_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                def tearDown(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
+
+    def test_rerun_async_setup_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.IsolatedAsyncioTestCase):
+                async def asyncSetUp(self):
+                    raise RuntimeError('Fail')
+
+                async def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
+
+    def test_rerun_async_teardown_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.IsolatedAsyncioTestCase):
+                async def asyncTearDown(self):
+                    raise RuntimeError('Fail')
+
+                async def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
 
     def test_no_tests_ran(self):
         code = textwrap.dedent("""
             import unittest
 
             class Tests(unittest.TestCase):
                 def test_bug(self):
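The rerun hook-failure tests above feed a failing test id through name normalization so that --rerun can re-run just the failing method where possible; Rerun.match is the resulting pattern ('ExampleTests' for class-level hook failures, None for module-level ones, where the whole file must be re-run). A sketch consistent with the TestUtils.test_normalize_test_name assertions near the end of this file (illustrative, not the actual implementation):

    def normalize_test_name(test_full_name, *, is_error=False):
        # 'test_access (test.test_os.FileTests.test_access)' -> 'test_access'
        short_name = test_full_name.split()[0]
        if is_error and short_name in ('setUpModule', 'tearDownModule'):
            return None  # module-level hook failed: re-run the whole file
        if is_error and short_name in ('setUpClass', 'tearDownClass'):
            # fall back to the class name from '... (pkg.module.Class)'
            return test_full_name.split('(')[1].strip(')').split('.')[-1]
        return short_name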
@@ -1145,8 +1689,11 @@ def test_bug(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
-        self.check_executed_tests(output, [testname], no_test_ran=testname)
+        output = self.run_tests(testname, "-m", "nosuchtest",
+                                exitcode=EXITCODE_NO_TESTS_RAN)
+        self.check_executed_tests(output, [testname],
+                                  run_no_tests=testname,
+                                  stats=0, filtered=True)
 
     def test_no_tests_ran_skip(self):
         code = textwrap.dedent("""
             import unittest
@@ -1158,8 +1705,9 @@ def test_skipped(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests(testname, exitcode=0)
-        self.check_executed_tests(output, [testname])
+        output = self.run_tests(testname)
+        self.check_executed_tests(output, [testname],
+                                  stats=TestStats(1, skipped=1))
 
     def test_no_tests_ran_multiple_tests_nonexistent(self):
         code = textwrap.dedent("""
             import unittest
@@ -1172,9 +1720,11 @@ def test_bug(self):
         testname = self.create_test(code=code)
         testname2 = self.create_test(code=code)
 
-        output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
+        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
+                                exitcode=EXITCODE_NO_TESTS_RAN)
         self.check_executed_tests(output, [testname, testname2],
-                                  no_test_ran=[testname, testname2])
+                                  run_no_tests=[testname, testname2],
+                                  stats=0, filtered=True)
 
     def test_no_test_ran_some_test_exist_some_not(self):
         code = textwrap.dedent("""
             import unittest
@@ -1197,10 +1747,15 @@ def test_other_bug(self):
         output = self.run_tests(testname, testname2, "-m", "nosuchtest",
                                 "-m", "test_other_bug", exitcode=0)
         self.check_executed_tests(output, [testname, testname2],
-                                  no_test_ran=[testname])
+                                  run_no_tests=[testname],
+                                  stats=1, filtered=True)
 
     @support.cpython_only
-    def test_findleaks(self):
+    def test_uncollectable(self):
+        try:
+            import _testcapi
+        except ImportError:
+            raise unittest.SkipTest("requires _testcapi")
         code = textwrap.dedent(r"""
             import _testcapi
             import gc
@@ -1220,19 +1775,13 @@ def test_garbage(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
-        self.check_executed_tests(output, [testname],
-                                  env_changed=[testname],
-                                  fail_env_changed=True)
-
-        # --findleaks is now basically an alias to --fail-env-changed
-        output = self.run_tests("--findleaks", testname, exitcode=3)
+        output = self.run_tests("--fail-env-changed", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
         self.check_executed_tests(output, [testname],
                                   env_changed=[testname],
-                                  fail_env_changed=True)
+                                  fail_env_changed=True,
+                                  stats=1)
 
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_multiprocessing_timeout(self):
         code = textwrap.dedent(r"""
             import time
@@ -1254,14 +1803,130 @@ def test_sleep(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
+        output = self.run_tests("-j2", "--timeout=1.0", testname,
+                                exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [testname],
-                                  failed=testname)
+                                  failed=testname, stats=0)
         self.assertRegex(output,
                          re.compile('%s timed out' % testname, re.MULTILINE))
 
+    def test_unraisable_exc(self):
+        # --fail-env-changed must catch unraisable exception.
+        # The exception must be displayed even if sys.stderr is redirected.
+        code = textwrap.dedent(r"""
+            import unittest
+            import weakref
+            from test.support import captured_stderr
+
+            class MyObject:
+                pass
+
+            def weakref_callback(obj):
+                raise Exception("weakref callback bug")
+
+            class Tests(unittest.TestCase):
+                def test_unraisable_exc(self):
+                    obj = MyObject()
+                    ref = weakref.ref(obj, weakref_callback)
+                    with captured_stderr() as stderr:
+                        # call weakref_callback() which logs
+                        # an unraisable exception
+                        obj = None
+                    self.assertEqual(stderr.getvalue(), '')
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--fail-env-changed", "-v", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
+        self.check_executed_tests(output, [testname],
+                                  env_changed=[testname],
+                                  fail_env_changed=True,
+                                  stats=1)
+        self.assertIn("Warning -- Unraisable exception", output)
+        self.assertIn("Exception: weakref callback bug", output)
+
+    def test_threading_excepthook(self):
+        # --fail-env-changed must catch uncaught thread exception.
+        # The exception must be displayed even if sys.stderr is redirected.
+        code = textwrap.dedent(r"""
+            import threading
+            import unittest
+            from test.support import captured_stderr
+
+            class MyObject:
+                pass
+
+            def func_bug():
+                raise Exception("bug in thread")
+
+            class Tests(unittest.TestCase):
+                def test_threading_excepthook(self):
+                    with captured_stderr() as stderr:
+                        thread = threading.Thread(target=func_bug)
+                        thread.start()
+                        thread.join()
+                    self.assertEqual(stderr.getvalue(), '')
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--fail-env-changed", "-v", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
+        self.check_executed_tests(output, [testname],
+                                  env_changed=[testname],
+                                  fail_env_changed=True,
+                                  stats=1)
+        self.assertIn("Warning -- Uncaught thread exception", output)
+        self.assertIn("Exception: bug in thread", output)
+
+    def test_print_warning(self):
+        # bpo-45410: The order of messages must be preserved when -W and
+        # support.print_warning() are used.
+        code = textwrap.dedent(r"""
+            import sys
+            import unittest
+            from test import support
+
+            class MyObject:
+                pass
+
+            def func_bug():
+                raise Exception("bug in thread")
+
+            class Tests(unittest.TestCase):
+                def test_print_warning(self):
+                    print("msg1: stdout")
+                    support.print_warning("msg2: print_warning")
+                    # Fail with ENV CHANGED to see print_warning() log
+                    support.environment_altered = True
+        """)
+        testname = self.create_test(code=code)
+
+        # Expect an output like:
+        #
+        #   test_threading_excepthook (test.test_x.Tests) ... msg1: stdout
+        #   Warning -- msg2: print_warning
+        #   ok
+        regex = (r"test_print_warning.*msg1: stdout\n"
+                 r"Warning -- msg2: print_warning\n"
+                 r"ok\n")
+        for option in ("-v", "-W"):
+            with self.subTest(option=option):
+                cmd = ["--fail-env-changed", option, testname]
+                output = self.run_tests(*cmd, exitcode=EXITCODE_ENV_CHANGED)
+                self.check_executed_tests(output, [testname],
+                                          env_changed=[testname],
+                                          fail_env_changed=True,
+                                          stats=1)
+                self.assertRegex(output, regex)
+
+    def test_unicode_guard_env(self):
+        guard = os.environ.get(setup.UNICODE_GUARD_ENV)
+        self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set")
+        if guard.isascii():
+            # Skip to signify that the env var value was changed by the user;
+            # possibly to something ASCII to work around Unicode issues.
+            self.skipTest("Modified guard")
+
     def test_cleanup(self):
         dirname = os.path.join(self.tmptestdir, "test_python_123")
         os.mkdir(dirname)
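test_unraisable_exc and test_threading_excepthook above rely on regrtest installing hooks that surface otherwise-silent errors as "Warning -- ..." lines. A self-contained illustration of catching a weakref-callback exception with the standard sys.unraisablehook (the hook body and message format here are illustrative, not regrtest's):

    import sys
    import weakref

    class MyObject:
        pass

    def callback(ref):
        raise Exception("weakref callback bug")

    def report_unraisable(unraisable):
        # regrtest-style: log the error instead of letting it vanish on stderr
        print(f"Warning -- Unraisable exception: {unraisable.exc_value!r}")

    sys.unraisablehook = report_unraisable
    obj = MyObject()
    ref = weakref.ref(obj, callback)
    del obj  # triggers the callback, whose exception goes to the hook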
@@ -1277,10 +1942,366 @@ def test_cleanup(self):
         for name in names:
             self.assertFalse(os.path.exists(name), name)
 
+    @unittest.skipIf(support.is_wasi,
+                     'checking temp files is not implemented on WASI')
+    def test_leak_tmp_file(self):
+        code = textwrap.dedent(r"""
+            import os.path
+            import tempfile
+            import unittest
+
+            class FileTests(unittest.TestCase):
+                def test_leak_tmp_file(self):
+                    filename = os.path.join(tempfile.gettempdir(), 'mytmpfile')
+                    with open(filename, "wb") as fp:
+                        fp.write(b'content')
+        """)
+        testnames = [self.create_test(code=code) for _ in range(3)]
+
+        output = self.run_tests("--fail-env-changed", "-v", "-j2", *testnames,
+                                exitcode=EXITCODE_ENV_CHANGED)
+        self.check_executed_tests(output, testnames,
+                                  env_changed=testnames,
+                                  fail_env_changed=True,
+                                  parallel=True,
+                                  stats=len(testnames))
+        for testname in testnames:
+            self.assertIn(f"Warning -- {testname} leaked temporary "
+                          f"files (1): mytmpfile",
+                          output)
+
+    def test_worker_decode_error(self):
+        # gh-109425: Use "backslashreplace" error handler to decode stdout.
+        if sys.platform == 'win32':
+            encoding = locale.getencoding()
+        else:
+            encoding = sys.stdout.encoding
+            if encoding is None:
+                encoding = sys.__stdout__.encoding
+                if encoding is None:
+                    self.skipTest("cannot get regrtest worker encoding")
+
+        nonascii = bytes(ch for ch in range(128, 256))
+        corrupted_output = b"nonascii:%s\n" % (nonascii,)
+        # gh-108989: On Windows, assertion errors are written in UTF-16: when
+        # decoded, each letter is followed by a NUL character.
+        assertion_failed = 'Assertion failed: tstate_is_alive(tstate)\n'
+        corrupted_output += assertion_failed.encode('utf-16-le')
+        try:
+            corrupted_output.decode(encoding)
+        except UnicodeDecodeError:
+            pass
+        else:
+            self.skipTest(f"{encoding} can decode non-ASCII bytes")
+
+        expected_line = corrupted_output.decode(encoding, 'backslashreplace')
+
+        code = textwrap.dedent(fr"""
+            import sys
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_pass(self):
+                    pass
+
+            # bytes which cannot be decoded from UTF-8
+            corrupted_output = {corrupted_output!a}
+            sys.stdout.buffer.write(corrupted_output)
+            sys.stdout.buffer.flush()
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname)
+        self.check_executed_tests(output, [testname],
+                                  parallel=True,
+                                  stats=1)
+        self.check_line(output, expected_line, regex=False)
+
+    def test_doctest(self):
+        code = textwrap.dedent(r'''
+            import doctest
+            import sys
+            from test import support
+
+            def my_function():
+                """
+                Pass:
+
+                >>> 1 + 1
+                2
+
+                Failure:
+
+                >>> 2 + 3
+                23
+                >>> 1 + 1
+                11
+
+                Skipped test (ignored):
+
+                >>> id(1.0)  # doctest: +SKIP
+                7948648
+                """
+
+            def load_tests(loader, tests, pattern):
+                tests.addTest(doctest.DocTestSuite())
+                return tests
+        ''')
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
+                                exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, [testname],
+                                  failed=[testname],
+                                  parallel=True,
+                                  stats=TestStats(1, 1, 0))
+
+    def _check_random_seed(self, run_workers: bool):
+        # gh-109276: When -r/--randomize is used, random.seed() is called
+        # with the same random seed before running each test file.
+        code = textwrap.dedent(r'''
+            import random
+            import unittest
+
+            class RandomSeedTest(unittest.TestCase):
+                def test_randint(self):
+                    numbers = [random.randint(0, 1000) for _ in range(10)]
+                    print(f"Random numbers: {numbers}")
+        ''')
+        tests = [self.create_test(name=f'test_random{i}', code=code)
+                 for i in range(1, 3+1)]
+
+        random_seed = 856_656_202
+        cmd = ["--randomize", f"--randseed={random_seed}"]
+        if run_workers:
+            # run as many worker processes as there are tests
+            cmd.append(f'-j{len(tests)}')
+        cmd.extend(tests)
+        output = self.run_tests(*cmd)
+
+        random.seed(random_seed)
+        # Make the assumption that nothing consumes entropy between
+        # libregrtest's setup_tests(), which calls random.seed(), and
+        # RandomSeedTest calling random.randint().
+        numbers = [random.randint(0, 1000) for _ in range(10)]
+        expected = f"Random numbers: {numbers}"
+
+        regex = r'^Random numbers: .*$'
+        matches = re.findall(regex, output, flags=re.MULTILINE)
+        self.assertEqual(matches, [expected] * len(tests))
+
+    def test_random_seed(self):
+        self._check_random_seed(run_workers=False)
+
+    def test_random_seed_workers(self):
+        self._check_random_seed(run_workers=True)
+
+    def test_python_command(self):
+        code = textwrap.dedent(r"""
+            import sys
+            import unittest
+
+            class WorkerTests(unittest.TestCase):
+                def test_dev_mode(self):
+                    self.assertTrue(sys.flags.dev_mode)
+        """)
+        tests = [self.create_test(code=code) for _ in range(3)]
+
+        # Custom Python command: "python -X dev"
+        python_cmd = [sys.executable, '-X', 'dev']
+        # test.libregrtest.cmdline uses shlex.split() to parse the Python
+        # command line string
+        python_cmd = shlex.join(python_cmd)
+
+        output = self.run_tests("--python", python_cmd, "-j0", *tests)
+        self.check_executed_tests(output, tests,
+                                  stats=len(tests), parallel=True)
+
+    def test_unload_tests(self):
+        # Test that unloading test modules does not break tests
+        # that import from other tests.
+        # The test execution order matters for this test.
+        # Both test_regrtest_a and test_regrtest_c, which are executed before
+        # and after test_regrtest_b, import a submodule from the test_regrtest_b
+        # package and use it in testing. test_regrtest_b itself does not import
+        # that submodule.
+        # Previously test_regrtest_c failed because test_regrtest_b.util in
+        # sys.modules was left after test_regrtest_a (making the import
+        # statement a no-op), but new test_regrtest_b without the util attribute
+        # was imported for test_regrtest_b.
+        testdir = os.path.join(os.path.dirname(__file__),
+                               'regrtestdata', 'import_from_tests')
+        tests = [f'test_regrtest_{name}' for name in ('a', 'b', 'c')]
+        args = ['-Wd', '-E', '-bb', '-m', 'test', '--testdir=%s' % testdir, *tests]
+        output = self.run_python(args)
+        self.check_executed_tests(output, tests, stats=3)
+
+    def check_add_python_opts(self, option):
+        # --fast-ci and --slow-ci add "-u -W default -bb -E" options to Python
+        try:
+            import _testinternalcapi
+        except ImportError:
+            raise unittest.SkipTest("requires _testinternalcapi")
+        code = textwrap.dedent(r"""
+            import sys
+            import unittest
+            from test import support
+            try:
+                from _testinternalcapi import get_config
+            except ImportError:
+                get_config = None
+
+            # WASI/WASM buildbots don't use -E option
+            use_environment = (support.is_emscripten or support.is_wasi)
+
+            class WorkerTests(unittest.TestCase):
+                @unittest.skipUnless(get_config is None, 'need get_config()')
+                def test_config(self):
+                    config = get_config()['config']
+                    # -u option
+                    self.assertEqual(config['buffered_stdio'], 0)
+                    # -W default option
+                    self.assertTrue(config['warnoptions'], ['default'])
+                    # -bb option
+                    self.assertTrue(config['bytes_warning'], 2)
+                    # -E option
+                    self.assertTrue(config['use_environment'], use_environment)
+
+                def test_python_opts(self):
+                    # -u option
+                    self.assertTrue(sys.__stdout__.write_through)
+                    self.assertTrue(sys.__stderr__.write_through)
+
+                    # -W default option
+                    self.assertTrue(sys.warnoptions, ['default'])
+
+                    # -bb option
+                    self.assertEqual(sys.flags.bytes_warning, 2)
+
+                    # -E option
+                    self.assertEqual(not sys.flags.ignore_environment,
+                                     use_environment)
+        """)
+        testname = self.create_test(code=code)
+
+        # Use directly subprocess to control the exact command line
+        cmd = [sys.executable,
+               "-m", "test", option,
+               f'--testdir={self.tmptestdir}',
+               testname]
+        proc = subprocess.run(cmd,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.STDOUT,
+                              text=True)
+        self.assertEqual(proc.returncode, 0, proc)
+
+    def test_add_python_opts(self):
+        for opt in ("--fast-ci", "--slow-ci"):
+            with self.subTest(opt=opt):
+                self.check_add_python_opts(opt)
+
+    # gh-76319: Raising SIGSEGV on Android may not cause a crash.
+    @unittest.skipIf(support.is_android,
+                     'raising SIGSEGV on Android is unreliable')
+    def test_worker_output_on_failure(self):
+        try:
+            from faulthandler import _sigsegv
+        except ImportError:
+            self.skipTest("need faulthandler._sigsegv")
+
+        code = textwrap.dedent(r"""
+            import faulthandler
+            import unittest
+            from test import support
+
+            class CrashTests(unittest.TestCase):
+                def test_crash(self):
+                    print("just before crash!", flush=True)
+
+                    with support.SuppressCrashReport():
+                        faulthandler._sigsegv(True)
+        """)
+        testname = self.create_test(code=code)
+
+        # Sanitizers must not handle SIGSEGV (ex: for test_enable_fd())
+        env = dict(os.environ)
+        option = 'handle_segv=0'
+        support.set_sanitizer_env_var(env, option)
+
+        output = self.run_tests("-j1", testname,
+                                exitcode=EXITCODE_BAD_TEST,
+                                env=env)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  stats=0, parallel=True)
+        if not support.MS_WINDOWS:
+            exitcode = -int(signal.SIGSEGV)
+            self.assertIn(f"Exit code {exitcode} (SIGSEGV)", output)
+        self.check_line(output, "just before crash!", full=True, regex=False)
+
+    def test_verbose3(self):
+        code = textwrap.dedent(r"""
+            import unittest
+            from test import support
+
+            class VerboseTests(unittest.TestCase):
+                def test_pass(self):
+                    print("SPAM SPAM SPAM")
+        """)
+        testname = self.create_test(code=code)
+
+        # Run sequentially
+        output = self.run_tests("--verbose3", testname)
+        self.check_executed_tests(output, testname, stats=1)
+        self.assertNotIn('SPAM SPAM SPAM', output)
+
+        # -R option needs a debug build
+        if support.Py_DEBUG:
+            # Check for reference leaks, run in parallel
+            output = self.run_tests("-R", "3:3", "-j1", "--verbose3", testname)
+            self.check_executed_tests(output, testname, stats=1, parallel=True)
+            self.assertNotIn('SPAM SPAM SPAM', output)
+
+    def test_xml(self):
+        code = textwrap.dedent(r"""
+            import unittest
+            from test import support
+
+            class VerboseTests(unittest.TestCase):
+                def test_failed(self):
+                    print("abc \x1b def")
+                    self.fail()
+        """)
+        testname = self.create_test(code=code)
+
+        # Run sequentially
+        filename = os_helper.TESTFN
+        self.addCleanup(os_helper.unlink, filename)
+
+        output = self.run_tests(testname, "--junit-xml", filename,
+                                exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=testname,
+                                  stats=TestStats(1, 1, 0))
+
+        # Test generated XML
+        with open(filename, encoding="utf8") as fp:
+            content = fp.read()
+
+        testsuite = ElementTree.fromstring(content)
+        self.assertEqual(int(testsuite.get('tests')), 1)
+        self.assertEqual(int(testsuite.get('errors')), 0)
+        self.assertEqual(int(testsuite.get('failures')), 1)
+
+        testcase = testsuite[0][0]
+        self.assertEqual(testcase.get('status'), 'run')
+        self.assertEqual(testcase.get('result'), 'completed')
+        self.assertGreater(float(testcase.get('time')), 0)
+        for out in testcase.iter('system-out'):
+            self.assertEqual(out.text, r"abc \x1b def")
+
 
 class TestUtils(unittest.TestCase):
-    # TODO: RUSTPYTHON
-    @unittest.expectedFailure
     def test_format_duration(self):
         self.assertEqual(utils.format_duration(0),
                          '0 ms')
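test_worker_decode_error in the hunk above depends on the "backslashreplace" error handler: undecodable worker output is preserved as escape sequences instead of aborting the run with a UnicodeDecodeError. The core behavior in isolation:

    nonascii = bytes(range(128, 256))
    text = nonascii.decode('ascii', errors='backslashreplace')
    # Each undecodable byte becomes a literal escape such as '\x80',
    # keeping the output printable and lossless enough to debug.
    assert text.startswith('\\x80\\x81')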
@@ -1303,6 +2324,184 @@ def test_format_duration(self):
         self.assertEqual(utils.format_duration(3 * 3600 + 1),
                          '3 hour 1 sec')
 
+    def test_normalize_test_name(self):
+        normalize = normalize_test_name
+        self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'),
+                         'test_access')
+        self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True),
+                         'ChownFileTests')
+        self.assertEqual(normalize('test_success (test.test_bug.ExampleTests.test_success)', is_error=True),
+                         'test_success')
+        self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True))
+        self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True))
+
+    def test_get_signal_name(self):
+        for exitcode, expected in (
+            (-int(signal.SIGINT), 'SIGINT'),
+            (-int(signal.SIGSEGV), 'SIGSEGV'),
+            (128 + int(signal.SIGABRT), 'SIGABRT'),
+            (3221225477, "STATUS_ACCESS_VIOLATION"),
+            (0xC00000FD, "STATUS_STACK_OVERFLOW"),
+        ):
+            self.assertEqual(utils.get_signal_name(exitcode), expected, exitcode)
+
+    def test_format_resources(self):
+        format_resources = utils.format_resources
+        ALL_RESOURCES = utils.ALL_RESOURCES
+        self.assertEqual(
+            format_resources(("network",)),
+            'resources (1): network')
+        self.assertEqual(
+            format_resources(("audio", "decimal", "network")),
+            'resources (3): audio,decimal,network')
+        self.assertEqual(
+            format_resources(ALL_RESOURCES),
+            'resources: all')
+        self.assertEqual(
+            format_resources(tuple(name for name in ALL_RESOURCES
+                                   if name != "cpu")),
+            'resources: all,-cpu')
+        self.assertEqual(
+            format_resources((*ALL_RESOURCES, "tzdata")),
+            'resources: all,tzdata')
+
+    def test_match_test(self):
+        class Test:
+            def __init__(self, test_id):
+                self.test_id = test_id
+
+            def id(self):
+                return self.test_id
+
+        # Restore patterns once the test completes
+        patterns = get_match_tests()
+        self.addCleanup(set_match_tests, patterns)
+
+        test_access = Test('test.test_os.FileTests.test_access')
+        test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir')
+        test_copy = Test('test.test_shutil.TestCopy.test_copy')
+
+        # Test acceptance
+        with support.swap_attr(support, '_test_matchers', ()):
+            # match all
+            set_match_tests([])
+            self.assertTrue(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+
+            # match all using None
+            set_match_tests(None)
+            self.assertTrue(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+
+            # match the full test identifier
+            set_match_tests([(test_access.id(), True)])
+            self.assertTrue(match_test(test_access))
+            self.assertFalse(match_test(test_chdir))
+
+            # match the module name
+            set_match_tests([('test_os', True)])
+            self.assertTrue(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+            self.assertFalse(match_test(test_copy))
+
+            # Test '*' pattern
+            set_match_tests([('test_*', True)])
+            self.assertTrue(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+
+            # Test case sensitivity
+            set_match_tests([('filetests', True)])
+            self.assertFalse(match_test(test_access))
+            set_match_tests([('FileTests', True)])
+            self.assertTrue(match_test(test_access))
+
+            # Test pattern containing '.' and a '*' metacharacter
+            set_match_tests([('*test_os.*.test_*', True)])
+            self.assertTrue(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+            self.assertFalse(match_test(test_copy))
+
+            # Multiple patterns
+            set_match_tests([(test_access.id(), True), (test_chdir.id(), True)])
+            self.assertTrue(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+            self.assertFalse(match_test(test_copy))
+
+            set_match_tests([('test_access', True), ('DONTMATCH', True)])
+            self.assertTrue(match_test(test_access))
+            self.assertFalse(match_test(test_chdir))
+
+        # Test rejection
+        with support.swap_attr(support, '_test_matchers', ()):
+            # match the full test identifier
+            set_match_tests([(test_access.id(), False)])
+            self.assertFalse(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+
+            # match the module name
+            set_match_tests([('test_os', False)])
+            self.assertFalse(match_test(test_access))
+            self.assertFalse(match_test(test_chdir))
+            self.assertTrue(match_test(test_copy))
+
+            # Test '*' pattern
+            set_match_tests([('test_*', False)])
+            self.assertFalse(match_test(test_access))
+            self.assertFalse(match_test(test_chdir))
+
+            # Test case sensitivity
+            set_match_tests([('filetests', False)])
+            self.assertTrue(match_test(test_access))
+            set_match_tests([('FileTests', False)])
+            self.assertFalse(match_test(test_access))
+
+            # Test pattern containing '.' and a '*' metacharacter
+            set_match_tests([('*test_os.*.test_*', False)])
+            self.assertFalse(match_test(test_access))
+            self.assertFalse(match_test(test_chdir))
+            self.assertTrue(match_test(test_copy))
+
+            # Multiple patterns
+            set_match_tests([(test_access.id(), False), (test_chdir.id(), False)])
+            self.assertFalse(match_test(test_access))
+            self.assertFalse(match_test(test_chdir))
+            self.assertTrue(match_test(test_copy))
+
+            set_match_tests([('test_access', False), ('DONTMATCH', False)])
+            self.assertFalse(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+
+        # Test mixed filters
+        with support.swap_attr(support, '_test_matchers', ()):
+            set_match_tests([('*test_os', False), ('test_access', True)])
+            self.assertTrue(match_test(test_access))
+            self.assertFalse(match_test(test_chdir))
+            self.assertTrue(match_test(test_copy))
+
+            set_match_tests([('*test_os', True), ('test_access', False)])
+            self.assertFalse(match_test(test_access))
+            self.assertTrue(match_test(test_chdir))
+            self.assertFalse(match_test(test_copy))
+
+    def test_sanitize_xml(self):
+        sanitize_xml = utils.sanitize_xml
+
+        # escape invalid XML characters
+        self.assertEqual(sanitize_xml('abc \x1b\x1f def'),
+                         r'abc \x1b\x1f def')
+        self.assertEqual(sanitize_xml('nul:\x00, bell:\x07'),
+                         r'nul:\x00, bell:\x07')
+        self.assertEqual(sanitize_xml('surrogate:\uDC80'),
+                         r'surrogate:\udc80')
+        self.assertEqual(sanitize_xml('illegal \uFFFE and \uFFFF'),
+                         r'illegal \ufffe and \uffff')
+
+        # no escape for valid XML characters
+        self.assertEqual(sanitize_xml('a\n\tb'),
+                         'a\n\tb')
+        self.assertEqual(sanitize_xml('valid t\xe9xt \u20ac'),
+                         'valid t\xe9xt \u20ac')
+
 
 if __name__ == '__main__':
     unittest.main()
self.test_get_attribute) + self.test_get_attribute) self.assertRaises(unittest.SkipTest, support.get_attribute, self, "foo") @unittest.skip("failing buildbots") @@ -414,11 +415,11 @@ def test_detect_api_mismatch__ignore(self): ignore = ['attribute1', 'attribute3', '__magic_2__', 'not_in_either'] missing_items = support.detect_api_mismatch( - self.RefClass, self.OtherClass, ignore=ignore) + self.RefClass, self.OtherClass, ignore=ignore) self.assertEqual(set(), missing_items) missing_items = support.detect_api_mismatch( - self.OtherClass, self.RefClass, ignore=ignore) + self.OtherClass, self.RefClass, ignore=ignore) self.assertEqual(set(), missing_items) # TODO: RUSTPYTHON @@ -433,10 +434,7 @@ def test_check__all__(self): extra = { 'TextTestResult', - 'findTestCases', - 'getTestCaseNames', 'installHandler', - 'makeSuite', } not_exported = {'load_tests', "TestProgram", "BaseTestSuite"} support.check__all__(self, @@ -507,31 +505,31 @@ def check_options(self, args, func, expected=None): def test_args_from_interpreter_flags(self): # Test test.support.args_from_interpreter_flags() for opts in ( - # no option - [], - # single option - ['-B'], - ['-s'], - ['-S'], - ['-E'], - ['-v'], - ['-b'], - ['-P'], - ['-q'], - ['-I'], - # same option multiple times - ['-bb'], - ['-vvv'], - # -W options - ['-Wignore'], - # -X options - ['-X', 'dev'], - ['-Wignore', '-X', 'dev'], - ['-X', 'faulthandler'], - ['-X', 'importtime'], - ['-X', 'showrefcount'], - ['-X', 'tracemalloc'], - ['-X', 'tracemalloc=3'], + # no option + [], + # single option + ['-B'], + ['-s'], + ['-S'], + ['-E'], + ['-v'], + ['-b'], + ['-P'], + ['-q'], + ['-I'], + # same option multiple times + ['-bb'], + ['-vvv'], + # -W options + ['-Wignore'], + # -X options + ['-X', 'dev'], + ['-Wignore', '-X', 'dev'], + ['-X', 'faulthandler'], + ['-X', 'importtime'], + ['-X', 'showrefcount'], + ['-X', 'tracemalloc'], + ['-X', 'tracemalloc=3'], ): with self.subTest(opts=opts): self.check_options(opts, 'args_from_interpreter_flags') @@ -543,128 +541,23 @@ def test_args_from_interpreter_flags(self): def test_optim_args_from_interpreter_flags(self): # Test test.support.optim_args_from_interpreter_flags() for opts in ( - # no option - [], - ['-O'], - ['-OO'], - ['-OOOO'], + # no option + [], + ['-O'], + ['-OO'], + ['-OOOO'], ): with self.subTest(opts=opts): self.check_options(opts, 'optim_args_from_interpreter_flags') - def test_match_test(self): - class Test: - def __init__(self, test_id): - self.test_id = test_id - - def id(self): - return self.test_id - - test_access = Test('test.test_os.FileTests.test_access') - test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir') - - # Test acceptance - with support.swap_attr(support, '_match_test_func', None): - # match all - support.set_match_tests([]) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match all using None - support.set_match_tests(None, None) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match the full test identifier - support.set_match_tests([test_access.id()], None) - self.assertTrue(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # match the module name - support.set_match_tests(['test_os'], None) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # Test '*' pattern - support.set_match_tests(['test_*'], None) - self.assertTrue(support.match_test(test_access)) - 
self.assertTrue(support.match_test(test_chdir)) - - # Test case sensitivity - support.set_match_tests(['filetests'], None) - self.assertFalse(support.match_test(test_access)) - support.set_match_tests(['FileTests'], None) - self.assertTrue(support.match_test(test_access)) - - # Test pattern containing '.' and a '*' metacharacter - support.set_match_tests(['*test_os.*.test_*'], None) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # Multiple patterns - support.set_match_tests([test_access.id(), test_chdir.id()], None) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - support.set_match_tests(['test_access', 'DONTMATCH'], None) - self.assertTrue(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # Test rejection - with support.swap_attr(support, '_match_test_func', None): - # match all - support.set_match_tests(ignore_patterns=[]) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match all using None - support.set_match_tests(None, None) - self.assertTrue(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match the full test identifier - support.set_match_tests(None, [test_access.id()]) - self.assertFalse(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - # match the module name - support.set_match_tests(None, ['test_os']) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # Test '*' pattern - support.set_match_tests(None, ['test_*']) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # Test case sensitivity - support.set_match_tests(None, ['filetests']) - self.assertTrue(support.match_test(test_access)) - support.set_match_tests(None, ['FileTests']) - self.assertFalse(support.match_test(test_access)) - - # Test pattern containing '.' and a '*' metacharacter - support.set_match_tests(None, ['*test_os.*.test_*']) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - # Multiple patterns - support.set_match_tests(None, [test_access.id(), test_chdir.id()]) - self.assertFalse(support.match_test(test_access)) - self.assertFalse(support.match_test(test_chdir)) - - support.set_match_tests(None, ['test_access', 'DONTMATCH']) - self.assertFalse(support.match_test(test_access)) - self.assertTrue(support.match_test(test_chdir)) - - @unittest.skipIf(sys.platform.startswith("win"), "TODO: RUSTPYTHON; os.dup on windows") + @unittest.skipIf(support.is_apple_mobile, "Unstable on Apple Mobile") @unittest.skipIf(support.is_emscripten, "Unstable in Emscripten") @unittest.skipIf(support.is_wasi, "Unavailable on WASI") def test_fd_count(self): - # We cannot test the absolute value of fd_count(): on old Linux - # kernel or glibc versions, os.urandom() keeps a FD open on - # /dev/urandom device and Python has 4 FD opens instead of 3. - # Test is unstable on Emscripten. The platform starts and stops + # We cannot test the absolute value of fd_count(): on old Linux kernel + # or glibc versions, os.urandom() keeps a FD open on /dev/urandom + # device and Python has 4 FD opens instead of 3. Test is unstable on + # Emscripten and Apple Mobile platforms; these platforms start and stop # background threads that use pipes and epoll fds. 
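+        # The check below is therefore relative, not absolute: opening a
+        # single file must raise fd_count() by exactly one over whatever
+        # baseline the platform happens to have.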
start = os_helper.fd_count() fd = os.open(__file__, os.O_RDONLY) @@ -769,7 +662,81 @@ def recursive_function(depth): else: self.fail("RecursionError was not raised") - #self.assertEqual(available, 2) + def test_parse_memlimit(self): + parse = support._parse_memlimit + KiB = 1024 + MiB = KiB * 1024 + GiB = MiB * 1024 + TiB = GiB * 1024 + self.assertEqual(parse('0k'), 0) + self.assertEqual(parse('3k'), 3 * KiB) + self.assertEqual(parse('2.4m'), int(2.4 * MiB)) + self.assertEqual(parse('4g'), int(4 * GiB)) + self.assertEqual(parse('1t'), TiB) + + for limit in ('', '3', '3.5.10k', '10x'): + with self.subTest(limit=limit): + with self.assertRaises(ValueError): + parse(limit) + + def test_set_memlimit(self): + _4GiB = 4 * 1024 ** 3 + TiB = 1024 ** 4 + old_max_memuse = support.max_memuse + old_real_max_memuse = support.real_max_memuse + try: + if sys.maxsize > 2**32: + support.set_memlimit('4g') + self.assertEqual(support.max_memuse, _4GiB) + self.assertEqual(support.real_max_memuse, _4GiB) + + big = 2**100 // TiB + support.set_memlimit(f'{big}t') + self.assertEqual(support.max_memuse, sys.maxsize) + self.assertEqual(support.real_max_memuse, big * TiB) + else: + support.set_memlimit('4g') + self.assertEqual(support.max_memuse, sys.maxsize) + self.assertEqual(support.real_max_memuse, _4GiB) + finally: + support.max_memuse = old_max_memuse + support.real_max_memuse = old_real_max_memuse + + def test_copy_python_src_ignore(self): + # Get source directory + src_dir = sysconfig.get_config_var('abs_srcdir') + if not src_dir: + src_dir = sysconfig.get_config_var('srcdir') + src_dir = os.path.abspath(src_dir) + + # Check that the source code is available + if not os.path.exists(src_dir): + self.skipTest(f"cannot access Python source code directory:" + f" {src_dir!r}") + # Check that the landmark copy_python_src_ignore() expects is available + # (Previously we looked for 'Lib\os.py', which is always present on Windows.) 
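+        # (Unlike 'Lib/os.py', a 'Modules' directory exists only in a source
+        # checkout and not in an installed Python, so it should not match a
+        # plain installation by accident.)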
+ landmark = os.path.join(src_dir, 'Modules') + if not os.path.exists(landmark): + self.skipTest(f"cannot access Python source code directory:" + f" {landmark!r} landmark is missing") + + # Test support.copy_python_src_ignore() + + # Source code directory + ignored = {'.git', '__pycache__'} + names = os.listdir(src_dir) + self.assertEqual(support.copy_python_src_ignore(src_dir, names), + ignored | {'build'}) + + # Doc/ directory + path = os.path.join(src_dir, 'Doc') + self.assertEqual(support.copy_python_src_ignore(path, os.listdir(path)), + ignored | {'build', 'venv'}) + + # Another directory + path = os.path.join(src_dir, 'Objects') + self.assertEqual(support.copy_python_src_ignore(path, os.listdir(path)), + ignored) # XXX -follows a list of untested API # make_legacy_pyc @@ -782,12 +749,10 @@ def recursive_function(depth): # EnvironmentVarGuard # transient_internet # run_with_locale - # set_memlimit # bigmemtest # precisionbigmemtest # bigaddrspacetest # requires_resource - # run_doctest # threading_cleanup # reap_threads # can_symlink diff --git a/Lib/test/test_trace.py b/Lib/test/test_trace.py index a551a17fc1..cd4f11c821 100644 --- a/Lib/test/test_trace.py +++ b/Lib/test/test_trace.py @@ -387,9 +387,14 @@ def tearDown(self): rmtree(TESTFN) unlink(TESTFN) - def _coverage(self, tracer, - cmd='import test.support, test.test_pprint;' - 'test.support.run_unittest(test.test_pprint.QueryTestCase)'): + DEFAULT_SCRIPT = '''if True: + import unittest + from test.test_pprint import QueryTestCase + loader = unittest.TestLoader() + tests = loader.loadTestsFromTestCase(QueryTestCase) + tests(unittest.TestResult()) + ''' + def _coverage(self, tracer, cmd=DEFAULT_SCRIPT): tracer.run(cmd) r = tracer.results() r.write_results(show_missing=True, summary=True, coverdir=TESTFN) @@ -412,7 +417,7 @@ def test_coverage_ignore(self): libpath = os.path.normpath(os.path.dirname(os.path.dirname(__file__))) # sys.prefix does not work when running from a checkout tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix, - libpath], trace=0, count=1) + libpath], trace=0, count=1) with captured_stdout() as stdout: self._coverage(tracer) if os.path.exists(TESTFN): diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index 2b3678c124..46a15db28f 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -1898,6 +1898,8 @@ def test_make_weak_keyed_dict_repr(self): dict = weakref.WeakKeyDictionary() self.assertRegex(repr(dict), '') + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_threaded_weak_valued_setdefault(self): d = weakref.WeakValueDictionary() with collect_in_thread(): @@ -1906,6 +1908,8 @@ def test_threaded_weak_valued_setdefault(self): self.assertIsNot(x, None) # we never put None in there! del x + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_threaded_weak_valued_pop(self): d = weakref.WeakValueDictionary() with collect_in_thread(): @@ -1914,6 +1918,8 @@ def test_threaded_weak_valued_pop(self): x = d.pop(10, 10) self.assertIsNot(x, None) # we never put None in there! + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_threaded_weak_valued_consistency(self): # Issue #28427: old keys should not remove new values from # WeakValueDictionary when collecting from another thread. 
@@ -2283,7 +2289,8 @@ def test_atexit(self): __test__ = {'libreftest' : libreftest} def load_tests(loader, tests, pattern): - tests.addTest(doctest.DocTestSuite()) + # TODO: RUSTPYTHON + # tests.addTest(doctest.DocTestSuite()) return tests diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py index 19718b2a89..7660c62205 100644 --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -4205,8 +4205,6 @@ def setUp(self): if not pyET: raise unittest.SkipTest('only for the Python version') - # TODO: RUSTPYTHON - @unittest.expectedFailure # Test that the C accelerator was not imported for pyET def test_correct_import_pyET(self): # The type of methods defined in Python code is types.FunctionType, diff --git a/Lib/test/test_yield_from.py b/Lib/test/test_yield_from.py index 97acfd5413..ed75748dd9 100644 --- a/Lib/test/test_yield_from.py +++ b/Lib/test/test_yield_from.py @@ -999,6 +999,8 @@ def gen(): list(gen()) self.assertEqual(ret, 42) + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_close_with_cleared_frame(self): # See issue #17669. # From 3ff8f39a848a8b3abd28d87ee6db774c96bb5334 Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Tue, 25 Feb 2025 00:31:31 -0800 Subject: [PATCH 14/15] pass test_support Signed-off-by: Ashwin Naren --- Lib/test/test_support.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Lib/test/test_support.py b/Lib/test/test_support.py index bdf4ab4efa..3e86a28581 100644 --- a/Lib/test/test_support.py +++ b/Lib/test/test_support.py @@ -553,6 +553,8 @@ def test_optim_args_from_interpreter_flags(self): @unittest.skipIf(support.is_apple_mobile, "Unstable on Apple Mobile") @unittest.skipIf(support.is_emscripten, "Unstable in Emscripten") @unittest.skipIf(support.is_wasi, "Unavailable on WASI") + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_fd_count(self): # We cannot test the absolute value of fd_count(): on old Linux kernel # or glibc versions, os.urandom() keeps a FD open on /dev/urandom From 56834d63e87dc1a56bfbab9cc780f004e2414615 Mon Sep 17 00:00:00 2001 From: Ashwin Naren Date: Tue, 25 Feb 2025 00:31:53 -0800 Subject: [PATCH 15/15] fix some test_regrtest Signed-off-by: Ashwin Naren --- Lib/test/autotest.py | 5 +++++ Lib/test/regrtest.py | 2 +- Lib/test/test_regrtest.py | 6 ++++++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 Lib/test/autotest.py diff --git a/Lib/test/autotest.py b/Lib/test/autotest.py new file mode 100644 index 0000000000..b5a1fab404 --- /dev/null +++ b/Lib/test/autotest.py @@ -0,0 +1,5 @@ +# This should be equivalent to running regrtest.py from the cmdline. +# It can be especially handy if you're in an interactive shell, e.g., +# from test import autotest. 
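+#
+# Note that main() parses sys.argv itself, so any arguments already present
+# on the command line should be honored, just as when running regrtest
+# directly.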
+from test.libregrtest.main import main +main() diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py index 21b0edfd07..87c6531c73 100755 --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -43,7 +43,7 @@ def _main(): # sanity check assert __file__ == os.path.abspath(sys.argv[0]) - main() + main.main() if __name__ == '__main__': diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py index 14ac044e34..547903b3c1 100644 --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -2017,6 +2017,8 @@ def test_pass(self): stats=1) self.check_line(output, expected_line, regex=False) + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_doctest(self): code = textwrap.dedent(r''' import doctest @@ -2262,6 +2264,8 @@ def test_pass(self): self.check_executed_tests(output, testname, stats=1, parallel=True) self.assertNotIn('SPAM SPAM SPAM', output) + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_xml(self): code = textwrap.dedent(r""" import unittest @@ -2483,6 +2487,8 @@ def id(self): self.assertTrue(match_test(test_chdir)) self.assertFalse(match_test(test_copy)) + # TODO: RUSTPYTHON + @unittest.expectedFailure def test_sanitize_xml(self): sanitize_xml = utils.sanitize_xml