From f7f36509e2e81e9a20cfeadddd6608f2378ff26c Mon Sep 17 00:00:00 2001
From: Michael Droettboom
Date: Thu, 2 Nov 2023 12:10:51 -0400
Subject: [PATCH 1/8] Upgrade to pyperf 2.6.2 (#322)

---
 pyperformance/requirements/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyperformance/requirements/requirements.txt b/pyperformance/requirements/requirements.txt
index d4aa6631..16f07281 100644
--- a/pyperformance/requirements/requirements.txt
+++ b/pyperformance/requirements/requirements.txt
@@ -10,5 +10,5 @@ psutil==5.9.5
     # via
     #   -r requirements.in
     #   pyperf
-pyperf==2.6.1
+pyperf==2.6.2
     # via -r requirements.in

From 9756f98a51fd6891569ad8f14851d2f34dbf4cd8 Mon Sep 17 00:00:00 2001
From: Yan Yanchii
Date: Tue, 16 Jan 2024 13:05:09 +0100
Subject: [PATCH 2/8] Fix typos in benchmark.conf.sample (#324)

---
 doc/benchmark.conf.sample | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/benchmark.conf.sample b/doc/benchmark.conf.sample
index ea69746e..528d33e0 100644
--- a/doc/benchmark.conf.sample
+++ b/doc/benchmark.conf.sample
@@ -4,10 +4,10 @@
 # - results of patched Python are written into json_dir/patch/
 json_dir = ~/json
 
-# If True, compile CPython is debug mode (LTO and PGO disabled),
+# If True, compile CPython in debug mode (LTO and PGO disabled),
 # run benchmarks with --debug-single-sample, and disable upload.
 #
-# Use this option used to quickly test a configuration.
+# Use this option to quickly test a configuration.
 debug = False
 

From 32c6bbf2b101a15a9b6bd555b5c12314bae2c232 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nicol=C3=B2=20Boschi?=
Date: Tue, 16 Jan 2024 13:33:35 +0100
Subject: [PATCH 3/8] [doc] Fix docs typo (#325)

---
 doc/index.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/index.rst b/doc/index.rst
index 7a15e4ee..4f7cf71b 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -14,7 +14,7 @@ possible.
 
 pyperformance is distributed under the MIT license.
 
-Documenation:
+Documentation:
 
 .. toctree::
    :maxdepth: 2

From dcf71dcb0783adae1b6a356d71e8d65d7b769981 Mon Sep 17 00:00:00 2001
From: Ken Jin
Date: Sat, 27 Jan 2024 20:01:24 +0800
Subject: [PATCH 4/8] Move jobs to correct location in benchmark.conf.sample
 (#326)

---
 doc/benchmark.conf.sample | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/benchmark.conf.sample b/doc/benchmark.conf.sample
index 528d33e0..b1224460 100644
--- a/doc/benchmark.conf.sample
+++ b/doc/benchmark.conf.sample
@@ -59,6 +59,9 @@ pkg_only =
 # really understand what you are doing!
 install = True
 
+# Specify '-j' parameter in 'make' command
+jobs = 8
+
 [run_benchmark]
 
 # Run "sudo python3 -m pyperf system tune" before running benchmarks?
@@ -79,9 +82,6 @@ affinity =
 # disabled.
 upload = False
 
-# Specify '-j' parameter in 'make' command
-jobs = 8
-
 # Configuration to upload results to a Codespeed website
 [upload]

From 79f80a4c75d895ff21863ce2645aabcd55596c24 Mon Sep 17 00:00:00 2001
From: Michael Droettboom
Date: Thu, 1 Feb 2024 19:31:31 -0500
Subject: [PATCH 5/8] Add a feature for using the same number of loops as a
 previous run (#327)

Motivation: On the Faster CPython team, we often collect pystats
(counters of various interpreter events) by running the benchmark suite.
It is very useful to compare the stats between two commits to see how a
pull request affects the interpreter.
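
For illustration, the intended workflow looks roughly like this (file
names are hypothetical; --same-loops is the option this patch adds):

    # Base commit: the number of loops is calibrated as usual and
    # recorded in the metadata of the output file.
    pyperformance run -o base.json

    # Pull-request commit: reuse the loop counts recorded in base.json
    # instead of recalibrating.
    pyperformance run --same-loops base.json -o patch.json
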
Unfortunately, with pyperformance's default behavior, where the number of
loops is automatically calibrated, each benchmark may not be run the same
number of times from run to run, making the data hard to compare.

This change adds a new argument to the "run" command which will use the
same number of loops as a previous run. The loop count for each benchmark
is looked up from the metadata in the .json output of that previous run
and passed to the underlying call to pyperf using the --loops argument.

Additionally, this modifies one of the benchmarks (sqlglot) to be
compatible with that scheme. sqlglot was the only run_benchmark.py script
that ran multiple benchmarks in a single invocation, which made it
impossible to set the number of loops independently for each of those
benchmarks. It has been updated to use the pattern from other "suites" of
benchmarks (e.g. async_tree), where each benchmark has its own .toml file
and is run independently. This should still be backward compatible with
older data collected from this benchmark, but "pyperformance run -b
sqlglot" will now only run a single benchmark.
---
 doc/changelog.rst                                  |  3 ++
 doc/usage.rst                                      |  4 +++
 pyperformance/cli.py                               |  4 +++
 pyperformance/compile.py                           |  3 ++
 pyperformance/data-files/benchmarks/MANIFEST       |  3 ++
 .../bm_sqlglot/bm_sqlglot_optimize.toml            |  3 ++
 .../bm_sqlglot/bm_sqlglot_parse.toml               |  3 ++
 .../bm_sqlglot/bm_sqlglot_transpile.toml           |  3 ++
 .../benchmarks/bm_sqlglot/pyproject.toml           |  1 +
 .../benchmarks/bm_sqlglot/run_benchmark.py         | 31 ++++++++++++++++---
 pyperformance/run.py                               | 25 +++++++++++++++
 11 files changed, 78 insertions(+), 5 deletions(-)
 create mode 100644 pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_optimize.toml
 create mode 100644 pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_parse.toml
 create mode 100644 pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_transpile.toml

diff --git a/doc/changelog.rst b/doc/changelog.rst
index de5c0126..070d2bb5 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1,6 +1,9 @@
 Changelog
 =========
 
+* Add a --same-loops option to the run command to use the exact same number of
+  loops as a previous run (without recalibrating).
+
 Version 1.10.0 (2023-10-22)
 --------------
 * Add benchmark for asyncio_webockets

diff --git a/doc/usage.rst b/doc/usage.rst
index c7336407..34706144 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -140,6 +140,10 @@ options::
 
     -p PYTHON, --python PYTHON
                         Python executable (default: use running Python)
+    --same-loops SAME_LOOPS
+                        Use the same number of loops as a previous run
+                        (i.e., don't recalibrate). Should be a path to a
+                        .json file from a previous run.
 
 show
 ----

diff --git a/pyperformance/cli.py b/pyperformance/cli.py
index 34544a11..3d83772b 100644
--- a/pyperformance/cli.py
+++ b/pyperformance/cli.py
@@ -75,6 +75,10 @@ def parse_args():
     cmd.add_argument("--min-time", metavar="MIN_TIME",
                      help="Minimum duration in seconds of a single "
                           "value, used to calibrate the number of loops")
+    cmd.add_argument("--same-loops",
+                     help="Use the same number of loops as a previous run "
+                          "(i.e., don't recalibrate). Should be a path to a "
+                          ".json file from a previous run.")
     filter_opts(cmd)
 
     # show

diff --git a/pyperformance/compile.py b/pyperformance/compile.py
index 0a7c8332..8f26aded 100644
--- a/pyperformance/compile.py
+++ b/pyperformance/compile.py
@@ -543,6 +543,8 @@ def run_benchmark(self, python=None):
             cmd.extend(('--affinity', self.conf.affinity))
         if self.conf.debug:
            cmd.append('--debug-single-value')
+        if self.conf.same_loops:
+            cmd.append('--same-loops=%s' % self.conf.same_loops)
 
         exitcode = self.run_nocheck(*cmd)
         if os.path.exists(self.filename):
@@ -812,6 +814,7 @@ def getint(section, key, default=None):
     conf.benchmarks = getstr('run_benchmark', 'benchmarks', default='')
     conf.affinity = getstr('run_benchmark', 'affinity', default='')
     conf.upload = getboolean('run_benchmark', 'upload', False)
+    conf.same_loops = getfile('run_benchmark', 'same_loops', default='')
 
     # paths
     conf.build_dir = os.path.join(conf.directory, 'build')

diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST
index b4d22f7b..3210b97f 100644
--- a/pyperformance/data-files/benchmarks/MANIFEST
+++ b/pyperformance/data-files/benchmarks/MANIFEST
@@ -77,6 +77,9 @@ spectral_norm
 sqlalchemy_declarative
 sqlalchemy_imperative
 sqlglot
+sqlglot_parse
+sqlglot_transpile
+sqlglot_optimize
 sqlite_synth
 sympy
 telco

diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_optimize.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_optimize.toml
new file mode 100644
index 00000000..7f59f0b8
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_optimize.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "sqlglot_optimize"
+extra_opts = ["optimize"]

diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_parse.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_parse.toml
new file mode 100644
index 00000000..b886688a
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_parse.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "sqlglot_parse"
+extra_opts = ["parse"]

diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_transpile.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_transpile.toml
new file mode 100644
index 00000000..25a26a3f
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_transpile.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "sqlglot_transpile"
+extra_opts = ["transpile"]

diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml
index 6e59a668..cb8656a2 100644
--- a/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml
@@ -10,3 +10,4 @@ dynamic = ["version"]
 
 [tool.pyperformance]
 name = "sqlglot"
+extra_opts = ["normalize"]

diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py
index fa7d9efb..f8fbb79a 100644
--- a/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py
@@ -164,10 +164,31 @@ def bench_normalize(loops):
     return elapsed
 
 
+BENCHMARKS = {
+    "parse": bench_parse,
+    "transpile": bench_transpile,
+    "optimize": bench_optimize,
+    "normalize": bench_normalize
+}
+
+
+def add_cmdline_args(cmd, args):
+    cmd.append(args.benchmark)
+
+
+def add_parser_args(parser):
+    parser.add_argument(
"benchmark", + choices=BENCHMARKS, + help="Which benchmark to run." + ) + + if __name__ == "__main__": - runner = pyperf.Runner() + runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) runner.metadata['description'] = "SQLGlot benchmark" - runner.bench_time_func("sqlglot_parse", bench_parse) - runner.bench_time_func("sqlglot_transpile", bench_transpile) - runner.bench_time_func("sqlglot_optimize", bench_optimize) - runner.bench_time_func("sqlglot_normalize", bench_normalize) + add_parser_args(runner.argparser) + args = runner.parse_args() + benchmark = args.benchmark + + runner.bench_time_func(f"sqlglot_{benchmark}", BENCHMARKS[benchmark]) diff --git a/pyperformance/run.py b/pyperformance/run.py index aa2b3744..f572181c 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -1,5 +1,6 @@ from collections import namedtuple import hashlib +import json import sys import time import traceback @@ -50,7 +51,28 @@ def get_run_id(python, bench=None): return RunID(py_id, compat_id, bench, ts) +def get_loops_from_file(filename): + with open(filename) as fd: + data = json.load(fd) + + loops = {} + for benchmark in data["benchmarks"]: + metadata = benchmark.get("metadata", data["metadata"]) + name = metadata["name"] + if name.endswith("_none"): + name = name[:-len("_none")] + if "loops" in metadata: + loops[name] = metadata["loops"] + + return loops + + def run_benchmarks(should_run, python, options): + if options.same_loops is not None: + loops = get_loops_from_file(options.same_loops) + else: + loops = {} + to_run = sorted(should_run) info = _pythoninfo.get_info(python) @@ -136,6 +158,9 @@ def add_bench(dest_suite, obj): return dest_suite + if name in loops: + pyperf_opts.append(f"--loops={loops[name]}") + bench_venv, bench_runid = benchmarks.get(bench) if bench_venv is None: print("ERROR: Benchmark %s failed: could not install requirements" % name) From 16765924a3e845fe9c3c582b7576e5f1285955df Mon Sep 17 00:00:00 2001 From: Michael Droettboom Date: Tue, 5 Mar 2024 14:58:13 -0500 Subject: [PATCH 6/8] Fix the django_template benchmark (#329) This is broken by the removal of the cgi module in Python 3.13. This adds the legacy-cgi PyPI library as a dependency as a workaround. 
---
 .../data-files/benchmarks/bm_django_template/pyproject.toml   | 3 ++-
 .../data-files/benchmarks/bm_django_template/requirements.txt | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml
index 0b66d9d0..19772e54 100644
--- a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml
+++ b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml
@@ -1,9 +1,10 @@
 [project]
 name = "pyperformance_bm_django_template"
-requires-python = ">=3.8"
+requires-python = ">=3.10"
 dependencies = [
   "pyperf",
   "django",
+  "legacy-cgi",
 ]
 urls = {repository = "https://github.com/python/pyperformance"}
 dynamic = ["version"]

diff --git a/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt
index 4a3490bf..4b71dc07 100644
--- a/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt
+++ b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt
@@ -2,3 +2,4 @@ asgiref==3.3.4
 django==3.2.4
 pytz==2021.1
 sqlparse==0.4.1
+legacy-cgi==2.6
\ No newline at end of file

From 52a4c58f1ab28c3f17360f96b62db3ea902aad22 Mon Sep 17 00:00:00 2001
From: Donghee Na
Date: Fri, 8 Mar 2024 09:38:47 +0900
Subject: [PATCH 7/8] Upgrade to pyperf 2.6.3 (#330)

---
 pyperformance/requirements/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyperformance/requirements/requirements.txt b/pyperformance/requirements/requirements.txt
index 16f07281..7936dbb9 100644
--- a/pyperformance/requirements/requirements.txt
+++ b/pyperformance/requirements/requirements.txt
@@ -10,5 +10,5 @@ psutil==5.9.5
     # via
     #   -r requirements.in
     #   pyperf
-pyperf==2.6.2
+pyperf==2.6.3
     # via -r requirements.in

From ad7824c7700cbe1a107097099761c07ae2dee12c Mon Sep 17 00:00:00 2001
From: Donghee Na
Date: Sat, 9 Mar 2024 13:22:08 +0900
Subject: [PATCH 8/8] Prepare release 1.11.0 (#331)

---
 doc/changelog.rst         | 5 +++++
 pyperformance/__init__.py | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/doc/changelog.rst b/doc/changelog.rst
index 070d2bb5..3611e3c5 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1,8 +1,13 @@
 Changelog
 =========
 
+Version 1.11.0 (2024-03-09)
+--------------
 * Add a --same-loops option to the run command to use the exact same number of
   loops as a previous run (without recalibrating).
+* Bump pyperf to 2.6.3
+* Fix the django_template benchmark for compatibility with 3.13
+* Fix benchmark.conf.sample
 
 Version 1.10.0 (2023-10-22)
 --------------

diff --git a/pyperformance/__init__.py b/pyperformance/__init__.py
index 3941313b..b4efa911 100644
--- a/pyperformance/__init__.py
+++ b/pyperformance/__init__.py
@@ -2,7 +2,7 @@
 import sys
 
 
-VERSION = (1, 10, 0)
+VERSION = (1, 11, 0)
 __version__ = '.'.join(map(str, VERSION))