[libc++] Add an optional integration of libc++'s test suite with SPEC #156953
base: main
Conversation
This allows running the SPEC benchmarks as part of libc++'s own benchmarks by providing an external installation of SPEC.
@llvm/pr-subscribers-libcxx

Author: Louis Dionne (ldionne)

Changes

This allows running the SPEC benchmarks as part of libc++'s own benchmarks by providing an external installation of SPEC.

Full diff: https://github.com/llvm/llvm-project/pull/156953.diff

2 Files Affected:
- libcxx/test/benchmarks/spec.gen.py (added, +73)
- libcxx/utils/libcxx/test/params.py (+11)
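For context (a summary of the mechanism, not text from the patch): libc++'s `.gen.py` files are scripts whose stdout is split on `#--- <name>` markers into individual lit tests. The script below prints one such test per eligible SPEC benchmark, and the whole file is gated on the `enable-spec-benchmarks` feature, which only exists when the test suite is configured with the new `spec_dir` parameter (see the params.py change further down). A minimal sketch of that pattern, with placeholder benchmark names:

# Sketch only: each "#--- <name>.sh.test" stanza printed to stdout becomes its own lit test.
for benchmark in ["508.namd_r", "510.parest_r"]:  # placeholder names for illustration
    print(f"#--- {benchmark}.sh.test")
    print(f"RUN: echo would build and run {benchmark} here")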
diff --git a/libcxx/test/benchmarks/spec.gen.py b/libcxx/test/benchmarks/spec.gen.py
new file mode 100644
index 0000000000000..0994c9d54abfa
--- /dev/null
+++ b/libcxx/test/benchmarks/spec.gen.py
@@ -0,0 +1,73 @@
+# ===----------------------------------------------------------------------===##
+#
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# ===----------------------------------------------------------------------===##
+
+# REQUIRES: enable-spec-benchmarks
+
+# RUN: mkdir -p %T
+# RUN: echo "%{cxx}" > %T/cxx.subs
+# RUN: echo "%{compile_flags}" > %T/compile_flags.subs
+# RUN: echo "%{flags}" > %T/flags.subs
+# RUN: echo "%{link_flags}" > %T/link_flags.subs
+# RUN: echo "%{spec_dir}" > %T/spec_dir.subs
+# RUN: %{python} %s %T
+# END.
+
+import json
+import pathlib
+import sys
+
+test_dir = pathlib.Path(sys.argv[1])
+cxx = (test_dir / 'cxx.subs').open().read().strip()
+compile_flags = (test_dir / 'compile_flags.subs').open().read().strip()
+flags = (test_dir / 'flags.subs').open().read().strip()
+link_flags = (test_dir / 'link_flags.subs').open().read().strip()
+spec_dir = pathlib.Path((test_dir / 'spec_dir.subs').open().read().strip())
+
+# Setup the configuration file
+test_dir.mkdir(parents=True, exist_ok=True)
+spec_config = test_dir / 'spec-config.cfg'
+spec_config.write_text(f"""
+default:
+ ignore_errors = 1
+ iterations = 1
+ label = spec-stdlib
+ log_line_width = 4096
+ makeflags = --jobs=8
+ mean_anyway = 1
+ output_format = csv
+ preenv = 0
+ reportable = 0
+ tune = base
+ copies = 1
+ threads = 1
+ CC = cc -O3
+ CXX = {cxx} {compile_flags} {flags} {link_flags} -Wno-error
+ CC_VERSION_OPTION = --version
+ CXX_VERSION_OPTION = --version
+ EXTRA_PORTABILITY = -DSPEC_NO_CXX17_SPECIAL_MATH_FUNCTIONS # because libc++ doesn't implement the special math functions yet
+""")
+
+# Build the list of benchmarks. We take all intrate and fprate benchmarks that contain C++ and
+# discard the ones that contain Fortran, since this test suite isn't set up to build Fortran code.
+spec_benchmarks = set()
+no_fortran = set()
+with open(spec_dir / 'benchspec' / 'CPU' / 'intrate_any_cpp.bset', 'r') as f:
+ spec_benchmarks.update(json.load(f)['benchmarks'])
+with open(spec_dir / 'benchspec' / 'CPU' / 'fprate_any_cpp.bset', 'r') as f:
+ spec_benchmarks.update(json.load(f)['benchmarks'])
+with open(spec_dir / 'benchspec' / 'CPU' / 'no_fortran.bset', 'r') as f:
+ no_fortran.update(json.load(f)['benchmarks'])
+spec_benchmarks &= no_fortran
+
+for benchmark in spec_benchmarks:
+ print(f'#--- {benchmark}.sh.test')
+ print(f'RUN: cp {spec_config} %T/spec-config.cfg')
+ print(f'RUN: rm -rf %T/output') # clean up any previous (potentially incomplete) run
+ print(f'RUN: %{{spec_dir}}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T/output --rebuild {benchmark}')
+ print(f'RUN: cp %T/output/result/*.log %T/output/result/*.train.csv %T') # copy relevant output files
+ print(f'RUN: rm -rf %T/output') # remove the temporary directory, which can become quite large
diff --git a/libcxx/utils/libcxx/test/params.py b/libcxx/utils/libcxx/test/params.py
index 81c613421a465..6f013a75195a8 100644
--- a/libcxx/utils/libcxx/test/params.py
+++ b/libcxx/utils/libcxx/test/params.py
@@ -374,6 +374,17 @@ def getSuitableClangTidy(cfg):
help="Whether to run the benchmarks in the test suite, to only dry-run them or to disable them entirely.",
actions=lambda mode: [AddFeature(f"enable-benchmarks={mode}")],
),
+ Parameter(
+ name="spec_dir",
+ type=str,
+ default="none",
+ help="Path to the SPEC benchmarks. This is required in order to run the SPEC benchmarks as part of "
+ "the libc++ test suite. If provided, the appropriate SPEC toolset must already be built and installed.",
+ actions=lambda spec_dir: [
+ AddSubstitution("%{spec_dir}", spec_dir),
+ AddFeature('enable-spec-benchmarks')
+ ] if spec_dir != "none" else [],
+ ),
Parameter(
name="long_tests",
choices=[True, False],
You can test this locally with the following command:
darker --check --diff -r origin/main...HEAD libcxx/test/benchmarks/spec.gen.py libcxx/utils/libcxx/test/params.py
View the diff from darker here.
--- test/benchmarks/spec.gen.py 2025-09-04 20:18:48.000000 +0000
+++ test/benchmarks/spec.gen.py 2025-09-04 20:22:05.286978 +0000
@@ -20,20 +20,21 @@
import json
import pathlib
import sys
test_dir = pathlib.Path(sys.argv[1])
-cxx = (test_dir / 'cxx.subs').open().read().strip()
-compile_flags = (test_dir / 'compile_flags.subs').open().read().strip()
-flags = (test_dir / 'flags.subs').open().read().strip()
-link_flags = (test_dir / 'link_flags.subs').open().read().strip()
-spec_dir = pathlib.Path((test_dir / 'spec_dir.subs').open().read().strip())
+cxx = (test_dir / "cxx.subs").open().read().strip()
+compile_flags = (test_dir / "compile_flags.subs").open().read().strip()
+flags = (test_dir / "flags.subs").open().read().strip()
+link_flags = (test_dir / "link_flags.subs").open().read().strip()
+spec_dir = pathlib.Path((test_dir / "spec_dir.subs").open().read().strip())
# Setup the configuration file
test_dir.mkdir(parents=True, exist_ok=True)
-spec_config = test_dir / 'spec-config.cfg'
-spec_config.write_text(f"""
+spec_config = test_dir / "spec-config.cfg"
+spec_config.write_text(
+ f"""
default:
ignore_errors = 1
iterations = 1
label = spec-stdlib
log_line_width = 4096
@@ -48,31 +49,38 @@
CC = cc -O3
CXX = {cxx} {compile_flags} {flags} {link_flags} -Wno-error
CC_VERSION_OPTION = --version
CXX_VERSION_OPTION = --version
EXTRA_PORTABILITY = -DSPEC_NO_CXX17_SPECIAL_MATH_FUNCTIONS # because libc++ doesn't implement the special math functions yet
-""")
+"""
+)
# Build the list of benchmarks. We take all intrate and fprate benchmarks that contain C++ and
# discard the ones that contain Fortran, since this test suite isn't set up to build Fortran code.
spec_benchmarks = set()
no_fortran = set()
-with open(spec_dir / 'benchspec' / 'CPU' / 'intrate_any_cpp.bset', 'r') as f:
- spec_benchmarks.update(json.load(f)['benchmarks'])
-with open(spec_dir / 'benchspec' / 'CPU' / 'fprate_any_cpp.bset', 'r') as f:
- spec_benchmarks.update(json.load(f)['benchmarks'])
-with open(spec_dir / 'benchspec' / 'CPU' / 'no_fortran.bset', 'r') as f:
- no_fortran.update(json.load(f)['benchmarks'])
+with open(spec_dir / "benchspec" / "CPU" / "intrate_any_cpp.bset", "r") as f:
+ spec_benchmarks.update(json.load(f)["benchmarks"])
+with open(spec_dir / "benchspec" / "CPU" / "fprate_any_cpp.bset", "r") as f:
+ spec_benchmarks.update(json.load(f)["benchmarks"])
+with open(spec_dir / "benchspec" / "CPU" / "no_fortran.bset", "r") as f:
+ no_fortran.update(json.load(f)["benchmarks"])
spec_benchmarks &= no_fortran
for benchmark in spec_benchmarks:
- print(f'#--- {benchmark}.sh.test')
- print(f'RUN: rm -rf %T') # clean up any previous (potentially incomplete) run
- print(f'RUN: mkdir %T')
- print(f'RUN: cp {spec_config} %T/spec-config.cfg')
- print(f'RUN: %{{spec_dir}}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild {benchmark}')
- print(f'RUN: rm -rf %T/benchspec') # remove the temporary directory, which can become quite large
+ print(f"#--- {benchmark}.sh.test")
+ print(f"RUN: rm -rf %T") # clean up any previous (potentially incomplete) run
+ print(f"RUN: mkdir %T")
+ print(f"RUN: cp {spec_config} %T/spec-config.cfg")
+ print(
+ f"RUN: %{{spec_dir}}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild {benchmark}"
+ )
+ print(
+ f"RUN: rm -rf %T/benchspec"
+ ) # remove the temporary directory, which can become quite large
# Parse the results into a LNT-compatible format. This also errors out if there are no CSV files, which
# means that the benchmark didn't run properly (the `runcpu` command above never reports a failure).
- print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-result %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt')
- print(f'RUN: cat %T/results.lnt')
+ print(
+ f"RUN: %{{libcxx-dir}}/utils/parse-spec-result %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt"
+ )
+ print(f"RUN: cat %T/results.lnt")
Inline review comment on the SPEC config template in spec.gen.py:
tune = base
copies = 1
threads = 1
CC = cc -O3
I think in an ideal world we'd take the same optimization level as we have in the CXX flags. Not sure how easy that would be though.
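One way that could be done (a sketch only, not part of this patch; `compile_flags` is the string spec.gen.py already reads from compile_flags.subs): pull the last -O flag out of the C++ flags and fall back to -O3 when there is none.

import re

# Hypothetical: reuse the optimization level from %{compile_flags} for CC as well.
opt_flags = re.findall(r"-O(?:[0-3sz]|fast)\b", compile_flags)
cc_opt = opt_flags[-1] if opt_flags else "-O3"  # last flag wins, mirroring compiler behavior
# ...then emit `CC = cc {cc_opt}` in the config template instead of hard-coding -O3.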