
Commit ceb46dc

Revert "formatting changes"
This reverts commit 6d5ae7e.
1 parent cf55dc1 · commit ceb46dc

2 files changed (+15, -34 lines)

libcxx/test/benchmarks/spec.gen.py

Lines changed: 9 additions & 17 deletions
@@ -65,27 +65,19 @@
 spec_benchmarks &= no_fortran
 
 for benchmark in spec_benchmarks:
-    print(f"#--- {benchmark}.sh.test")
-    print(f"RUN: rm -rf %{temp}") # clean up any previous (potentially incomplete) run
-    print(f"RUN: mkdir %{temp}")
-    print(f"RUN: cp {spec_config} %{temp}/spec-config.cfg")
-    print(
-        f"RUN: %{{spec_dir}}/bin/runcpu --config %{temp}/spec-config.cfg --size train --output-root %{temp} --rebuild {benchmark}"
-    )
-    print(
-        f"RUN: rm -rf %{temp}/benchspec"
-    ) # remove the temporary directory, which can become quite large
+    print(f'#--- {benchmark}.sh.test')
+    print(f'RUN: rm -rf %{temp}') # clean up any previous (potentially incomplete) run
+    print(f'RUN: mkdir %{temp}')
+    print(f'RUN: cp {spec_config} %{temp}/spec-config.cfg')
+    print(f'RUN: %{{spec_dir}}/bin/runcpu --config %{temp}/spec-config.cfg --size train --output-root %{temp} --rebuild {benchmark}')
+    print(f'RUN: rm -rf %{temp}/benchspec') # remove the temporary directory, which can become quite large
 
     # The `runcpu` command above doesn't fail even if the benchmark fails to run. To determine failure, parse the CSV
     # results and ensure there are no compilation errors or runtime errors in the status row. Also print the logs and
     # fail if there are no CSV files at all, which implies a SPEC error.
-    print(
-        f'RUN: %{{libcxx-dir}}/utils/parse-spec-results --extract "Base Status" --keep-failed %{temp}/result/*.train.csv > %{temp}/status || ! cat %{temp}/result/*.log'
-    )
+    print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results --extract "Base Status" --keep-failed %{temp}/result/*.train.csv > %{temp}/status || ! cat %{temp}/result/*.log')
     print(f'RUN: ! grep -E "CE|RE" %{temp}/status || ! cat %{temp}/result/*.log')
 
     # If there were no errors, parse the results into LNT-compatible format and print them.
-    print(
-        f"RUN: %{{libcxx-dir}}/utils/parse-spec-results %{temp}/result/*.train.csv --output-format=lnt > %{temp}/results.lnt"
-    )
-    print(f"RUN: cat %{temp}/results.lnt")
+    print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results %{temp}/result/*.train.csv --output-format=lnt > %{temp}/results.lnt')
+    print(f'RUN: cat %{temp}/results.lnt')
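Aside (not part of the commit): the brace usage in the generator above can be confusing. Doubled braces such as %{{spec_dir}} emit a literal lit substitution %{spec_dir} into the generated test, while single braces such as {benchmark} and {temp} interpolate Python variables. A minimal sketch with hypothetical values:

    # Hypothetical stand-ins for the script's real variables.
    benchmark = "519.lbm_r"  # hypothetical SPEC benchmark name
    temp = "T"               # hypothetical; "%" + temp yields a lit temp-dir path

    print(f"#--- {benchmark}.sh.test")
    # -> #--- 519.lbm_r.sh.test
    print(f"RUN: rm -rf %{temp}")
    # -> RUN: rm -rf %T  (single braces interpolate the Python variable)
    print(f"RUN: %{{spec_dir}}/bin/runcpu --rebuild {benchmark}")
    # -> RUN: %{spec_dir}/bin/runcpu --rebuild 519.lbm_r  (doubled braces stay literal)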

libcxx/utils/libcxx/test/format.py

Lines changed: 6 additions & 17 deletions
@@ -178,14 +178,11 @@ def parseScript(test, preamble):
         "%dbg(MODULE std.compat) %{cxx} %{flags} "
         f"{compileFlags} "
         "-Wno-reserved-module-identifier -Wno-reserved-user-defined-literal "
-        "-fmodule-file=std=%{temp}/std.pcm "  # The std.compat module imports std.
+        "-fmodule-file=std=%{temp}/std.pcm " # The std.compat module imports std.
         "--precompile -o %{temp}/std.compat.pcm -c %{module-dir}/std.compat.cppm",
     )
     moduleCompileFlags.extend(
-        [
-            "-fmodule-file=std.compat=%{temp}/std.compat.pcm",
-            "%{temp}/std.compat.pcm",
-        ]
+        ["-fmodule-file=std.compat=%{temp}/std.compat.pcm", "%{temp}/std.compat.pcm"]
     )
 
     # Make sure the std module is built before std.compat. Libc++'s
@@ -200,9 +197,7 @@ def parseScript(test, preamble):
         "-Wno-reserved-module-identifier -Wno-reserved-user-defined-literal "
         "--precompile -o %{temp}/std.pcm -c %{module-dir}/std.cppm",
     )
-    moduleCompileFlags.extend(
-        ["-fmodule-file=std=%{temp}/std.pcm", "%{temp}/std.pcm"]
-    )
+    moduleCompileFlags.extend(["-fmodule-file=std=%{temp}/std.pcm", "%{temp}/std.pcm"])
 
     # Add compile flags required for the modules.
     substitutions = config._appendToSubstitution(
@@ -360,15 +355,9 @@ def execute(self, test, litConfig):
             "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{benchmark_flags} %{link_flags} -o %t.exe",
         ]
         if "enable-benchmarks=run" in test.config.available_features:
-            steps += [
-                "%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%{temp}/benchmark-result.json --benchmark_out_format=json"
-            ]
-            parse_results = os.path.join(
-                LIBCXX_UTILS, "parse-google-benchmark-results"
-            )
-            steps += [
-                f"{parse_results} %{temp}/benchmark-result.json --output-format=lnt > %{temp}/results.lnt"
-            ]
+            steps += ["%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%{temp}/benchmark-result.json --benchmark_out_format=json"]
+            parse_results = os.path.join(LIBCXX_UTILS, 'parse-google-benchmark-results')
+            steps += [f"{parse_results} %{temp}/benchmark-result.json --output-format=lnt > %{temp}/results.lnt"]
         return self._executeShTest(test, litConfig, steps)
     elif re.search('[.]gen[.][^.]+$', filename): # This only happens when a generator test is not supported
         return self._executeShTest(test, litConfig, [])
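For context on the last hunk, a minimal sketch of the pattern it restores (a sketch under assumptions, not the file's actual surrounding code; the function name and the LIBCXX_UTILS value are hypothetical): execute assembles a list of lit script steps, always compiling the benchmark and, only when the enable-benchmarks=run feature is present, appending a run step plus a results-parsing step.

    import os

    LIBCXX_UTILS = "/path/to/libcxx/utils"  # assumption: resolved elsewhere in format.py

    def benchmark_steps(run_benchmarks):
        # The compile step always runs; %{...} tokens are lit substitutions expanded later.
        steps = [
            "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{benchmark_flags} %{link_flags} -o %t.exe",
        ]
        if run_benchmarks:  # stands in for the enable-benchmarks=run feature check
            steps += ["%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%{temp}/benchmark-result.json --benchmark_out_format=json"]
            parse_results = os.path.join(LIBCXX_UTILS, "parse-google-benchmark-results")
            steps += [parse_results + " %{temp}/benchmark-result.json --output-format=lnt > %{temp}/results.lnt"]
        return steps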
