
Commit 5b54f68

Merge branch 'mr/thevenoux-langkit-query-language#568' into 'master'
testsuite gnatcheck driver: move worker canonicalization to output refiners

See merge request eng/libadalang/langkit-query-language!564
2 parents ae1af50 + 7559c6a commit 5b54f68
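
The merge request title describes moving worker canonicalization out of the gnatcheck test driver and into output refiners; that refiner change lives in files not reproduced below. For orientation only, here is a minimal, hypothetical sketch of the e3.testsuite pattern being referred to, assuming the usual OutputRefiner.refine(output) interface and the DiffTestDriver.output_refiners hook; the CanonicalizeWorkers class and its regular expression are illustrative and not taken from this commit.

import re

from e3.testsuite.driver.diff import DiffTestDriver, OutputRefiner, Substitute


class CanonicalizeWorkers(OutputRefiner):
    """Hypothetical refiner: replace worker-specific markers in the tool
    output with a stable placeholder so baselines do not depend on them."""

    worker_re = re.compile(r"worker #\d+")

    def refine(self, output: str) -> str:
        # Rewrite e.g. "worker #3" into "worker #N"
        return self.worker_re.sub("worker #N", output)


class ExampleDriver(DiffTestDriver):
    @property
    def output_refiners(self):
        # Refiners are applied to the actual output before it is diffed
        # against the test baseline.
        return super().output_refiners + [
            CanonicalizeWorkers(),
            Substitute("\\", "/"),  # example: canonicalize path separators
        ]

Doing the canonicalization in a refiner keeps it in the output/baseline comparison step, so the drivers' run logic does not have to post-process worker-specific text itself.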

7 files changed: +139 -135 lines changed


testsuite/drivers/base_driver.py

Lines changed: 54 additions & 64 deletions
@@ -15,9 +15,7 @@
     Substitute,
     OutputRefiner,
 )
-from e3.testsuite.driver.classic import (
-    TestAbortWithError, ProcessResult, TestSkip
-)
+from e3.testsuite.driver.classic import TestAbortWithError, ProcessResult, TestSkip


 @dataclass
@@ -95,21 +93,20 @@ def errors(self) -> list[tuple[int, str]]:
             if self.noflag_annotations.tag_count(line) != 0:
                 res.append((line, "'NOFLAG' annotation violated"))
             elif flag_count == 0:
-                res.append((
-                    line,
-                    f"no 'FLAG' annotation (line flagged {count} time(s))"
-                ))
+                res.append(
+                    (line, f"no 'FLAG' annotation (line flagged {count} time(s))")
+                )
             elif flag_count != count:
-                res.append((
-                    line,
-                    f"unexpected flag count (expecting {flag_count}, actual {count})"
-                ))
+                res.append(
+                    (
+                        line,
+                        f"unexpected flag count (expecting {flag_count}, actual {count})",
+                    )
+                )

         for line, count in self.flag_annotations.tagged_lines.items():
             if self.flagged_lines.tag_count(line) == 0:
-                res.append((
-                    line, f"line is never flagged (expecting {count} time(s))"
-                ))
+                res.append((line, f"line is never flagged (expecting {count} time(s))"))

         return sorted(res, key=lambda t: t[0])

@@ -147,7 +144,9 @@ def add_missing_flags(self):
         for line, count in missing_flag_annotations.items():
             # Split the source line to get the code and the comment parts
             line_split = source_lines[line - 1].split("--", maxsplit=1)
-            code, comment = line_split[0], line_split[1].strip() if len(line_split) == 2 else ""
+            code, comment = line_split[0], (
+                line_split[1].strip() if len(line_split) == 2 else ""
+            )

             # Remove the already existing FLAG annotation if there is one
             search_res = re.search(r"FLAG\s*(\(\d+\))?\s*(.*)", comment)
@@ -166,7 +165,7 @@ def add_missing_flags(self):
             source_lines[line - 1] = f"{code}-- FLAG{count_str}{comment}"

         # Finally write the modified lines in the source file
-        with open(source, 'w', encoding=source_encoding) as f:
+        with open(source, "w", encoding=source_encoding) as f:
             for line in source_lines:
                 print(line, file=f)

@@ -179,7 +178,7 @@ class BaseDriver(DiffTestDriver):
     flag_pattern = re.compile(r"--\s*FLAG\s*(\((\d+)\))?\s*(.*)")
     noflag_pattern = re.compile(r"--\s*NOFLAG")
     ada_file_pattern = r"[a-zA-Z][a-zA-Z0-9_\.\-]*\.(adb|ads|ada|ada_spec)"
-    ada_source_encodings = ['utf-8', 'iso-8859-1']
+    ada_source_encodings = ["utf-8", "iso-8859-1"]

     perf_supported = False
     flag_checking_supported = False
@@ -190,12 +189,13 @@ def is_codepeer(self) -> bool:

     @property
     def perf_mode(self) -> bool:
-        return hasattr(self.env, 'perf_mode') and self.env.perf_mode
+        return hasattr(self.env, "perf_mode") and self.env.perf_mode

     @property
     def flag_checking(self) -> bool:
-        return ((not self.env.options.no_flag_checking) and
-                self.test_env.get("check_flags", True))
+        return (not self.env.options.no_flag_checking) and self.test_env.get(
+            "check_flags", True
+        )

     @property
     def lkql_jit_dir(self):
@@ -209,26 +209,28 @@ def baseline(self) -> tuple[str, str, bool]:

     @property
     def test_control_creator(self):
-        return YAMLTestControlCreator({
-            'mode': self.env.options.mode,
-            'os': self.env.build.os.name,
-            'is_codepeer': self.is_codepeer,
-        })
+        return YAMLTestControlCreator(
+            {
+                "mode": self.env.options.mode,
+                "os": self.env.build.os.name,
+                "is_codepeer": self.is_codepeer,
+            }
+        )

     def set_up(self) -> None:
         super().set_up()

         # If requested, skip internal testcases
         if (
-            hasattr(self.env.options, "skip_internal_tests") and
-            self.env.options.skip_internal_tests and
-            self.test_env['test_name'].startswith('internal__')
+            hasattr(self.env.options, "skip_internal_tests")
+            and self.env.options.skip_internal_tests
+            and self.test_env["test_name"].startswith("internal__")
         ):
-            raise TestSkip('Skipping internal testcase')
+            raise TestSkip("Skipping internal testcase")

         self._define_lkql_executables()

-        if hasattr(self.env.options, 'coverage') and self.env.options.coverage:
+        if hasattr(self.env.options, "coverage") and self.env.options.coverage:
             # Unique number to generate separate trace files in the "shell"
             # method.
             self.trace_counter = 0
@@ -241,12 +243,8 @@ def set_up(self) -> None:
             mkdir(self.traces_dir)

     def check_run(
-        self,
-        args: list[str],
-        check_flags: bool = True,
-        lkql_path = "",
-        **kwargs
-    ) -> ProcessResult:
+        self, args: list[str], check_flags: bool = True, lkql_path="", **kwargs
+    ) -> ProcessResult:
         """
         Run a process and check that its output is the expected one.

@@ -256,26 +254,22 @@ def check_run(
         env = dict(os.environ)

         # Ensure color codes are not output during the test execution
-        env.pop('TERM', None)
+        env.pop("TERM", None)

         # If code coverage is enabled, put trace files in the dedicated
         # directory.
         if self.env.options.coverage:
-            env['LIBLKQLLANG_TRACE_FILE'] = P.join(
-                self.traces_dir, f'lkql-{self.trace_counter}.srctrace'
+            env["LIBLKQLLANG_TRACE_FILE"] = P.join(
+                self.traces_dir, f"lkql-{self.trace_counter}.srctrace"
             )
-            env['GNATCOV_TRACE_FILE'] = P.join(
-                self.traces_dir, f'prog-{self.trace_counter}.srctrace'
+            env["GNATCOV_TRACE_FILE"] = P.join(
+                self.traces_dir, f"prog-{self.trace_counter}.srctrace"
             )

         # Add the provided LKQL path to environment variables
         env["LKQL_PATH"] = lkql_path

-        if (
-            self.flag_checking_supported and
-            self.flag_checking and
-            check_flags
-        ):
+        if self.flag_checking_supported and self.flag_checking and check_flags:
             run_result = self.shell(args, env=env, **kwargs)
             self.check_flags(self.parse_flagged_lines(run_result.out))
             return run_result
@@ -345,7 +339,7 @@ def run(*prefix):
                 "-F100",
                 "-o",
                 P.join(perf_dir, perf_filename),
-                "--"
+                "--",
             )
             self.result.info["time-profile"] = perf_filename

@@ -362,9 +356,7 @@ def run_in_tty(self, args: list[str], **kwargs) -> tuple[str, int]:
         # documentation, the ``pty`` module is not working fine on it (see
         # https://docs.python.org/fr/3/library/pty.html).
         if self.env.build.os.name == "windows":
-            raise TestAbortWithError(
-                "Cannot run a pseudo-TTY on Windows systems"
-            )
+            raise TestAbortWithError("Cannot run a pseudo-TTY on Windows systems")

         # Only import ``pty`` after checking that we are not on a Windows
         # system.
@@ -376,13 +368,7 @@ def run_in_tty(self, args: list[str], **kwargs) -> tuple[str, int]:

         # Open a subprocess with using a pseudo TTY as output
         m, s = pty.openpty()
-        p = subprocess.Popen(
-            args=args,
-            stdout=s,
-            stderr=s,
-            close_fds=True,
-            **kwargs
-        )
+        p = subprocess.Popen(args=args, stdout=s, stderr=s, close_fds=True, **kwargs)
         os.close(s)

         # Read result of the process execution and get its return code
@@ -445,15 +431,17 @@ def check_flags(self, execution_flags: dict[str, TaggedLines]) -> None:
         )
         all_flags: list[FileFlags] = []
         for source_name in sorted(ada_source_names):
-            matching_sources = glob.glob(P.join(self.test_dir(), f"**/{source_name}"), recursive=True)
+            matching_sources = glob.glob(
+                P.join(self.test_dir(), f"**/{source_name}"), recursive=True
+            )
             flags, noflags = self.count_annotations_in_files(matching_sources)
             all_flags.append(
                 FileFlags(
                     file_name=source_name,
                     matching_sources=matching_sources,
                     flag_annotations=flags,
                     noflag_annotations=noflags,
-                    flagged_lines=execution_flags.get(source_name, TaggedLines())
+                    flagged_lines=execution_flags.get(source_name, TaggedLines()),
                 )
             )

@@ -462,7 +450,7 @@ def check_flags(self, execution_flags: dict[str, TaggedLines]) -> None:
         for flags in all_flags:
             errors = flags.errors
             if errors:
-                errors_buffer.append(f"In file \"{flags.file_name}\":")
+                errors_buffer.append(f'In file "{flags.file_name}":')
                 for line, msg in errors:
                     errors_buffer.append(f" - at line {line}: {msg}")
                 errors_buffer.append("")
@@ -477,7 +465,9 @@ def check_flags(self, execution_flags: dict[str, TaggedLines]) -> None:
             for flags in all_flags:
                 flags.add_missing_flags()

-    def count_annotations_in_files(self, files: list[str]) -> tuple[TaggedLines, TaggedLines]:
+    def count_annotations_in_files(
+        self, files: list[str]
+    ) -> tuple[TaggedLines, TaggedLines]:
         """
         Count `FLAG/NOFLAG` annotations in the provided list of files and
         return the result in a tuple structured like
@@ -506,11 +496,11 @@ def read_ada_file(cls, file_name: str) -> tuple[list[str], str]:

         :param file_name: The Ada file to read.
         """
-        with open(file_name, mode='rb') as ada_file:
+        with open(file_name, mode="rb") as ada_file:
             ada_bytes = ada_file.read()
         for encoding in cls.ada_source_encodings:
             try:
-                lines = ada_bytes.decode(encoding).split('\n')
+                lines = ada_bytes.decode(encoding).split("\n")
                 lines = lines[:-1] if not lines[-1] else lines
                 return (lines, encoding)
             except ValueError as _:
@@ -543,7 +533,7 @@ def locate_on_path(cls, exec_name: str) -> str | None:
         `PATH` environment variable and return its absolute path. If none has
         been found, return `None`.
         """
-        dirs = os.environ.get('PATH', "").split(os.pathsep)
+        dirs = os.environ.get("PATH", "").split(os.pathsep)
         for dir in dirs:
             possible_file = os.path.join(dir, exec_name)
             if os.path.isfile(possible_file):

testsuite/drivers/benchmarks_driver.py

Lines changed: 8 additions & 7 deletions
@@ -21,10 +21,11 @@ def baseline(self) -> tuple[str, str, bool]:

     def run(self):
         # Call the Maven command to run the JMH benchmarking
-        benchmark_results_json = self.working_dir('benchmark_result.json')
+        benchmark_results_json = self.working_dir("benchmark_result.json")
         mvn_cmd = [
             "mvn",
-            "-f", os.path.join(self.lkql_jit_dir, "benchmarks"),
+            "-f",
+            os.path.join(self.lkql_jit_dir, "benchmarks"),
             "jmh:benchmark",
             "-Djmh.rf=json",
             f"-Djmh.v=SILENT",
@@ -37,20 +38,20 @@ def run(self):

         # Read the benchmark result and parse it
         res = {}
-        with open(benchmark_results_json, 'r') as json_file:
+        with open(benchmark_results_json, "r") as json_file:
             benchmark_results = json.load(json_file)
             for result in benchmark_results:
                 # Compute the interesting information about the benchmark
-                split = result['benchmark'].split(".")
+                split = result["benchmark"].split(".")
                 benchmark = ".".join(split[:-1])
                 run = split[-1]

                 # Place them in the result
                 runs = res.get(benchmark, {})
                 runs[run] = {
-                    'score': result['primaryMetric']['score'],
-                    'unit': result['primaryMetric']['scoreUnit'],
+                    "score": result["primaryMetric"]["score"],
+                    "unit": result["primaryMetric"]["scoreUnit"],
                 }
                 res[benchmark] = runs

-        self.result.info['benchmark_results'] = res
+        self.result.info["benchmark_results"] = res

testsuite/drivers/checker_driver.py

Lines changed: 15 additions & 17 deletions
@@ -4,6 +4,7 @@

 from e3.testsuite.driver.diff import OutputRefiner, Substitute

+
 class CheckerDriver(BaseDriver):
     """
     This driver runs the checker with the given arguments and compares the
@@ -37,34 +38,31 @@ def run(self) -> None:
         args = []

         # Use the test's project, if any
-        if self.test_env.get('project', None):
-            args += ['-P', self.test_env['project']]
+        if self.test_env.get("project", None):
+            args += ["-P", self.test_env["project"]]
         else:
-            args += self.test_env['input_sources']
+            args += self.test_env["input_sources"]

         # Use the wanted charset, if any
-        if self.test_env.get('source_charset'):
-            args += ['--charset', self.test_env['source_charset']]
+        if self.test_env.get("source_charset"):
+            args += ["--charset", self.test_env["source_charset"]]

-        for k, v in self.test_env.get('rule_arguments', {}).items():
-            args += ['--rule-arg', '{}={}'.format(k, v)]
+        for k, v in self.test_env.get("rule_arguments", {}).items():
+            args += ["--rule-arg", "{}={}".format(k, v)]

-        args += ['-r', self.test_env['rule_name']]
-        args += ['--rules-dir', self.test_env['test_dir']]
+        args += ["-r", self.test_env["rule_name"]]
+        args += ["--rules-dir", self.test_env["test_dir"]]

         if self.test_env.get("keep_going_on_missing_file", False):
-            args += ['--keep-going-on-missing-file']
+            args += ["--keep-going-on-missing-file"]

         # Run the checker
         if self.perf_mode:
             self.perf_run(args)
         else:
             # Use `catch_error=False` to avoid failing on non-zero status code,
             # as some tests actually exert erroneous behaviors.
-            self.check_run(
-                self.lkql_checker_exe + args,
-                catch_error=False
-            )
+            self.check_run(self.lkql_checker_exe + args, catch_error=False)

         # If required, run the LKQL fix command
         if self.test_env.get("auto_fix"):
@@ -75,14 +73,14 @@ def run(self) -> None:
             auto_fix_mode = self.test_env.get("auto_fix_mode", "DISPLAY")
             assert auto_fix_mode in ["DISPLAY", "NEW_FILE", "PATCH_FILE"]
             self.check_run(
-                self.lkql_fix_exe + args + ['--auto-fix-mode', auto_fix_mode],
+                self.lkql_fix_exe + args + ["--auto-fix-mode", auto_fix_mode],
                 check_flags=False,
                 catch_error=False,
             )

             # If the auto-fix mode is "NEW_FILE" or "PATCH_FILE", then
             # display resulting files.
-            if auto_fix_mode in ['NEW_FILE', 'PATCH_FILE']:
+            if auto_fix_mode in ["NEW_FILE", "PATCH_FILE"]:
                 # Get the list of patched files by parsing the output
                 patched_files = []
                 for l in str(self.output).splitlines():
@@ -93,7 +91,7 @@ def run(self) -> None:

                 # Then, for each patched file, display its content
                 for pf in patched_files:
-                    with open(self.working_dir(pf), 'r') as f:
+                    with open(self.working_dir(pf), "r") as f:
                         self.output += f"=== {pf} content:\n"
                         self.output += f.read()

