Commit 032d726

Merge branch 'main' into vplan-split-prepareforvectorization
2 parents: b28bb32 + 62735d2

2,832 files changed: +123,835 -48,687 lines. (Large merge commit; only the .ci/ files are shown below.)
.ci/compute_projects.py

Lines changed: 7 additions & 0 deletions
```diff
@@ -80,6 +80,7 @@
     "clang": {"compiler-rt"},
     "clang-tools-extra": {"libc"},
     "libc": {"libc"},
+    "compiler-rt": {"compiler-rt"},
     ".ci": {"compiler-rt", "libc"},
 }
 DEPENDENT_RUNTIMES_TO_TEST_NEEDS_RECONFIG = {
@@ -100,6 +101,9 @@
     "libc",  # No Windows Support.
     "lldb",  # TODO(issues/132800): Needs environment setup.
     "bolt",  # No Windows Support.
+    "libcxx",
+    "libcxxabi",
+    "libunwind",
 }
 
 # These are projects that we should test if the project itself is changed but
@@ -118,6 +122,9 @@
     "lldb",
     "openmp",
     "polly",
+    "libcxx",
+    "libcxxabi",
+    "libunwind",
 }
 
 PROJECT_CHECK_TARGETS = {
```
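
The practical effect of the new `"compiler-rt": {"compiler-rt"}` entry is that a change under `compiler-rt/` now maps to the compiler-rt runtime. A minimal sketch of how such a top-level-directory-to-runtimes table can be consulted — not the actual compute_projects.py logic; the dict name `RUNTIME_DEPENDENCIES` and the helper function are hypothetical, since the real dict's name sits above the hunk shown:

```python
# Hypothetical name; the real dict's name is outside the hunk above.
RUNTIME_DEPENDENCIES = {
    "clang": {"compiler-rt"},
    "clang-tools-extra": {"libc"},
    "libc": {"libc"},
    "compiler-rt": {"compiler-rt"},  # the entry added in this commit
    ".ci": {"compiler-rt", "libc"},
}


def runtimes_for_changed_files(changed_files: list[str]) -> set[str]:
    """Collects the runtimes affected by a list of changed file paths."""
    runtimes: set[str] = set()
    for path in changed_files:
        # Map each changed file to its top-level directory and look it up.
        top_level_dir = path.split("/")[0]
        runtimes |= RUNTIME_DEPENDENCIES.get(top_level_dir, set())
    return runtimes


# Before this commit the lookup below yielded set(); with the new entry it
# yields {"compiler-rt"}, which is what the new test_compiler_rt pins down.
print(runtimes_for_changed_files(["compiler-rt/lib/asan/asan_allocator.cpp"]))
```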

.ci/compute_projects_test.py

Lines changed: 57 additions & 12 deletions
```diff
@@ -45,16 +45,14 @@ def test_llvm_windows(self):
             env_variables["project_check_targets"],
             "check-clang check-clang-tools check-lld check-llvm check-mlir check-polly",
         )
-        self.assertEqual(
-            env_variables["runtimes_to_build"], "libcxx;libcxxabi;libunwind"
-        )
+        self.assertEqual(env_variables["runtimes_to_build"], "")
         self.assertEqual(
             env_variables["runtimes_check_targets"],
             "",
         )
         self.assertEqual(
             env_variables["runtimes_check_targets_needs_reconfig"],
-            "check-cxx check-cxxabi check-unwind",
+            "",
         )
 
     def test_llvm_mac(self):
@@ -69,16 +67,14 @@ def test_llvm_mac(self):
             env_variables["project_check_targets"],
             "check-clang check-clang-tools check-lld check-llvm check-mlir",
         )
-        self.assertEqual(
-            env_variables["runtimes_to_build"], "libcxx;libcxxabi;libunwind"
-        )
+        self.assertEqual(env_variables["runtimes_to_build"], "")
         self.assertEqual(
             env_variables["runtimes_check_targets"],
             "",
         )
         self.assertEqual(
             env_variables["runtimes_check_targets_needs_reconfig"],
-            "check-cxx check-cxxabi check-unwind",
+            "",
         )
 
     def test_clang(self):
@@ -119,19 +115,43 @@ def test_clang_windows(self):
         self.assertEqual(
             env_variables["project_check_targets"], "check-clang check-clang-tools"
         )
-        self.assertEqual(
-            env_variables["runtimes_to_build"], "libcxx;libcxxabi;libunwind"
-        )
+        self.assertEqual(env_variables["runtimes_to_build"], "")
         self.assertEqual(
             env_variables["runtimes_check_targets"],
             "",
         )
         self.assertEqual(
             env_variables["runtimes_check_targets_needs_reconfig"],
-            "check-cxx check-cxxabi check-unwind",
+            "",
         )
         self.assertEqual(env_variables["enable_cir"], "OFF")
 
+    def test_compiler_rt(self):
+        env_variables = compute_projects.get_env_variables(
+            ["compiler-rt/lib/asan/asan_allocator.cpp"], "Linux"
+        )
+        self.assertEqual(
+            env_variables["projects_to_build"],
+            "clang;lld",
+        )
+        self.assertEqual(
+            env_variables["project_check_targets"],
+            "",
+        )
+        self.assertEqual(env_variables["runtimes_to_build"], "compiler-rt")
+        self.assertEqual(
+            env_variables["runtimes_check_targets"],
+            "check-compiler-rt",
+        )
+        self.assertEqual(
+            env_variables["runtimes_check_targets_needs_reconfig"],
+            "",
+        )
+        self.assertEqual(
+            env_variables["enable_cir"],
+            "OFF",
+        )
+
     def test_cir(self):
         env_variables = compute_projects.get_env_variables(
             ["clang/lib/CIR/CMakeLists.txt"], "Linux"
@@ -284,6 +304,31 @@ def test_ci(self):
             "check-cxx check-cxxabi check-unwind",
         )
 
+    def test_windows_ci(self):
+        env_variables = compute_projects.get_env_variables(
+            [".ci/compute_projects.py"], "Windows"
+        )
+        self.assertEqual(
+            env_variables["projects_to_build"],
+            "clang;clang-tools-extra;libclc;lld;llvm;mlir;polly",
+        )
+        self.assertEqual(
+            env_variables["project_check_targets"],
+            "check-clang check-clang-cir check-clang-tools check-lld check-llvm check-mlir check-polly",
+        )
+        self.assertEqual(
+            env_variables["runtimes_to_build"],
+            "",
+        )
+        self.assertEqual(
+            env_variables["runtimes_check_targets"],
+            "",
+        )
+        self.assertEqual(
+            env_variables["runtimes_check_targets_needs_reconfig"],
+            "",
+        )
+
     def test_lldb(self):
         env_variables = compute_projects.get_env_variables(
             ["lldb/CMakeLists.txt"], "Linux"
```

.ci/generate_test_report_github.py

Lines changed: 9 additions & 4 deletions
```diff
@@ -4,20 +4,25 @@
 """Script to generate a build report for Github."""
 
 import argparse
+import platform
 
 import generate_test_report_lib
 
+PLATFORM_TITLES = {
+    "Windows": ":window: Windows x64 Test Results",
+    "Linux": ":penguin: Linux x64 Test Results",
+}
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
+    parser.add_argument("return_code", help="The build's return code.", type=int)
     parser.add_argument(
-        "title", help="Title of the test report, without Markdown formatting."
+        "build_test_logs", help="Paths to JUnit report files and ninja logs.", nargs="*"
     )
-    parser.add_argument("return_code", help="The build's return code.", type=int)
-    parser.add_argument("junit_files", help="Paths to JUnit report files.", nargs="*")
     args = parser.parse_args()
 
     report = generate_test_report_lib.generate_report_from_files(
-        args.title, args.return_code, args.junit_files
+        PLATFORM_TITLES[platform.system()], args.return_code, args.build_test_logs
     )
 
     print(report)
```
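
With this change the script takes the return code first, then any number of log paths, and derives the report title from `platform.system()` instead of a command-line argument. A sketch of the equivalent in-process call; the file names are illustrative:

```python
import platform

import generate_test_report_lib

PLATFORM_TITLES = {
    "Windows": ":window: Windows x64 Test Results",
    "Linux": ":penguin: Linux x64 Test Results",
}

# Mirrors: generate_test_report_github.py 1 results.xml ninja.log
report = generate_test_report_lib.generate_report_from_files(
    PLATFORM_TITLES[platform.system()],  # title chosen by host OS
    1,                                   # the build's return code
    ["results.xml", "ninja.log"],        # JUnit XML and ninja logs, mixed
)
print(report)
```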

.ci/generate_test_report_lib.py

Lines changed: 143 additions & 21 deletions
````diff
@@ -12,6 +12,84 @@
     "https://github.com/llvm/llvm-project/issues and add the "
     "`infrastructure` label."
 )
+# The maximum number of lines to pull from a ninja failure.
+NINJA_LOG_SIZE_THRESHOLD = 500
+
+
+def _parse_ninja_log(ninja_log: list[str]) -> list[tuple[str, str]]:
+    """Parses an individual ninja log."""
+    failures = []
+    index = 0
+    while index < len(ninja_log):
+        while index < len(ninja_log) and not ninja_log[index].startswith("FAILED:"):
+            index += 1
+        if index == len(ninja_log):
+            # We hit the end of the log without finding a build failure, go to
+            # the next log.
+            return failures
+        # We are trying to parse cases like the following:
+        #
+        # [4/5] test/4.stamp
+        # FAILED: touch test/4.stamp
+        # touch test/4.stamp
+        #
+        # index will point to the line that starts with FAILED:. The progress
+        # indicator is the line before this ([4/5] test/4.stamp) and contains a pretty
+        # printed version of the target being built (test/4.stamp). We use this line
+        # and remove the progress information to get a succinct name for the target.
+        failing_action = ninja_log[index - 1].split("] ")[1]
+        failure_log = []
+        while (
+            index < len(ninja_log)
+            and not ninja_log[index].startswith("[")
+            and not ninja_log[index].startswith("ninja: build stopped:")
+            and len(failure_log) < NINJA_LOG_SIZE_THRESHOLD
+        ):
+            failure_log.append(ninja_log[index])
+            index += 1
+        failures.append((failing_action, "\n".join(failure_log)))
+    return failures
+
+
+def find_failure_in_ninja_logs(ninja_logs: list[list[str]]) -> list[tuple[str, str]]:
+    """Extracts failure messages from ninja output.
+
+    This function takes stdout/stderr from ninja in the form of a list of files
+    represented as a list of lines. This function then returns tuples containing
+    the name of the target and the error message.
+
+    Args:
+      ninja_logs: A list of files in the form of a list of lines representing the log
+        files captured from ninja.
+
+    Returns:
+      A list of tuples. The first string is the name of the target that failed. The
+      second string is the error message.
+    """
+    failures = []
+    for ninja_log in ninja_logs:
+        log_failures = _parse_ninja_log(ninja_log)
+        failures.extend(log_failures)
+    return failures
+
+
+def _format_ninja_failures(ninja_failures: list[tuple[str, str]]) -> list[str]:
+    """Formats ninja failures into summary views for the report."""
+    output = []
+    for build_failure in ninja_failures:
+        failed_action, failure_message = build_failure
+        output.extend(
+            [
+                "<details>",
+                f"<summary>{failed_action}</summary>",
+                "",
+                "```",
+                failure_message,
+                "```",
+                "</details>",
+            ]
+        )
+    return output
 
 
 # Set size_limit to limit the byte size of the report. The default is 1MB as this
````
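
A worked example of `_parse_ninja_log` on the log shape its inline comment describes (the target and command come from that comment; this assumes the function is in scope):

```python
sample_log = [
    "[3/5] test/3.stamp",
    "[4/5] test/4.stamp",
    "FAILED: touch test/4.stamp",
    "touch test/4.stamp",
    "ninja: build stopped: subcommand failed.",
]
# The scan finds "FAILED:", names the failure from the preceding progress
# line ("[4/5] test/4.stamp" -> "test/4.stamp"), then collects lines until
# the next progress marker, the "ninja: build stopped:" line, or the
# NINJA_LOG_SIZE_THRESHOLD cap.
print(_parse_ninja_log(sample_log))
# [('test/4.stamp', 'FAILED: touch test/4.stamp\ntouch test/4.stamp')]
```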
```diff
@@ -24,6 +102,7 @@ def generate_report(
     title,
     return_code,
     junit_objects,
+    ninja_logs: list[list[str]],
     size_limit=1024 * 1024,
     list_failures=True,
 ):
@@ -61,15 +140,34 @@
             ]
         )
     else:
-        report.extend(
-            [
-                "The build failed before running any tests.",
-                "",
-                SEE_BUILD_FILE_STR,
-                "",
-                UNRELATED_FAILURES_STR,
-            ]
-        )
+        ninja_failures = find_failure_in_ninja_logs(ninja_logs)
+        if not ninja_failures:
+            report.extend(
+                [
+                    "The build failed before running any tests. Detailed "
+                    "information about the build failure could not be "
+                    "automatically obtained.",
+                    "",
+                    SEE_BUILD_FILE_STR,
+                    "",
+                    UNRELATED_FAILURES_STR,
+                ]
+            )
+        else:
+            report.extend(
+                [
+                    "The build failed before running any tests. Click on a "
+                    "failure below to see the details.",
+                    "",
+                ]
+            )
+            report.extend(_format_ninja_failures(ninja_failures))
+            report.extend(
+                [
+                    "",
+                    UNRELATED_FAILURES_STR,
+                ]
+            )
         return "\n".join(report)
 
     tests_passed = tests_run - tests_skipped - tests_failed
```
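
When ninja failures are found, the `<details>` blocks from `_format_ninja_failures` land between the intro sentence and `UNRELATED_FAILURES_STR`. A sketch of the Markdown emitted for a single failure, reconstructed from `_format_ninja_failures` above:

```python
lines = _format_ninja_failures(
    [("test/4.stamp", "FAILED: touch test/4.stamp\ntouch test/4.stamp")]
)
print("\n".join(lines))
# <details>
# <summary>test/4.stamp</summary>
#
# ```
# FAILED: touch test/4.stamp
# touch test/4.stamp
# ```
# </details>
```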
```diff
@@ -114,14 +212,28 @@ def plural(num_tests):
     elif return_code != 0:
         # No tests failed but the build was in a failed state. Bring this to the user's
         # attention.
-        report.extend(
-            [
-                "",
-                "All tests passed but another part of the build **failed**.",
-                "",
-                SEE_BUILD_FILE_STR,
-            ]
-        )
+        ninja_failures = find_failure_in_ninja_logs(ninja_logs)
+        if not ninja_failures:
+            report.extend(
+                [
+                    "",
+                    "All tests passed but another part of the build **failed**. "
+                    "Information about the build failure could not be automatically "
+                    "obtained.",
+                    "",
+                    SEE_BUILD_FILE_STR,
+                ]
+            )
+        else:
+            report.extend(
+                [
+                    "",
+                    "All tests passed but another part of the build **failed**. Click on "
+                    "a failure below to see the details.",
+                    "",
+                ]
+            )
+            report.extend(_format_ninja_failures(ninja_failures))
 
     if failures or return_code != 0:
         report.extend(["", UNRELATED_FAILURES_STR])
@@ -139,9 +251,19 @@ def plural(num_tests):
     return report
 
 
-def generate_report_from_files(title, return_code, junit_files):
+def generate_report_from_files(title, return_code, build_log_files):
+    junit_files = [
+        junit_file for junit_file in build_log_files if junit_file.endswith(".xml")
+    ]
+    ninja_log_files = [
+        ninja_log for ninja_log in build_log_files if ninja_log.endswith(".log")
+    ]
+    ninja_logs = []
+    for ninja_log_file in ninja_log_files:
+        with open(ninja_log_file, "r") as ninja_log_file_handle:
+            ninja_logs.append(
+                [log_line.strip() for log_line in ninja_log_file_handle.readlines()]
+            )
     return generate_report(
-        title,
-        return_code,
-        [JUnitXml.fromfile(p) for p in junit_files],
+        title, return_code, [JUnitXml.fromfile(p) for p in junit_files], ninja_logs
     )
```
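
Callers now pass one flat list of artifacts and the function splits it by extension: `.xml` files go to junitparser's `JUnitXml.fromfile`, while `.log` files are read as stripped lines for the ninja scan. A sketch, assuming the two files exist on disk and the title string matches the Linux entry from `PLATFORM_TITLES`:

```python
report = generate_report_from_files(
    ":penguin: Linux x64 Test Results",  # title, e.g. from PLATFORM_TITLES
    1,                                   # non-zero build return code
    ["results.xml", "ninja.log"],        # mixed artifact list, split by extension
)
print(report)
```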
