
Commit f746019

Merge remote-tracking branch 'origin/main' into expose-static-inline-functions-from-gmf
2 parents f62f7a1 + 22417ec

7,301 files changed: +390,538 / -220,869 lines


.ci/generate_test_report.py

Lines changed: 117 additions & 22 deletions
@@ -5,11 +5,11 @@
 # python3 -m unittest discover -p generate_test_report.py
 
 import argparse
+import subprocess
 import unittest
 from io import StringIO
 from junitparser import JUnitXml, Failure
 from textwrap import dedent
-from subprocess import check_call
 
 
 def junit_from_xml(xml):
@@ -18,7 +18,7 @@ def junit_from_xml(xml):
 
 class TestReports(unittest.TestCase):
     def test_title_only(self):
-        self.assertEqual(_generate_report("Foo", []), ("", None))
+        self.assertEqual(_generate_report("Foo", []), ("", "success"))
 
     def test_no_tests_in_testsuite(self):
         self.assertEqual(
@@ -105,7 +105,7 @@ def test_report_single_file_single_testsuite(self):
 * 1 test skipped
 * 2 tests failed
 
-## Failed tests
+## Failed Tests
 (click to see output)
 
 ### Bar
@@ -137,7 +137,7 @@ def test_report_single_file_single_testsuite(self):
 * 1 test skipped
 * 2 tests failed
 
-## Failed tests
+## Failed Tests
 (click to see output)
 
 ### ABC
@@ -233,12 +233,84 @@ def test_report_multiple_files_multiple_testsuites(self):
             self.MULTI_SUITE_OUTPUT,
         )
 
+    def test_report_dont_list_failures(self):
+        self.assertEqual(
+            _generate_report(
+                "Foo",
+                [
+                    junit_from_xml(
+                        dedent(
+                            """\
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites time="0.02">
+<testsuite name="Bar" tests="1" failures="1" skipped="0" time="0.02">
+<testcase classname="Bar/test_1" name="test_1" time="0.02">
+<failure><![CDATA[Output goes here]]></failure>
+</testcase>
+</testsuite>
+</testsuites>"""
+                        )
+                    )
+                ],
+                list_failures=False,
+            ),
+            (
+                dedent(
+                    """\
+# Foo
 
-def _generate_report(title, junit_objects):
-    style = None
+* 1 test failed
 
+Failed tests and their output was too large to report. Download the build's log file to see the details."""
+                ),
+                "error",
+            ),
+        )
+
+    def test_report_size_limit(self):
+        self.assertEqual(
+            _generate_report(
+                "Foo",
+                [
+                    junit_from_xml(
+                        dedent(
+                            """\
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites time="0.02">
+<testsuite name="Bar" tests="1" failures="1" skipped="0" time="0.02">
+<testcase classname="Bar/test_1" name="test_1" time="0.02">
+<failure><![CDATA[Some long output goes here...]]></failure>
+</testcase>
+</testsuite>
+</testsuites>"""
+                        )
+                    )
+                ],
+                size_limit=128,
+            ),
+            (
+                dedent(
+                    """\
+# Foo
+
+* 1 test failed
+
+Failed tests and their output was too large to report. Download the build's log file to see the details."""
+                ),
+                "error",
+            ),
+        )
+
+
+# Set size_limit to limit the byte size of the report. The default is 1MB as this
+# is the most that can be put into an annotation. If the generated report exceeds
+# this limit and failures are listed, it will be generated again without failures
+# listed. This minimal report will always fit into an annotation.
+# If include failures is False, total number of test will be reported but their names
+# and output will not be.
+def _generate_report(title, junit_objects, size_limit=1024 * 1024, list_failures=True):
     if not junit_objects:
-        return ("", style)
+        return ("", "success")
 
     failures = {}
     tests_run = 0
@@ -264,7 +336,7 @@ def _generate_report(title, junit_objects):
         )
 
     if not tests_run:
-        return ("", style)
+        return ("", None)
 
     style = "error" if tests_failed else "success"
     report = [f"# {title}", ""]
@@ -281,8 +353,17 @@ def plural(num_tests):
     if tests_failed:
         report.append(f"* {tests_failed} {plural(tests_failed)} failed")
 
-    if failures:
-        report.extend(["", "## Failed tests", "(click to see output)"])
+    if not list_failures:
+        report.extend(
+            [
+                "",
+                "Failed tests and their output was too large to report. "
+                "Download the build's log file to see the details.",
+            ]
+        )
+    elif failures:
+        report.extend(["", "## Failed Tests", "(click to see output)"])
+
     for testsuite_name, failures in failures.items():
         report.extend(["", f"### {testsuite_name}"])
         for name, output in failures:
@@ -298,7 +379,11 @@ def plural(num_tests):
             ]
         )
 
-    return "\n".join(report), style
+    report = "\n".join(report)
+    if len(report.encode("utf-8")) > size_limit:
+        return _generate_report(title, junit_objects, size_limit, list_failures=False)
+
+    return report, style
 
 
 def generate_report(title, junit_files):
@@ -315,14 +400,24 @@ def generate_report(title, junit_files):
     args = parser.parse_args()
 
     report, style = generate_report(args.title, args.junit_files)
-    check_call(
-        [
-            "buildkite-agent",
-            "annotate",
-            "--context",
-            args.context,
-            "--style",
-            style,
-            report,
-        ]
-    )
+
+    if report:
+        p = subprocess.Popen(
+            [
+                "buildkite-agent",
+                "annotate",
+                "--context",
+                args.context,
+                "--style",
+                style,
+            ],
+            stdin=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            universal_newlines=True,
+        )
+
+        # The report can be larger than the buffer for command arguments so we send
+        # it over stdin instead.
+        _, err = p.communicate(input=report)
+        if p.returncode:
+            raise RuntimeError(f"Failed to send report to buildkite-agent:\n{err}")
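
The new size_limit/list_failures logic above regenerates the report once, without per-test output, whenever the rendered Markdown would not fit in a Buildkite annotation (1 MB by default), and the report is now piped to buildkite-agent over stdin rather than passed as an argument. A minimal standalone sketch of the same size-limit fallback pattern (the names and the tiny limit here are illustrative, not part of the commit):

# Sketch of the fallback used by _generate_report: build the full report first
# and, if its UTF-8 encoding exceeds the limit, rebuild a short summary that is
# guaranteed to fit. All names and limits here are hypothetical.
def build_report(failed_tests, size_limit=1024 * 1024, list_failures=True):
    lines = ["# Results", f"* {len(failed_tests)} tests failed"]
    if list_failures:
        lines += ["", "## Failed Tests"] + [f"* {name}" for name in failed_tests]
    else:
        lines += ["", "Failed tests were too large to list; see the build log."]
    report = "\n".join(lines)
    if list_failures and len(report.encode("utf-8")) > size_limit:
        # Fall back to the minimal form, which always fits.
        return build_report(failed_tests, size_limit, list_failures=False)
    return report


# With a tiny limit the detailed listing is dropped and only the summary remains.
print(build_report(["test_a", "test_b"], size_limit=48))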

.ci/metrics/Dockerfile

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+FROM docker.io/python:3.12
+
+COPY requirements.lock.txt ./
+RUN pip3 install --no-cache-dir -r requirements.lock.txt
+COPY metrics.py ./
+
+CMD ["python3", "metrics.py"]

.ci/metrics/metrics.py

Lines changed: 182 additions & 0 deletions
@@ -0,0 +1,182 @@
+import requests
+import time
+import os
+from dataclasses import dataclass
+import sys
+
+import github
+from github import Github
+from github import Auth
+
+GRAFANA_URL = (
+    "https://influx-prod-13-prod-us-east-0.grafana.net/api/v1/push/influx/write"
+)
+GITHUB_PROJECT = "llvm/llvm-project"
+WORKFLOWS_TO_TRACK = ["Check code formatting"]
+SCRAPE_INTERVAL_SECONDS = 5 * 60
+
+
+@dataclass
+class JobMetrics:
+    job_name: str
+    queue_time: int
+    run_time: int
+    status: int
+    created_at_ns: int
+    workflow_id: int
+
+
+def get_metrics(github_repo: github.Repository, workflows_to_track: dict[str, int]):
+    """Gets the metrics for specified Github workflows.
+
+    This function takes in a list of workflows to track, and optionally the
+    workflow ID of the last tracked invocation. It grabs the relevant data
+    from Github, returning it to the caller.
+
+    Args:
+      github_repo: A github repo object to use to query the relevant information.
+      workflows_to_track: A dictionary mapping workflow names to the last
+        invocation ID where metrics have been collected, or None to collect the
+        last five results.
+
+    Returns:
+      Returns a list of JobMetrics objects, containing the relevant metrics about
+      the workflow.
+    """
+    workflow_runs = iter(github_repo.get_workflow_runs())
+
+    workflow_metrics = []
+
+    workflows_to_include = set(workflows_to_track.keys())
+
+    while len(workflows_to_include) > 0:
+        workflow_run = next(workflow_runs)
+        if workflow_run.status != "completed":
+            continue
+
+        # This workflow was already sampled for this run, or is not tracked at
+        # all. Ignoring.
+        if workflow_run.name not in workflows_to_include:
+            continue
+
+        # There were no new workflow invocations since the previous scrape.
+        # The API returns a sorted list with the most recent invocations first,
+        # so we can stop looking for this particular workflow. Continue to grab
+        # information on the other workflows of interest, if present.
+        if workflows_to_track[workflow_run.name] == workflow_run.id:
+            workflows_to_include.remove(workflow_run.name)
+            continue
+
+        workflow_jobs = workflow_run.jobs()
+        if workflow_jobs.totalCount == 0:
+            continue
+        if workflow_jobs.totalCount > 1:
+            raise ValueError(
+                f"Encountered an unexpected number of jobs: {workflow_jobs.totalCount}"
+            )
+
+        created_at = workflow_jobs[0].created_at
+        started_at = workflow_jobs[0].started_at
+        completed_at = workflow_jobs[0].completed_at
+
+        job_result = int(workflow_jobs[0].conclusion == "success")
+
+        queue_time = started_at - created_at
+        run_time = completed_at - started_at
+
+        if run_time.seconds == 0:
+            continue
+
+        if (
+            workflows_to_track[workflow_run.name] is None
+            or workflows_to_track[workflow_run.name] == workflow_run.id
+        ):
+            workflows_to_include.remove(workflow_run.name)
+        if (
+            workflows_to_track[workflow_run.name] is not None
+            and len(workflows_to_include) == 0
+        ):
+            break
+
+        # The timestamp associated with the event is expected by Grafana to be
+        # in nanoseconds.
+        created_at_ns = int(created_at.timestamp()) * 10**9
+
+        workflow_metrics.append(
+            JobMetrics(
+                workflow_run.name,
+                queue_time.seconds,
+                run_time.seconds,
+                job_result,
+                created_at_ns,
+                workflow_run.id,
+            )
+        )
+
+    return workflow_metrics
+
+
+def upload_metrics(workflow_metrics, metrics_userid, api_key):
+    """Upload metrics to Grafana.
+
+    Takes in a list of workflow metrics and then uploads them to Grafana
+    through a REST request.
+
+    Args:
+      workflow_metrics: A list of metrics to upload to Grafana.
+      metrics_userid: The userid to use for the upload.
+      api_key: The API key to use for the upload.
+    """
+    metrics_batch = []
+    for workflow_metric in workflow_metrics:
+        workflow_formatted_name = workflow_metric.job_name.lower().replace(" ", "_")
+        metrics_batch.append(
+            f"{workflow_formatted_name} queue_time={workflow_metric.queue_time},run_time={workflow_metric.run_time},status={workflow_metric.status} {workflow_metric.created_at_ns}"
+        )
+
+    request_data = "\n".join(metrics_batch)
+    response = requests.post(
+        GRAFANA_URL,
+        headers={"Content-Type": "text/plain"},
+        data=request_data,
+        auth=(metrics_userid, api_key),
+    )
+
+    if response.status_code < 200 or response.status_code >= 300:
+        print(
+            f"Failed to submit data to Grafana: {response.status_code}", file=sys.stderr
+        )
+
+
+def main():
+    # Authenticate with Github
+    auth = Auth.Token(os.environ["GITHUB_TOKEN"])
+    github_object = Github(auth=auth)
+    github_repo = github_object.get_repo("llvm/llvm-project")
+
+    grafana_api_key = os.environ["GRAFANA_API_KEY"]
+    grafana_metrics_userid = os.environ["GRAFANA_METRICS_USERID"]
+
+    workflows_to_track = {}
+    for workflow_to_track in WORKFLOWS_TO_TRACK:
+        workflows_to_track[workflow_to_track] = None
+
+    # Enter the main loop. Every five minutes we wake up and dump metrics for
+    # the relevant jobs.
+    while True:
+        current_metrics = get_metrics(github_repo, workflows_to_track)
+        if len(current_metrics) == 0:
+            print("No metrics found to upload.", file=sys.stderr)
+            continue
+
+        upload_metrics(current_metrics, grafana_metrics_userid, grafana_api_key)
+        print(f"Uploaded {len(current_metrics)} metrics", file=sys.stderr)
+
+        for workflow_metric in reversed(current_metrics):
+            workflows_to_track[workflow_metric.job_name] = workflow_metric.workflow_id
+
+        time.sleep(SCRAPE_INTERVAL_SECONDS)
+
+
+if __name__ == "__main__":
+    main()
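
The new upload_metrics function serializes each JobMetrics record into one InfluxDB line-protocol row (measurement name, comma-separated fields, then the creation timestamp in nanoseconds) and POSTs the joined batch as plain text to GRAFANA_URL. A small sketch of the payload it builds, using made-up numbers rather than real workflow data:

# Illustrative only: reproduces the line-protocol formatting from upload_metrics
# for one hypothetical JobMetrics record.
from dataclasses import dataclass


@dataclass
class JobMetrics:
    job_name: str
    queue_time: int      # seconds spent queued
    run_time: int        # seconds spent running
    status: int          # 1 == "success", 0 otherwise
    created_at_ns: int   # job creation time, nanoseconds since the epoch
    workflow_id: int


metric = JobMetrics("Check code formatting", 42, 310, 1, 1_700_000_000 * 10**9, 123)
name = metric.job_name.lower().replace(" ", "_")
print(
    f"{name} queue_time={metric.queue_time},run_time={metric.run_time},"
    f"status={metric.status} {metric.created_at_ns}"
)
# -> check_code_formatting queue_time=42,run_time=310,status=1 1700000000000000000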
