Skip to content

Commit c0fe781

Browse files
committed
Merge remote-tracking branch 'origin/main' into 2024q4-riscv-globals-merging-mergeexternalbydefault
2 parents 2e3ee91 + 22780f8 commit c0fe781

File tree

6,191 files changed

+254167
-107184
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

6,191 files changed

+254167
-107184
lines changed

.ci/generate_test_report.py

Lines changed: 86 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
# python3 -m unittest discover -p generate_test_report.py
66

77
import argparse
8+
import os
89
import subprocess
910
import unittest
1011
from io import StringIO
@@ -18,7 +19,7 @@ def junit_from_xml(xml):
1819

1920
class TestReports(unittest.TestCase):
2021
    def test_title_only(self):
        # With no junit objects at all, the report body is empty and the
        # annotation style is "success".
        self.assertEqual(_generate_report("Foo", []), ("", "success"))
2223

2324
def test_no_tests_in_testsuite(self):
2425
self.assertEqual(
@@ -267,6 +268,46 @@ def test_report_dont_list_failures(self):
267268
),
268269
)
269270

271+
    def test_report_dont_list_failures_link_to_log(self):
        # When list_failures=False and Buildkite environment info is supplied,
        # the report should replace the plain "Download" text with a markdown
        # link to the build's downloadable log file.
        self.assertEqual(
            _generate_report(
                "Foo",
                [
                    junit_from_xml(
                        dedent(
                            """\
                            <?xml version="1.0" encoding="UTF-8"?>
                            <testsuites time="0.02">
                            <testsuite name="Bar" tests="1" failures="1" skipped="0" time="0.02">
                            <testcase classname="Bar/test_1" name="test_1" time="0.02">
                            <failure><![CDATA[Output goes here]]></failure>
                            </testcase>
                            </testsuite>
                            </testsuites>"""
                        )
                    )
                ],
                list_failures=False,
                buildkite_info={
                    "BUILDKITE_ORGANIZATION_SLUG": "organization_slug",
                    "BUILDKITE_PIPELINE_SLUG": "pipeline_slug",
                    "BUILDKITE_BUILD_NUMBER": "build_number",
                    "BUILDKITE_JOB_ID": "job_id",
                },
            ),
            (
                dedent(
                    """\
                    # Foo

                    * 1 test failed

                    Failed tests and their output was too large to report. [Download](https://buildkite.com/organizations/organization_slug/pipelines/pipeline_slug/builds/build_number/jobs/job_id/download.txt) the build's log file to see the details."""
                ),
                "error",
            ),
        )
310+
270311
def test_report_size_limit(self):
271312
self.assertEqual(
272313
_generate_report(
@@ -308,7 +349,13 @@ def test_report_size_limit(self):
308349
# listed. This minimal report will always fit into an annotation.
309350
# If include failures is False, total number of test will be reported but their names
310351
# and output will not be.
311-
def _generate_report(title, junit_objects, size_limit=1024 * 1024, list_failures=True):
352+
def _generate_report(
353+
title,
354+
junit_objects,
355+
size_limit=1024 * 1024,
356+
list_failures=True,
357+
buildkite_info=None,
358+
):
312359
if not junit_objects:
313360
return ("", "success")
314361

@@ -336,7 +383,7 @@ def _generate_report(title, junit_objects, size_limit=1024 * 1024, list_failures
336383
)
337384

338385
if not tests_run:
339-
return ("", style)
386+
return ("", None)
340387

341388
style = "error" if tests_failed else "success"
342389
report = [f"# {title}", ""]
@@ -354,11 +401,21 @@ def plural(num_tests):
354401
report.append(f"* {tests_failed} {plural(tests_failed)} failed")
355402

356403
if not list_failures:
404+
if buildkite_info is not None:
405+
log_url = (
406+
"https://buildkite.com/organizations/{BUILDKITE_ORGANIZATION_SLUG}/"
407+
"pipelines/{BUILDKITE_PIPELINE_SLUG}/builds/{BUILDKITE_BUILD_NUMBER}/"
408+
"jobs/{BUILDKITE_JOB_ID}/download.txt".format(**buildkite_info)
409+
)
410+
download_text = f"[Download]({log_url})"
411+
else:
412+
download_text = "Download"
413+
357414
report.extend(
358415
[
359416
"",
360417
"Failed tests and their output was too large to report. "
361-
"Download the build's log file to see the details.",
418+
f"{download_text} the build's log file to see the details.",
362419
]
363420
)
364421
elif failures:
@@ -381,13 +438,23 @@ def plural(num_tests):
381438

382439
report = "\n".join(report)
383440
if len(report.encode("utf-8")) > size_limit:
384-
return _generate_report(title, junit_objects, size_limit, list_failures=False)
441+
return _generate_report(
442+
title,
443+
junit_objects,
444+
size_limit,
445+
list_failures=False,
446+
buildkite_info=buildkite_info,
447+
)
385448

386449
return report, style
387450

388451

389-
def generate_report(title, junit_files, buildkite_info=None):
    """Generate a markdown report from a set of JUnit XML files.

    Args:
      title: Title placed at the top of the generated report.
      junit_files: Paths to JUnit XML report files to parse.
      buildkite_info: Optional dict of Buildkite environment values used to
        construct a link to the build's log file; None when unavailable.

    Returns:
      The (report_text, style) tuple produced by _generate_report.
    """
    # Default buildkite_info to None (mirroring _generate_report's own default)
    # so existing two-argument callers keep working.
    return _generate_report(
        title,
        [JUnitXml.fromfile(p) for p in junit_files],
        buildkite_info=buildkite_info,
    )
391458

392459

393460
if __name__ == "__main__":
@@ -399,7 +466,18 @@ def generate_report(title, junit_files):
399466
parser.add_argument("junit_files", help="Paths to JUnit report files.", nargs="*")
400467
args = parser.parse_args()
401468

402-
report, style = generate_report(args.title, args.junit_files)
469+
# All of these are required to build a link to download the log file.
470+
env_var_names = [
471+
"BUILDKITE_ORGANIZATION_SLUG",
472+
"BUILDKITE_PIPELINE_SLUG",
473+
"BUILDKITE_BUILD_NUMBER",
474+
"BUILDKITE_JOB_ID",
475+
]
476+
buildkite_info = {k: v for k, v in os.environ.items() if k in env_var_names}
477+
if len(buildkite_info) != len(env_var_names):
478+
buildkite_info = None
479+
480+
report, style = generate_report(args.title, args.junit_files, buildkite_info)
403481

404482
if report:
405483
p = subprocess.Popen(

.ci/metrics/Dockerfile

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
# Container image that runs the metrics collection script (metrics.py).
FROM docker.io/python:3.12

# Install pinned dependencies first so this layer is cached independently of
# changes to the application code.
COPY requirements.lock.txt ./
RUN pip3 install --no-cache-dir -r requirements.lock.txt
COPY metrics.py ./

CMD ["python3", "metrics.py"]

.ci/metrics/metrics.py

Lines changed: 182 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,182 @@
1+
import requests
2+
import time
3+
import os
4+
from dataclasses import dataclass
5+
import sys
6+
7+
import github
8+
from github import Github
9+
from github import Auth
10+
11+
GRAFANA_URL = (
12+
"https://influx-prod-13-prod-us-east-0.grafana.net/api/v1/push/influx/write"
13+
)
14+
GITHUB_PROJECT = "llvm/llvm-project"
15+
WORKFLOWS_TO_TRACK = ["Check code formatting"]
16+
SCRAPE_INTERVAL_SECONDS = 5 * 60
17+
18+
19+
@dataclass
class JobMetrics:
    # Metrics for a single completed workflow job, as scraped from GitHub.
    # Name of the workflow the job belongs to.
    job_name: str
    # Seconds the job spent queued (started_at - created_at).
    queue_time: int
    # Seconds the job spent running (completed_at - started_at).
    run_time: int
    # 1 if the job concluded with "success", 0 otherwise.
    status: int
    # Job creation time in nanoseconds since the epoch (Grafana expects ns).
    created_at_ns: int
    # ID of the workflow run this job came from, used to dedupe scrapes.
    workflow_id: int
27+
28+
29+
def get_metrics(github_repo: github.Repository, workflows_to_track: dict[str, int]):
    """Gets the metrics for specified Github workflows.

    This function takes in a list of workflows to track, and optionally the
    workflow ID of the last tracked invocation. It grabs the relevant data
    from Github, returning it to the caller.

    Args:
      github_repo: A github repo object to use to query the relevant information.
      workflows_to_track: A dictionary mapping workflow names to the last
        invocation ID where metrics have been collected, or None to collect
        only the most recent completed invocation.

    Returns:
      Returns a list of JobMetrics objects, containing the relevant metrics about
      the workflow.
    """
    workflow_metrics = []
    workflows_to_include = set(workflows_to_track.keys())

    # Iterate with a for loop so that exhausting the run listing simply ends
    # the scan; the previous while/next() pattern raised StopIteration if the
    # runs ran out before every tracked workflow had been found.
    for workflow_run in github_repo.get_workflow_runs():
        if not workflows_to_include:
            break

        if workflow_run.status != "completed":
            continue

        # This workflow was already sampled for this run, or is not tracked at
        # all. Ignoring.
        if workflow_run.name not in workflows_to_include:
            continue

        # There were no new workflow invocations since the previous scrape.
        # The API returns a sorted list with the most recent invocations first,
        # so we can stop looking for this particular workflow. Continue to grab
        # information on the other workflows of interest, if present.
        if workflows_to_track[workflow_run.name] == workflow_run.id:
            workflows_to_include.remove(workflow_run.name)
            continue

        workflow_jobs = workflow_run.jobs()
        if workflow_jobs.totalCount == 0:
            continue
        if workflow_jobs.totalCount > 1:
            raise ValueError(
                f"Encountered an unexpected number of jobs: {workflow_jobs.totalCount}"
            )

        created_at = workflow_jobs[0].created_at
        started_at = workflow_jobs[0].started_at
        completed_at = workflow_jobs[0].completed_at

        job_result = int(workflow_jobs[0].conclusion == "success")

        queue_time = started_at - created_at
        run_time = completed_at - started_at

        # Use total_seconds() rather than .seconds: for a timedelta of a day
        # or more, .seconds only yields the sub-day component.
        if run_time.total_seconds() == 0:
            continue

        # On the first scrape for a workflow (no previously recorded run ID),
        # sample only its most recent invocation. (The == workflow_run.id case
        # is handled above with a `continue`, so it cannot recur here; the
        # old trailing break was likewise unreachable and has been dropped.)
        if workflows_to_track[workflow_run.name] is None:
            workflows_to_include.remove(workflow_run.name)

        # The timestamp associated with the event is expected by Grafana to be
        # in nanoseconds.
        created_at_ns = int(created_at.timestamp()) * 10**9

        workflow_metrics.append(
            JobMetrics(
                workflow_run.name,
                int(queue_time.total_seconds()),
                int(run_time.total_seconds()),
                job_result,
                created_at_ns,
                workflow_run.id,
            )
        )

    return workflow_metrics
117+
118+
119+
def upload_metrics(workflow_metrics, metrics_userid, api_key):
    """Push a batch of job metrics to Grafana.

    Each metric is rendered as one InfluxDB line-protocol record and the whole
    batch is submitted in a single authenticated POST request.

    Args:
      workflow_metrics: A list of metrics to upload to Grafana.
      metrics_userid: The userid to use for the upload.
      api_key: The API key to use for the upload.
    """
    records = []
    for metric in workflow_metrics:
        series = metric.job_name.lower().replace(" ", "_")
        records.append(
            f"{series} queue_time={metric.queue_time},run_time={metric.run_time},status={metric.status} {metric.created_at_ns}"
        )

    response = requests.post(
        GRAFANA_URL,
        headers={"Content-Type": "text/plain"},
        data="\n".join(records),
        auth=(metrics_userid, api_key),
    )

    # Anything outside the 2xx range is treated as a failed submission.
    if not (200 <= response.status_code < 300):
        print(
            f"Failed to submit data to Grafana: {response.status_code}", file=sys.stderr
        )
149+
150+
151+
def main():
    """Scrape GitHub workflow metrics and push them to Grafana in a loop."""
    # Authenticate with Github
    auth = Auth.Token(os.environ["GITHUB_TOKEN"])
    github_object = Github(auth=auth)
    github_repo = github_object.get_repo("llvm/llvm-project")

    grafana_api_key = os.environ["GRAFANA_API_KEY"]
    grafana_metrics_userid = os.environ["GRAFANA_METRICS_USERID"]

    # None marks a workflow whose invocations have not been sampled yet.
    workflows_to_track = {}
    for workflow_to_track in WORKFLOWS_TO_TRACK:
        workflows_to_track[workflow_to_track] = None

    # Enter the main loop. Every five minutes we wake up and dump metrics for
    # the relevant jobs.
    while True:
        current_metrics = get_metrics(github_repo, workflows_to_track)
        if len(current_metrics) == 0:
            print("No metrics found to upload.", file=sys.stderr)
        else:
            upload_metrics(current_metrics, grafana_metrics_userid, grafana_api_key)
            print(f"Uploaded {len(current_metrics)} metrics", file=sys.stderr)

            # Record the newest run ID per workflow; iterate in reverse because
            # the metrics list has the most recent invocations first.
            for workflow_metric in reversed(current_metrics):
                workflows_to_track[workflow_metric.job_name] = workflow_metric.workflow_id

        # Always sleep before the next scrape. The previous `continue` on the
        # no-metrics path skipped the sleep, busy-looping against the GitHub
        # API without any delay.
        time.sleep(SCRAPE_INTERVAL_SECONDS)
179+
180+
181+
# Start the scrape loop only when executed as a script (not on import).
if __name__ == "__main__":
    main()

0 commit comments

Comments (0)