Skip to content

Commit f2e2533

Browse files
ci: enhance CI testing by summarizing test durations (#3754)
* ci: add minimal testing requirements for CI pipeline * chore: update testing dependencies in pyproject.toml * feat: enhance log collection by adding JSONL file support and updating pytest arguments * chore: adding changelog file 3754.dependencies.md [dependabot-skip] * feat: add pytest summary script and update CI workflow to include test duration reporting * style: using click CLI for passing arguments. * test: printing files * feat: add step to uncompress log files in CI workflow * fix: adjust file search depth for uncompressing logs and improve directory listing in CI workflow * fix: checking cwd * fix: update CI workflow to improve test summary output and formatting * fix: enhance test summary output formatting and improve readability in CI workflow * fix: remove unnecessary export of GITHUB_STEP_SUMMARY in CI workflow summary creation * ci: adding all jobs * ci: printing only 10 jobs * fix: wrong parsing * build: removing pytest-durations dependency --------- Co-authored-by: pyansys-ci-bot <[email protected]>
1 parent ed6cbd6 commit f2e2533

File tree

7 files changed

+329
-8
lines changed

7 files changed

+329
-8
lines changed

.ci/collect_mapdl_logs_locals.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@ cp *pymapdl.apdl ./"$LOG_NAMES"/ || echo "No PYMAPDL APDL log files could be fou
99
echo "Copying the profiling files..."
1010
cp -r prof ./"$LOG_NAMES"/prof || echo "No profile files could be found"
1111

12+
echo "Copying the JSONL files..."
13+
cp *.jsonl ./"$LOG_NAMES"/ || echo "No JSONL files could be found"
1214

1315
ls -la ./"$LOG_NAMES"
1416

.ci/collect_mapdl_logs_remote.sh

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,9 @@ cp mapdl_launch_1.log ./"$LOG_NAMES"/mapdl_launch_1.log || echo "MAPDL launch do
3838
echo "Copying the profiling files..."
3939
cp -r prof ./"$LOG_NAMES"/prof || echo "No profile files could be found"
4040

41+
echo "Copying the JSONL files..."
42+
cp *.jsonl ./"$LOG_NAMES"/ || echo "No JSONL files could be found"
43+
4144
echo "Collecting file structure..."
4245
ls -R > ./"$LOG_NAMES"/files_structure.txt || echo "Failed to copy file structure to a file"
4346

.ci/pytest_summary.py

Lines changed: 251 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,251 @@
1+
import json
2+
import os
3+
4+
import click
5+
import numpy as np
6+
7+
BIG_WIDTH = 80
8+
SMALL_WIDTH = 8
9+
10+
11+
def find_json_files(base_dir):
    """Recursively collect every ``*.jsonl`` report file under *base_dir*.

    Parameters
    ----------
    base_dir : str
        Directory to walk.

    Returns
    -------
    list of str
        Full paths of every file whose name ends in ``.jsonl``.
    """
    return [
        os.path.join(root, name)
        for root, _, names in os.walk(base_dir)
        for name in names
        if name.endswith(".jsonl")
    ]
19+
20+
21+
def read_json_file(file_path):
    """Read a pytest-reportlog JSONL file and return its records as a list.

    Each non-empty line is parsed as one JSON object. Blank lines (e.g. a
    trailing newline) are skipped, and a malformed line is reported and
    ignored, so a single bad record no longer discards the whole file
    (the original parsed all lines in one try-block and returned ``[]``
    on the first failure).

    Parameters
    ----------
    file_path : str
        Path to the ``.jsonl`` file.

    Returns
    -------
    list of dict
        Every successfully parsed record, in file order.
    """
    records = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line_no, line in enumerate(f, start=1):
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines
            try:
                records.append(json.loads(line))
            except json.JSONDecodeError as e:
                print(f"Error reading {file_path}:{line_no}: {e}")
    return records
30+
31+
32+
def extract_tests_with_tags(json_files):
    """Extract passed-test timing records, tagged by their log directory.

    Only records that report ``outcome == "passed"`` and a non-zero
    ``duration`` are kept. The tag list is derived from the name of the
    directory containing the file (CI log directories are named with
    ``-``-separated components, conventionally ending in ``logs``).

    Parameters
    ----------
    json_files : list of str
        Paths of pytest-reportlog JSONL files.

    Returns
    -------
    list of dict
        One record per (test, phase) with keys ``tags``, ``id``,
        ``nodeid``, ``duration`` and ``when``.
    """
    tests = []

    for file_path in json_files:
        directory_name = os.path.basename(os.path.dirname(file_path))
        test_data = read_json_file(file_path)

        for test in test_data:
            if test.get("outcome", "").lower() == "passed" and test.get("duration"):
                # "nodeid" may be absent on non-test records; default to "".
                nodeid = test.get("nodeid") or ""
                if nodeid.startswith("tests/"):
                    nodeid = nodeid[len("tests/") :]

                when = test.get("when")
                duration = test["duration"]
                # Drop the conventional "logs" component if present instead
                # of crashing (list.remove raises ValueError) when a
                # directory does not follow the naming scheme.
                tags = directory_name.split("-")
                if "logs" in tags:
                    tags.remove("logs")
                id_ = f"{nodeid}({when})"

                tests.append(
                    {
                        "tags": tags,
                        "id": id_,
                        "nodeid": nodeid,
                        "duration": duration,
                        "when": when,
                    }
                )
    return tests
62+
63+
64+
def _trimmed_stats(durations, percentile):
    """Mean/std of the durations strictly below the given percentile.

    Returns ``(nan, nan)`` when trimming leaves no samples — e.g. when
    every duration equals the percentile value, so nothing is strictly
    below it.
    """
    mask = durations < np.percentile(durations, percentile)
    if mask.sum() == 0:
        return np.nan, np.nan
    return np.mean(durations[mask]), np.std(durations[mask])


def compute_statistics(tests):
    """Compute duration statistics per test ID.

    For each unique test ID the full-sample mean and standard deviation
    are computed, plus the same statistics with the slowest outliers
    trimmed at the 99th and 75th percentiles (the duplicated trim logic
    is factored into :func:`_trimmed_stats`).

    Parameters
    ----------
    tests : list of dict
        Records as produced by ``extract_tests_with_tags`` (must contain
        ``id`` and ``duration`` keys).

    Returns
    -------
    list of dict
        One summary entry per test ID with keys ``id``, ``n_tests``,
        ``average_duration``, ``std_dev``, ``avg_duration_minus_one``,
        ``std_dev_minus_one``, ``avg_duration_minus_34`` and
        ``std_dev_minus_34``.
    """
    test_stats = {}

    for test in tests:
        stats = test_stats.setdefault(test["id"], {"durations": [], "n_tests": 0})
        stats["durations"].append(test["duration"])
        stats["n_tests"] += 1

    summary = []

    for test_id, data in test_stats.items():
        durations = np.array(data["durations"])

        if durations.size == 0:
            continue

        avg_99, std_99 = _trimmed_stats(durations, 99)
        avg_75, std_75 = _trimmed_stats(durations, 75)

        summary.append(
            {
                "id": test_id,
                "n_tests": data["n_tests"],
                "average_duration": np.mean(durations),
                "std_dev": np.std(durations),
                "avg_duration_minus_one": avg_99,
                "std_dev_minus_one": std_99,
                "avg_duration_minus_34": avg_75,
                "std_dev_minus_34": std_75,
            }
        )

    return summary
120+
121+
122+
def print_table(data, keys, headers, title=""):
    """Render *data* as a Markdown-style table using ``click.echo``.

    Parameters
    ----------
    data : list of dict
        Rows to print (one summary dict per test).
    keys : list of str
        Dict keys to print, in column order; the first key is the wide
        test-ID column, the rest are fixed-width numeric columns.
    headers : list of str
        Column headers matching *keys*.
    title : str, optional
        When given, printed as a bold banner row above the header.
    """
    JUNCTION = "|"

    def make_bold(s):
        return click.style(s, bold=True)

    # First column is wide (test id); remaining headers are clipped/centred
    # to the small fixed width.
    h = [headers[0].ljust(BIG_WIDTH)]
    h.extend([each.center(SMALL_WIDTH)[:SMALL_WIDTH] for each in headers[1:]])

    len_h = len("| " + " | ".join(h) + " |")

    sep = (
        f"{JUNCTION}-"
        + f"-{JUNCTION}-".join(["-" * len(each) for each in h])
        + f"-{JUNCTION}"
    )

    if title:
        click.echo(
            "| " + make_bold(f"Top {len(data)} {title}".center(len_h - 4)) + " |"
        )
        click.echo(sep)

    click.echo("| " + " | ".join([make_bold(each) for each in h]) + " |")
    click.echo(sep)

    for test in data:
        s = []
        for i, each_key in enumerate(keys):

            if i == 0:
                id_ = test[each_key]

                # Escape Markdown-sensitive characters in the test id.
                # The original used "\(" etc., which are invalid escape
                # sequences (SyntaxWarning since Python 3.12, scheduled to
                # become an error); "\\(" emits the same backslash+paren.
                id_ = (
                    id_.replace("(", "\\(")
                    .replace(")", "\\)")
                    .replace("[", "\\[")
                    .replace("]", "\\]")
                )
                # Truncate overly long ids, keeping head and tail so the
                # test remains recognizable.
                if len(id_) >= BIG_WIDTH:
                    id_ = id_[: BIG_WIDTH - 15] + "..." + id_[-12:]

                s.append(f"{id_}".ljust(BIG_WIDTH)[0:BIG_WIDTH])

            elif each_key == "n_tests":
                s.append(f"{int(test[each_key])}".center(SMALL_WIDTH))
            else:
                if np.isnan(test[each_key]):
                    s.append("N/A".center(SMALL_WIDTH))
                else:
                    s.append(f"{test[each_key]:.4f}".center(SMALL_WIDTH))

        click.echo("| " + " | ".join(s) + " |")
178+
179+
180+
def print_summary(summary, num=10):
    """Print the top *num* longest tests and the top *num* most variable tests.

    Parameters
    ----------
    summary : list of dict
        Per-test statistics as produced by ``compute_statistics``.
    num : int, optional
        How many rows to show in each table.
    """
    by_duration = sorted(summary, key=lambda s: s["average_duration"], reverse=True)
    by_variability = sorted(summary, key=lambda s: s["std_dev"], reverse=True)

    print(f"\n## Top {num} Longest Running Tests\n")
    print_table(
        by_duration[:num],
        ["id", "n_tests", "average_duration", "std_dev"],
        ["Test ID", "N. tests", "Avg", "STD"],
    )

    print("")
    print(f"\n## Top {num} Most Variable Running Tests\n")
    variability_keys = [
        "id",
        "n_tests",
        "std_dev",
        "average_duration",
        "std_dev_minus_one",
        "avg_duration_minus_one",
        "std_dev_minus_34",
        "avg_duration_minus_34",
    ]
    variability_headers = [
        "Test ID",
        "N. tests",
        "Std",
        "Avg",
        "Std-99%",
        "Avg-99%",
        "Std-75%",
        "Avg-75%",
    ]
    print_table(by_variability[:num], variability_keys, variability_headers)
219+
220+
221+
@click.command()
@click.option(
    "--directory",
    type=click.Path(exists=True, file_okay=False, dir_okay=True),
    default=None,
    help="Base directory to scan for *.jsonl report files. Defaults to the CWD.",
)
@click.option(
    "--num", default=10, help="Number of top tests to display.", show_default=True
)
@click.option(
    "--save-file",
    default=None,
    help="Optional JSONL file to append the collected test durations to.",
    show_default=True,
)
def analyze_tests(directory, num, save_file):
    """Summarize pytest durations found in pytest-reportlog JSONL files.

    Walks *directory* for ``*.jsonl`` files, extracts passed-test timing
    records, optionally appends them to *save_file* (JSON Lines), and
    prints the longest-running and most-variable tests.
    """
    directory = directory or os.getcwd()
    json_files = find_json_files(directory)
    tests = extract_tests_with_tags(json_files)

    if save_file:
        # Append one compact JSON object per line (valid JSONL). The
        # original called json.dump(..., indent=2) repeatedly, producing
        # concatenated pretty-printed objects that no JSON/JSONL parser
        # can read back.
        with open(save_file, "a+", encoding="utf-8") as f:
            for each_record in tests:
                f.write(json.dumps(each_record) + "\n")

    summary = compute_statistics(tests)
    print_summary(summary, num=num)


if __name__ == "__main__":
    analyze_tests()
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ pyfakefs==5.7.2
22
pytest-cov==6.0.0
33
pytest-profiling==1.8.1
44
pytest-random-order==1.1.1
5+
pytest-reportlog==0.4.0
56
pytest-rerunfailures==15.0
67
pytest-timeout==2.3.1
78
pytest==8.3.4

0 commit comments

Comments
 (0)