Commit 12d1795

Feat: improve Excel report with regression sheet and remove pandas
Update excel_generator.py with:

Regression analysis sheet:
- Add _create_regression_sheet() rendering regressions, fixes, and new failures in a dedicated Excel sheet with color-coded rows (red for regressions, green for fixes, orange for new failures)
- Accept a regression_data parameter in generate_excel_report()

Code improvements:
- Remove the pandas dependency; replace _calculate_statistics() internals with plain sum() via a _suite_stats() helper function
- DRY up _build_metadata_section() from two identical pytest/gtest blocks into a single loop over (prefix, label) pairs
- Remove the unused platform and ram columns from the system info section

Signed-off-by: Wilczynski, Andrzej <andrzej.wilczynski@intel.com>
1 parent 9055d63 commit 12d1795

1 file changed: +109 -109 lines changed

.github/scripts/report_generators/excel_generator.py

Lines changed: 109 additions & 109 deletions
@@ -4,7 +4,6 @@
 
 from datetime import datetime
 
-import pandas as pd
 from openpyxl import Workbook
 from openpyxl.styles import Alignment, Font, PatternFill
 from openpyxl.utils import get_column_letter
@@ -16,10 +15,18 @@
 COLOR_SKIPPED = "fff3cd"
 COLOR_SEPARATOR = "e6f2ff"
 COLOR_WHITE = "FFFFFF"
+COLOR_REGRESSION = "e2b3b3"
+COLOR_FIXED = "b3d9b3"
+COLOR_NEW_FAILURE = "f5d6a8"
 
 
 def generate_excel_report(
-    pytest_data, gtest_data, output_file, system_info_list=None, test_metadata=None
+    pytest_data,
+    gtest_data,
+    output_file,
+    system_info_list=None,
+    test_metadata=None,
+    regression_data=None,
 ):
     """Create Excel report with separate sheets for pytest and gtest."""
     wb = Workbook()
@@ -30,6 +37,8 @@ def generate_excel_report(
         _create_pytest_sheet(wb, pytest_data)
     if gtest_data:
         _create_gtest_sheet(wb, gtest_data)
+    if regression_data:
+        _create_regression_sheet(wb, regression_data)
     _create_summary_sheet(wb, pytest_data, gtest_data, system_info_list, test_metadata)
 
     wb.save(output_file)
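A minimal sketch of how the updated entry point could be called. The shapes of the suite dicts and of regression_data are assumptions inferred from the helpers further down in this diff (the total/passed/failed/skipped counters read by _suite_stats() and the regressions/fixes/new_failures lists read by _create_regression_sheet()); real suite dicts may carry additional keys used by the per-suite sheet builders, and the values here are invented:

# Hypothetical caller; dict shapes inferred from this diff, values invented.
pytest_data = [{"total": 10, "passed": 9, "failed": 1, "skipped": 0}]
gtest_data = [{"total": 5, "passed": 5, "failed": 0, "skipped": 0}]
regression_data = {"regressions": [], "fixes": [], "new_failures": []}

generate_excel_report(
    pytest_data,
    gtest_data,
    "report.xlsx",
    system_info_list=None,
    test_metadata=None,
    regression_data=regression_data,
)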
@@ -246,69 +255,40 @@ def _create_summary_sheet(wb, pytest_data, gtest_data, system_info_list, test_me
 
 def _calculate_statistics(pytest_data, gtest_data):
     """Calculate test statistics from data."""
-    pytest_total = pytest_passed = pytest_failed = pytest_skipped = 0
-    gtest_total = gtest_passed = gtest_failed = gtest_skipped = 0
-
-    if pytest_data:
-        df = pd.DataFrame(
-            [{k: v for k, v in d.items() if k != "test_cases"} for d in pytest_data]
-        )
-        pytest_total = df["total"].sum()
-        pytest_passed = df["passed"].sum()
-        pytest_failed = df["failed"].sum()
-        pytest_skipped = df["skipped"].sum()
-
-    if gtest_data:
-        df = pd.DataFrame(
-            [{k: v for k, v in d.items() if k != "test_cases"} for d in gtest_data]
-        )
-        gtest_total = df["total"].sum()
-        gtest_passed = df["passed"].sum()
-        gtest_failed = df["failed"].sum()
-        gtest_skipped = df["skipped"].sum()
-
-    pytest_pass_rate = (
-        (pytest_passed / (pytest_passed + pytest_failed) * 100)
-        if (pytest_passed + pytest_failed) > 0
-        else 0
-    )
-    gtest_pass_rate = (
-        (gtest_passed / (gtest_passed + gtest_failed) * 100)
-        if (gtest_passed + gtest_failed) > 0
-        else 0
-    )
-
-    combined_total = pytest_total + gtest_total
-    combined_passed = pytest_passed + gtest_passed
-    combined_failed = pytest_failed + gtest_failed
-    combined_skipped = pytest_skipped + gtest_skipped
-    combined_pass_rate = (
-        (combined_passed / (combined_passed + combined_failed) * 100)
-        if (combined_passed + combined_failed) > 0
-        else 0
-    )
 
+    def _suite_stats(data):
+        if not data:
+            return {
+                "total": 0,
+                "passed": 0,
+                "failed": 0,
+                "skipped": 0,
+                "pass_rate": 0,
+            }
+        passed = sum(d.get("passed", 0) for d in data)
+        failed = sum(d.get("failed", 0) for d in data)
+        return {
+            "total": sum(d.get("total", 0) for d in data),
+            "passed": passed,
+            "failed": failed,
+            "skipped": sum(d.get("skipped", 0) for d in data),
+            "pass_rate": (
+                passed / (passed + failed) * 100 if (passed + failed) > 0 else 0
+            ),
+        }
+
+    p = _suite_stats(pytest_data)
+    g = _suite_stats(gtest_data)
+    cp, cf = p["passed"] + g["passed"], p["failed"] + g["failed"]
     return {
-        "pytest": {
-            "total": pytest_total,
-            "passed": pytest_passed,
-            "failed": pytest_failed,
-            "skipped": pytest_skipped,
-            "pass_rate": pytest_pass_rate,
-        },
-        "gtest": {
-            "total": gtest_total,
-            "passed": gtest_passed,
-            "failed": gtest_failed,
-            "skipped": gtest_skipped,
-            "pass_rate": gtest_pass_rate,
-        },
+        "pytest": p,
+        "gtest": g,
         "combined": {
-            "total": combined_total,
-            "passed": combined_passed,
-            "failed": combined_failed,
-            "skipped": combined_skipped,
-            "pass_rate": combined_pass_rate,
+            "total": p["total"] + g["total"],
+            "passed": cp,
+            "failed": cf,
+            "skipped": p["skipped"] + g["skipped"],
+            "pass_rate": cp / (cp + cf) * 100 if (cp + cf) > 0 else 0,
         },
     }
 
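As a quick sanity check of the pandas-free path, a doctest-style sketch of what _calculate_statistics() now returns; the input counts are invented and the expected values follow directly from the sum() and pass-rate expressions above:

stats = _calculate_statistics(
    [{"total": 4, "passed": 3, "failed": 1, "skipped": 0}],  # invented pytest data
    [{"total": 2, "passed": 2, "failed": 0, "skipped": 0}],  # invented gtest data
)
assert stats["pytest"]["pass_rate"] == 75.0   # 3 / (3 + 1) * 100
assert stats["gtest"]["pass_rate"] == 100.0   # 2 / (2 + 0) * 100
assert stats["combined"]["total"] == 6
assert stats["combined"]["pass_rate"] == 5 / 6 * 100  # 5 passed of 6 passed+failed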

@@ -317,49 +297,20 @@ def _build_metadata_section(test_metadata):
     """Build test run metadata section."""
     data = [["Test Run Information"], []]
 
-    if test_metadata.get("pytest_run_number") and test_metadata.get("pytest_branch"):
-        run_date = (
-            test_metadata["pytest_run_date"].split("T")[0]
-            if test_metadata.get("pytest_run_date")
-            else "N/A"
-        )
-        run_time = (
-            test_metadata["pytest_run_date"].split("T")[1].split("Z")[0]
-            if test_metadata.get("pytest_run_date")
-            and "T" in test_metadata["pytest_run_date"]
-            else "N/A"
-        )
-        data.append(
-            [
-                "Pytest Run:",
-                f"#{test_metadata['pytest_run_number']} (branch: {test_metadata['pytest_branch']})",
-            ]
-        )
-        data.append(["Pytest Date:", f"{run_date} {run_time}"])
-        if test_metadata.get("pytest_run_url"):
-            data.append(["Pytest URL:", test_metadata["pytest_run_url"]])
-
-    if test_metadata.get("gtest_run_number") and test_metadata.get("gtest_branch"):
-        run_date = (
-            test_metadata["gtest_run_date"].split("T")[0]
-            if test_metadata.get("gtest_run_date")
-            else "N/A"
-        )
-        run_time = (
-            test_metadata["gtest_run_date"].split("T")[1].split("Z")[0]
-            if test_metadata.get("gtest_run_date")
-            and "T" in test_metadata["gtest_run_date"]
-            else "N/A"
-        )
-        data.append(
-            [
-                "GTest Run:",
-                f"#{test_metadata['gtest_run_number']} (branch: {test_metadata['gtest_branch']})",
-            ]
-        )
-        data.append(["GTest Date:", f"{run_date} {run_time}"])
-        if test_metadata.get("gtest_run_url"):
-            data.append(["GTest URL:", test_metadata["gtest_run_url"]])
+    for prefix, label in [("pytest", "Pytest"), ("gtest", "GTest")]:
+        run_num = test_metadata.get(f"{prefix}_run_number")
+        branch = test_metadata.get(f"{prefix}_branch")
+        if not run_num or not branch:
+            continue
+        raw_date = test_metadata.get(f"{prefix}_run_date", "")
+        parts = raw_date.split("T") if raw_date else []
+        run_date = parts[0] if parts else "N/A"
+        run_time = parts[1].rstrip("Z") if len(parts) > 1 else "N/A"
+        data.append([f"{label} Run:", f"#{run_num} (branch: {branch})"])
+        data.append([f"{label} Date:", f"{run_date} {run_time}"])
+        url = test_metadata.get(f"{prefix}_run_url")
+        if url:
+            data.append([f"{label} URL:", url])
 
     data.extend([[], []])
     return data
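A small illustration of the collapsed metadata loop, assuming the f"{prefix}_..." key names used above; the run number, branch, date, and URL are made up:

test_metadata = {
    "pytest_run_number": 42,                         # invented
    "pytest_branch": "main",                         # invented
    "pytest_run_date": "2024-05-01T12:30:00Z",       # invented
    "pytest_run_url": "https://example.com/run/42",  # invented
}
rows = _build_metadata_section(test_metadata)
# After the "Test Run Information" title row, rows contains:
#   ["Pytest Run:", "#42 (branch: main)"]
#   ["Pytest Date:", "2024-05-01 12:30:00"]
#   ["Pytest URL:", "https://example.com/run/42"]
# The GTest iteration is skipped because gtest_run_number/gtest_branch are absent.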
@@ -389,10 +340,8 @@ def _add_system_info_section(ws, system_info_list):
     # Headers
     sys_headers = [
         "Hostname",
-        "Platform",
         "CPU",
         "Cores",
-        "RAM",
         "HugePages",
         "OS",
         "Kernel",
@@ -406,10 +355,8 @@ def _add_system_info_section(ws, system_info_list):
         ws.append(
             [
                 sys_info.get("hostname", "unknown"),
-                sys_info.get("platform", "unknown"),
                 sys_info.get("cpu", "unknown"),
                 sys_info.get("cpu_cores", "unknown"),
-                sys_info.get("ram", "unknown"),
                 sys_info.get("hugepages", "unknown"),
                 sys_info.get("os", "unknown"),
                 sys_info.get("kernel", "unknown"),
@@ -418,6 +365,59 @@ def _add_system_info_section(ws, system_info_list):
         )
 
 
+def _create_regression_sheet(wb, regression_data):
+    """Create regression analysis sheet."""
+    ws = wb.create_sheet("Regressions")
+
+    sections = [
+        (
+            "REGRESSIONS (previously passed, now failing)",
+            "regressions",
+            COLOR_REGRESSION,
+        ),
+        ("FIXES (previously failing, now passing)", "fixes", COLOR_FIXED),
+        ("NEW FAILURES (not present in baseline)", "new_failures", COLOR_NEW_FAILURE),
+    ]
+
+    for title, key, color in sections:
+        entries = regression_data.get(key, [])
+        row_start = ws.max_row + 1
+        _add_section_title(
+            ws, f"{title} ({len(entries)})", f"A{row_start}:F{row_start}"
+        )
+        ws.append([])
+
+        headers = ["Platform", "NIC", "Category", "Test Name", "Baseline", "Current"]
+        ws.append(headers)
+        _style_header_row(ws, ws.max_row, len(headers))
+
+        if entries:
+            for e in entries:
+                ws.append(
+                    [
+                        e["platform"],
+                        e["nic"],
+                        e["category"],
+                        e["test_name"],
+                        e.get("baseline_result") or "N/A",
+                        e["current_result"],
+                    ]
+                )
+                row = ws.max_row
+                # Highlight the row with the section color
+                for col in range(1, 7):
+                    ws.cell(row=row, column=col).fill = PatternFill(
+                        start_color=color, end_color=color, fill_type="solid"
+                    )
+        else:
+            ws.append(["No entries"])
+
+        ws.append([])
+        ws.append([])
+
+    _auto_adjust_columns(ws)
+
+
 def _add_section_title(ws, title, merge_range):
     """Add styled section title."""
     ws.append([title])
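For completeness, a sketch of the regression_data shape that _create_regression_sheet() consumes; the section keys and per-entry fields are taken straight from the code above, while the concrete values are invented examples:

regression_data = {
    "regressions": [
        {
            "platform": "example-platform",   # invented values
            "nic": "example-nic",
            "category": "functional",
            "test_name": "test_example_case",
            "baseline_result": "passed",
            "current_result": "failed",
        }
    ],
    "fixes": [],
    "new_failures": [],  # entries here may have baseline_result None, shown as "N/A"
}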
