
Commit 2f87286

Look at all individual results and summarize
1 parent 59426b7 commit 2f87286

4 files changed, +154 −31 lines changed

.github/workflows/CICD.yml

Lines changed: 5 additions & 3 deletions
@@ -935,17 +935,19 @@ jobs:
       - uses: Swatinem/rust-cache@v2
       - name: Run sccache-cache
         uses: mozilla-actions/[email protected]
+      - name: Install/setup prerequisites
+        shell: bash
+        run: |
+          sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev
       - name: Build coreutils as multiple binaries
         shell: bash
         run: |
           ## Build individual uutil binaries
           set -v
           make
-      - name: Install/setup prerequisites
+      - name: Run toybox src
         shell: bash
         run: |
-          sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev
-          ## Install/setup prerequisites
           make toybox-src
       - name: Run Toybox test suite
         id: summary

.github/workflows/GnuTests.yml

Lines changed: 11 additions & 5 deletions
@@ -65,8 +65,9 @@ jobs:
           TEST_ROOT_FULL_SUMMARY_FILE='gnu-root-full-result.json'
           TEST_SELINUX_FULL_SUMMARY_FILE='selinux-gnu-full-result.json'
           TEST_SELINUX_ROOT_FULL_SUMMARY_FILE='selinux-root-gnu-full-result.json'
+          AGGREGATED_SUMMARY_FILE='aggregated-result.json'

-          outputs SUITE_LOG_FILE ROOT_SUITE_LOG_FILE SELINUX_SUITE_LOG_FILE SELINUX_ROOT_SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE TEST_FULL_SUMMARY_FILE TEST_ROOT_FULL_SUMMARY_FILE TEST_SELINUX_FULL_SUMMARY_FILE TEST_SELINUX_ROOT_FULL_SUMMARY_FILE
+          outputs SUITE_LOG_FILE ROOT_SUITE_LOG_FILE SELINUX_SUITE_LOG_FILE SELINUX_ROOT_SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE TEST_FULL_SUMMARY_FILE TEST_ROOT_FULL_SUMMARY_FILE TEST_SELINUX_FULL_SUMMARY_FILE TEST_SELINUX_ROOT_FULL_SUMMARY_FILE AGGREGATED_SUMMARY_FILE
       - name: Checkout code (uutil)
         uses: actions/checkout@v4
         with:

@@ -272,7 +273,7 @@ jobs:
           if test -f "${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}"
           then
             # Look at all individual results and summarize
-            eval $(python3 ${path_UUTILS}/util/analyze-gnu-results.py ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }})
+            eval $(python3 ${path_UUTILS}/util/analyze-gnu-results.py -o=${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }})

             if [[ "$TOTAL" -eq 0 || "$TOTAL" -eq 1 ]]; then
               echo "::error ::Failed to parse test results from '${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}'; failing early"

@@ -325,21 +326,26 @@ jobs:
         with:
           name: gnu-full-result.json
           path: ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}
-      - name: Upload full json results
+      - name: Upload root json results
         uses: actions/upload-artifact@v4
         with:
           name: gnu-root-full-result.json
           path: ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }}
-      - name: Upload full json results
+      - name: Upload selinux json results
         uses: actions/upload-artifact@v4
         with:
           name: selinux-gnu-full-result.json
           path: ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }}
-      - name: Upload full json results
+      - name: Upload selinux root json results
         uses: actions/upload-artifact@v4
         with:
           name: selinux-root-gnu-full-result.json
           path: ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }}
+      - name: Upload aggregated json results
+        uses: actions/upload-artifact@v4
+        with:
+          name: aggregated-result.json
+          path: ${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }}
       - name: Compare test failures VS reference
         shell: bash
         run: |
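Not part of the commit, but useful context: analyze-gnu-results.py prints "export NAME=value" lines on stdout, so the eval $(python3 ...) call above pulls TOTAL, PASS, FAIL, SKIP, XPASS and ERROR into the step's shell, where the "$TOTAL" sanity check a few lines later reads them. A minimal sketch of that contract, using placeholder result files rather than the *_SUMMARY_FILE paths the workflow actually passes:

  # Placeholder filenames; the printed values depend on the actual test runs.
  eval "$(python3 util/analyze-gnu-results.py -o=aggregated-result.json run1.json run2.json)"
  echo "TOTAL=$TOTAL PASS=$PASS FAIL=$FAIL SKIP=$SKIP XPASS=$XPASS ERROR=$ERROR"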

.vscode/cspell.dictionaries/jargon.wordlist.txt

Lines changed: 1 addition & 0 deletions
@@ -142,6 +142,7 @@ whitespace
 wordlist
 wordlists
 xattrs
+xpass

 # * abbreviations
 consts

util/analyze-gnu-results.py

Lines changed: 137 additions & 23 deletions
@@ -1,15 +1,102 @@
 #!/usr/bin/env python3
+
+"""
+GNU Test Results Analyzer and Aggregator
+
+This script analyzes and aggregates test results from the GNU test suite.
+It parses JSON files containing test results (PASS/FAIL/SKIP/ERROR) and:
+1. Counts the number of tests in each result category
+2. Can aggregate results from multiple JSON files with priority ordering
+3. Outputs shell export statements for use in GitHub Actions workflows
+
+Priority order for aggregation (highest to lowest):
+- PASS: Takes precedence over all other results (best outcome)
+- FAIL: Takes precedence over ERROR and SKIP
+- ERROR: Takes precedence over SKIP
+- SKIP: Lowest priority
+
+Usage:
+  - Single file:
+    python analyze-gnu-results.py test-results.json
+
+  - Multiple files (with aggregation):
+    python analyze-gnu-results.py file1.json file2.json
+
+  - With output file for aggregated results:
+    python analyze-gnu-results.py -o=output.json file1.json file2.json
+
+Output:
+  Prints shell export statements for TOTAL, PASS, FAIL, SKIP, XPASS, and ERROR
+  that can be evaluated in a shell environment.
+"""
 import json
 import sys
+from collections import defaultdict
+
+
+def get_priority(result):
+    """Return a priority value for result status (lower is higher priority)"""
+    priorities = {
+        "PASS": 0,  # PASS is highest priority (best result)
+        "FAIL": 1,  # FAIL is second priority
+        "ERROR": 2,  # ERROR is third priority
+        "SKIP": 3,  # SKIP is lowest priority
+    }
+    return priorities.get(result, 4)  # Unknown states have lowest priority
+
+
+def aggregate_results(json_files):
+    """
+    Aggregate test results from multiple JSON files.
+    Prioritizes results in the order: SKIP > ERROR > FAIL > PASS
+    """
+    # Combined results dictionary
+    combined_results = defaultdict(dict)
+
+    # Process each JSON file
+    for json_file in json_files:
+        try:
+            with open(json_file, "r") as f:
+                data = json.load(f)
+
+            # For each utility and its tests
+            for utility, tests in data.items():
+                for test_name, result in tests.items():
+                    # If this test hasn't been seen yet, add it
+                    if test_name not in combined_results[utility]:
+                        combined_results[utility][test_name] = result
+                    else:
+                        # If it has been seen, apply priority rules
+                        current_priority = get_priority(
+                            combined_results[utility][test_name]
+                        )
+                        new_priority = get_priority(result)
+
+                        # Lower priority value means higher precedence
+                        if new_priority < current_priority:
+                            combined_results[utility][test_name] = result
+        except FileNotFoundError:
+            print(f"Warning: File '{json_file}' not found.", file=sys.stderr)
+            continue
+        except json.JSONDecodeError:
+            print(f"Warning: '{json_file}' is not a valid JSON file.", file=sys.stderr)
+            continue
+
+    return combined_results


 def analyze_test_results(json_data):
+    """
+    Analyze test results from GNU test suite JSON data.
+    Counts PASS, FAIL, SKIP results for all tests.
+    """
     # Counters for test results
     total_tests = 0
     pass_count = 0
     fail_count = 0
     skip_count = 0
-    error_count = 0  # Although not in the JSON, included for compatibility
+    xpass_count = 0  # Not in JSON data but included for compatibility
+    error_count = 0  # Not in JSON data but included for compatibility

     # Analyze each utility's tests
     for utility, tests in json_data.items():

@@ -22,47 +109,74 @@ def analyze_test_results(json_data):
             fail_count += 1
         elif result == "SKIP":
             skip_count += 1
+        elif result == "ERROR":
+            error_count += 1
+        elif result == "XPASS":
+            xpass_count += 1

     # Return the statistics
     return {
         "TOTAL": total_tests,
         "PASS": pass_count,
         "FAIL": fail_count,
         "SKIP": skip_count,
+        "XPASS": xpass_count,
         "ERROR": error_count,
     }


 def main():
-    # Check if a file argument was provided
-    if len(sys.argv) != 2:
-        print("Usage: python script.py <json_file>")
+    """
+    Main function to process JSON files and export variables.
+    Supports both single file analysis and multi-file aggregation.
+    """
+    # Check if file arguments were provided
+    if len(sys.argv) < 2:
+        print("Usage: python analyze-gnu-results.py <json> [json ...]")
+        print("       For multiple files, results will be aggregated")
+        print("       Priority SKIP > ERROR > FAIL > PASS")
         sys.exit(1)

-    json_file = sys.argv[1]
+    json_files = sys.argv[1:]
+    output_file = None

-    try:
-        # Parse the JSON data from the specified file
-        with open(json_file, "r") as file:
-            json_data = json.load(file)
+    # Check if the first argument is an output file (starts with -)
+    if json_files[0].startswith("-o="):
+        output_file = json_files[0][3:]
+        json_files = json_files[1:]

-        # Analyze the results
+    # Process the files
+    if len(json_files) == 1:
+        # Single file analysis
+        try:
+            with open(json_files[0], "r") as file:
+                json_data = json.load(file)
+            results = analyze_test_results(json_data)
+        except FileNotFoundError:
+            print(f"Error: File '{json_files[0]}' not found.", file=sys.stderr)
+            sys.exit(1)
+        except json.JSONDecodeError:
+            print(
+                f"Error: '{json_files[0]}' is not a valid JSON file.", file=sys.stderr
+            )
+            sys.exit(1)
+    else:
+        # Multiple files - aggregate them
+        json_data = aggregate_results(json_files)
         results = analyze_test_results(json_data)

-        # Export the results as environment variables
-        # For use in shell, print export statements
-        print(f"export TOTAL={results['TOTAL']}")
-        print(f"export PASS={results['PASS']}")
-        print(f"export SKIP={results['SKIP']}")
-        print(f"export FAIL={results['FAIL']}")
-        print(f"export ERROR={results['ERROR']}")
+    # Save aggregated data if output file is specified
+    if output_file:
+        with open(output_file, "w") as f:
+            json.dump(json_data, f, indent=2)

-    except FileNotFoundError:
-        print(f"Error: File '{json_file}' not found.", file=sys.stderr)
-        sys.exit(1)
-    except json.JSONDecodeError:
-        print(f"Error: '{json_file}' is not a valid JSON", file=sys.stderr)
-        sys.exit(1)
+    # Print export statements for shell evaluation
+    print(f"export TOTAL={results['TOTAL']}")
+    print(f"export PASS={results['PASS']}")
+    print(f"export SKIP={results['SKIP']}")
+    print(f"export FAIL={results['FAIL']}")
+    print(f"export XPASS={results['XPASS']}")
+    print(f"export ERROR={results['ERROR']}")


 if __name__ == "__main__":
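Not part of the commit: a quick way to see the aggregation priority of the script above in action. Two toy result files (utility and test names are invented) disagree about the same test; because PASS outranks FAIL, the merged file keeps PASS and the exported counters come out as TOTAL=1, PASS=1, FAIL=0:

  # Run from the repository root; filenames and JSON contents are illustrative only.
  printf '%s\n' '{"ls": {"basic": "FAIL"}}' > a.json
  printf '%s\n' '{"ls": {"basic": "PASS"}}' > b.json
  python3 util/analyze-gnu-results.py -o=merged.json a.json b.json
  # merged.json now records "PASS" for the toy test.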
