# fortress-completion-tests.yml
# ------------------------------------------------------------------------------------
# Completion Report Test Analysis (Reusable Workflow) (GoFortress)
#
# Purpose: Process all test-related artifacts for the completion report including
# test results, test configuration analysis, and fuzz testing results.
#
# This workflow handles:
# - Test statistics processing and failure analysis
# - Test configuration and output mode analysis
# - Fuzz test statistics and security testing results
# - Test failure details and error extraction
#
# Maintainer: @mrz1836
#
# ------------------------------------------------------------------------------------

name: GoFortress (Completion Tests)

# Reusable workflow: invoked via workflow_call from the parent completion pipeline.
on:
  workflow_call:
    inputs:
      test-suite-result:
        description: "Result of the test suite job"
        required: true
        type: string
      env-json:
        description: "JSON string of environment variables"
        required: true
        type: string
    outputs:
      report-section:
        description: "Generated test analysis markdown section"
        value: ${{ jobs.analyze-tests.outputs.tests-markdown }}
      test-metrics:
        description: "Test performance metrics"
        value: ${{ jobs.analyze-tests.outputs.test-data }}
      failure-metrics:
        description: "Test failure analysis metrics"
        value: ${{ jobs.analyze-tests.outputs.failure-data }}

# Security: Restrict default permissions (jobs must explicitly request what they need)
permissions: {}
jobs:
  # ----------------------------------------------------------------------------------
  # Test Analysis
  # ----------------------------------------------------------------------------------
  analyze-tests:
    name: 🧪 Analyze Test Results
    runs-on: ubuntu-latest
    # Run even when upstream jobs failed — the completion report must always be built
    if: always()
    permissions:
      contents: read
      actions: read # needed to download workflow artifacts
    outputs:
      tests-markdown: ${{ steps.set-output.outputs.content }}
      test-data: ${{ steps.process-tests.outputs.test-metrics }}
      failure-data: ${{ steps.process-tests.outputs.failure-metrics }}
    steps:
      # --------------------------------------------------------------------
      # Checkout repository for local actions
      # --------------------------------------------------------------------
      - name: 📥 Checkout Repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
# --------------------------------------------------------------------
# Parse environment variables
# --------------------------------------------------------------------
- name: 🔧 Parse environment variables
env:
ENV_JSON: ${{ inputs.env-json }}
run: |
echo "📋 Setting environment variables..."
echo "$ENV_JSON" | jq -r 'to_entries | .[] | "\(.key)=\(.value)"' | while IFS='=' read -r key value; do
echo "$key=$value" >> $GITHUB_ENV
done
# --------------------------------------------------------------------
# Download specific artifacts needed for test analysis
# --------------------------------------------------------------------
- name: 📥 Download benchmark statistics
if: always()
uses: ./.github/actions/download-artifact-resilient
with:
pattern: "bench-stats-*"
path: ./artifacts/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
- name: 📥 Download cache statistics
if: always()
uses: ./.github/actions/download-artifact-resilient
with:
pattern: "cache-stats-*"
path: ./artifacts/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
- name: 📥 Download fuzz test failure artifacts
if: always() && env.ENABLE_GO_TESTS == 'true' && env.ENABLE_FUZZ_TESTING == 'true'
uses: ./.github/actions/download-artifact-resilient
with:
pattern: "test-results-fuzz-*"
path: ./test-artifacts/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
- name: 📥 Download CI results (native mode)
if: always() && env.ENABLE_GO_TESTS == 'true'
uses: ./.github/actions/download-artifact-resilient
with:
pattern: "ci-results-*"
path: ./ci-artifacts/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
- name: 🗂️ Flatten artifacts
if: always()
run: |
echo "🗂️ Flattening downloaded artifacts..."
# Source shared helper functions for artifact processing
source .github/scripts/parse-test-label.sh || { echo "❌ Failed to source parse-test-label.sh"; exit 1; }
# Verify critical function is available
if ! type copy_ci_artifact &>/dev/null; then
echo "❌ Error: copy_ci_artifact function not found after sourcing"
exit 1
fi
# Process stats artifacts (bench-stats, cache-stats JSON files)
if [ -d "./artifacts/" ]; then
find ./artifacts/ -name "*.json" -type f | while read -r file; do
filename=$(basename "$file")
echo "Moving $file to ./$filename"
cp "$file" "./$filename"
done
echo "📋 Available stats files:"
ls -la *-stats-*.json 2>/dev/null || echo "No stats files found"
else
echo "ℹ️ No artifacts directory found"
fi
# Process CI results from ci-artifacts (unit tests)
if [ -d "./ci-artifacts/" ]; then
echo "📋 Processing unit test CI results..."
while IFS= read -r -d '' file; do
copy_ci_artifact "$file" "ci" || true
done < <(find ./ci-artifacts/ -name "*.jsonl" -type f -print0 2>/dev/null)
fi
# Process CI results from test-artifacts (fuzz tests)
if [ -d "./test-artifacts/" ]; then
echo "📋 Processing fuzz test CI results..."
while IFS= read -r -d '' file; do
copy_ci_artifact "$file" "ci" || true
done < <(find ./test-artifacts/ -name "*.jsonl" -type f -print0 2>/dev/null)
fi
# Show all available JSONL files
echo "📋 Available CI results JSONL files:"
ls -la ci-*.jsonl 2>/dev/null || echo "No CI results JSONL files found"
# --------------------------------------------------------------------
# Initialize test analysis section
# --------------------------------------------------------------------
- name: 📝 Initialize Test Analysis Section
run: |
touch tests-section.md
# --------------------------------------------------------------------
# Process test statistics
# --------------------------------------------------------------------
- name: 🧪 Process Test Statistics
id: process-tests
run: |
# Source shared helper function for generating test labels
source .github/scripts/parse-test-label.sh || { echo "❌ Failed to source parse-test-label.sh"; exit 1; }
# Enable nullglob so "for f in *.jsonl" loops safely skip when no files match
# (prevents iterating with literal pattern string "ci-*.jsonl")
shopt -s nullglob
# Initialize totals for summary
TOTAL_TESTS=0
TOTAL_FAILURES=0
TOTAL_PASSED=0
TOTAL_SKIPPED=0
SUITE_COUNT=0
HAS_DATA=false
# Check for native CI mode JSONL files first (preferred)
if compgen -G "ci-*.jsonl" >/dev/null 2>&1; then
echo "📊 Processing native CI mode JSONL files..."
HAS_DATA=true
{
echo ""
echo ""
echo "### 🧪 Test Results Summary"
echo "| Test Suite | Duration | Tests | Runs | Passed | Failed | Skipped | Status |"
echo "|------------|----------|-------|------|--------|--------|---------|--------|"
} >> tests-section.md
# Process each JSONL file
for jsonl_file in ci-*.jsonl; do
if [ -f "$jsonl_file" ]; then
# Extract artifact name from filename (ci-ARTIFACT_NAME-ci-results.jsonl)
ARTIFACT_NAME=$(echo "$jsonl_file" | sed 's/^ci-//' | sed 's/-ci-results\.jsonl$//')
SUITE_LABEL=$(parse_test_label "$ARTIFACT_NAME")
# Extract summary line
SUMMARY=$(grep '"type":"summary"' "$jsonl_file" 2>/dev/null | head -1 || echo "")
if [[ -n "$SUMMARY" ]]; then
STATUS=$(echo "$SUMMARY" | jq -r '.summary.status // "unknown"')
PASSED=$(echo "$SUMMARY" | jq -r '.summary.passed // 0')
FAILED=$(echo "$SUMMARY" | jq -r '.summary.failed // 0')
SKIPPED=$(echo "$SUMMARY" | jq -r '.summary.skipped // 0')
TOTAL=$(echo "$SUMMARY" | jq -r '.summary.total // 0')
UNIQUE=$(echo "$SUMMARY" | jq -r '.summary.unique_total // .summary.total // 0')
DURATION=$(echo "$SUMMARY" | jq -r '.summary.duration // "0s"')
STATUS_ICON=$([[ "$STATUS" == "passed" ]] && echo "✅" || echo "❌")
echo "| $SUITE_LABEL | $DURATION | $UNIQUE | $TOTAL | $PASSED | $FAILED | $SKIPPED | $STATUS_ICON |" >> tests-section.md
# Accumulate totals (use unique for primary test count)
TOTAL_TESTS=$((TOTAL_TESTS + UNIQUE))
TOTAL_PASSED=$((TOTAL_PASSED + PASSED))
TOTAL_FAILURES=$((TOTAL_FAILURES + FAILED))
TOTAL_SKIPPED=$((TOTAL_SKIPPED + SKIPPED))
SUITE_COUNT=$((SUITE_COUNT + 1))
fi
fi
done
# Store totals as outputs
echo "test-metrics={\"total_tests\":$TOTAL_TESTS,\"total_failures\":$TOTAL_FAILURES,\"suite_count\":$SUITE_COUNT}" >> $GITHUB_OUTPUT
# Add failure analysis if any failures exist
if [[ $TOTAL_FAILURES -gt 0 ]]; then
{
echo ""
echo ""
echo "### ❌ Test Failure Analysis"
echo "**Total Failures**: $TOTAL_FAILURES across $SUITE_COUNT test suite(s)"
echo ""
} >> tests-section.md
# Show failures by suite
echo "#### 📊 Failures by Test Suite:" >> tests-section.md
for jsonl_file in ci-*.jsonl; do
if [ -f "$jsonl_file" ]; then
ARTIFACT_NAME=$(echo "$jsonl_file" | sed 's/^ci-//' | sed 's/-ci-results\.jsonl$//')
SUITE_LABEL=$(parse_test_label "$ARTIFACT_NAME")
SUMMARY=$(grep '"type":"summary"' "$jsonl_file" 2>/dev/null | head -1 || echo "")
if [[ -n "$SUMMARY" ]]; then
FAILED=$(echo "$SUMMARY" | jq -r '.summary.failed // 0')
if [[ $FAILED -gt 0 ]]; then
echo "- **$SUITE_LABEL**: $FAILED failures" >> tests-section.md
fi
fi
fi
done
# Add collapsible section for failed tests
{
echo ""
echo "<details>"
echo "<summary>🔍 Failed Tests (click to expand)</summary>"
echo ""
echo "| Test Name | Package | Error |"
echo "|-----------|---------|-------|"
} >> tests-section.md
# Extract failure details from all JSONL files
FAILURE_COUNT=0
for jsonl_file in ci-*.jsonl; do
if [ -f "$jsonl_file" ] && [[ $FAILURE_COUNT -lt 20 ]]; then
while read -r line; do
if [[ $FAILURE_COUNT -ge 20 ]]; then
break
fi
TEST=$(echo "$line" | jq -r '.failure.test // "unknown"')
PKG=$(echo "$line" | jq -r '.failure.package // "unknown"' | sed 's|.*/||')
ERROR=$(echo "$line" | jq -r '.failure.error // ""' | head -c 100 | tr '\n' ' ')
# Truncate error message for table display (max 80 chars: 77 + "...")
if [[ ${#ERROR} -gt 80 ]]; then
ERROR="${ERROR:0:77}..."
fi
echo "| \`$TEST\` | $PKG | ${ERROR:-_no message_} |"
FAILURE_COUNT=$((FAILURE_COUNT + 1))
done < <(grep '"type":"failure"' "$jsonl_file" 2>/dev/null) >> tests-section.md || true
fi
done
{
echo ""
echo "</details>"
} >> tests-section.md
# Store failure metrics
echo "failure-metrics={\"total_failures\":$TOTAL_FAILURES,\"has_error_output\":true}" >> $GITHUB_OUTPUT
fi
fi
# No test statistics available
if [[ "$HAS_DATA" == "false" ]]; then
{
echo ""
echo ""
echo "### 🧪 Test Results Summary"
echo ""
echo "| Status | Details |"
echo "|--------|---------|"
if [[ "${{ env.ENABLE_GO_TESTS }}" == "false" ]]; then
echo "| **Test Suite** | ❌ Disabled - Set ENABLE_GO_TESTS=true to enable |"
echo "| **Reason** | Tests are disabled via configuration flag |"
echo "| **Note** | Enable ENABLE_GO_TESTS in .github/env/00-core.env to run tests |"
else
echo "| **Test Suite** | ⚠️ Skipped - No test statistics available |"
echo "| **Reason** | Tests may have been skipped for fork PR security restrictions |"
echo "| **Note** | Repository maintainers can run full tests on merged code |"
echo ""
echo "_For security reasons, fork PRs do not have access to test execution secrets._"
fi
} >> tests-section.md
fi
# --------------------------------------------------------------------
# Add test configuration and output analysis
# --------------------------------------------------------------------
- name: 🎛️ Add Test Configuration Section
id: add-test-config
run: |
# Add test output configuration section
HAS_CONFIG_DATA=false
# Check for native CI mode JSONL files
if compgen -G "ci-*.jsonl" >/dev/null 2>&1; then
HAS_CONFIG_DATA=true
{
echo ""
echo "<br><br>"
echo ""
echo "### 🎛️ Test Output Configuration"
echo ""
echo "**Output Mode**: Native CI Mode (JSONL)"
echo ""
echo "- Tests executed with magex native CI mode"
echo "- Structured output in .mage-x/ci-results.jsonl"
echo "- Automatic GitHub annotations for failures"
} >> tests-section.md
fi
if [[ "$HAS_CONFIG_DATA" == "false" ]]; then
# No test configuration to display - test stats not available
echo "" >> tests-section.md
echo "ℹ️ _Test configuration section skipped - no test data available_" >> tests-section.md
fi
# --------------------------------------------------------------------
# Process fuzz test statistics
# --------------------------------------------------------------------
- name: 🎯 Process Fuzz Test Statistics
id: process-fuzz
run: |
# Process fuzz test statistics - always show status
{
echo "<br><br>"
echo ""
echo "### 🛡️ Security Testing Results"
} >> tests-section.md
# Check if fuzz testing is enabled in environment
if [[ "${{ env.ENABLE_FUZZ_TESTING }}" == "true" ]]; then
# Look for fuzz test JSONL files (native CI mode)
FUZZ_JSONL=$(ls ci-*-ci-results-fuzz.jsonl 2>/dev/null | head -1 || echo "")
if [[ -n "$FUZZ_JSONL" ]] && [[ -f "$FUZZ_JSONL" ]]; then
# Extract summary from fuzz JSONL
SUMMARY=$(grep '"type":"summary"' "$FUZZ_JSONL" 2>/dev/null | head -1 || echo "")
if [[ -n "$SUMMARY" ]]; then
STATUS=$(echo "$SUMMARY" | jq -r '.summary.status // "unknown"')
TOTAL=$(echo "$SUMMARY" | jq -r '.summary.total // 0')
DURATION=$(echo "$SUMMARY" | jq -r '.summary.duration // "0s"')
STATUS_ICON=$([[ "$STATUS" == "passed" ]] && echo "✅" || echo "❌")
# Create table with fuzz test data from JSONL
{
echo "| Fuzz Suite | Duration | Fuzz Tests | Status | Enabled |"
echo "|------------|----------|------------|--------|---------|"
echo "| Fuzz Tests | $DURATION | $TOTAL | $STATUS_ICON | 🎯 |"
} >> tests-section.md
else
# JSONL found but no summary record
{
echo "| Status | Details |"
echo "|--------|---------|"
echo "| **Fuzz Testing** | ✅ Enabled |"
echo "| **Execution** | ⚠️ No fuzz summary found in JSONL - check job logs |"
echo "| **Platform** | Linux with primary Go version |"
} >> tests-section.md
fi
else
# No fuzz JSONL found
{
echo "| Status | Details |"
echo "|--------|---------|"
echo "| **Fuzz Testing** | ✅ Enabled |"
echo "| **Execution** | ⚠️ No fuzz results found - check job logs |"
echo "| **Platform** | Linux with primary Go version |"
} >> tests-section.md
fi
else
# Fuzz testing is disabled
{
echo "| Status | Details |"
echo "|--------|---------|"
echo "| **Fuzz Testing** | ❌ Disabled |"
echo "| **Configuration** | Set ENABLE_FUZZ_TESTING=true to enable |"
echo "| **Target Platform** | Would run on Linux with primary Go version |"
} >> tests-section.md
fi
# --------------------------------------------------------------------
# Upload test analysis section
# --------------------------------------------------------------------
- name: 📤 Upload Test Analysis Section
id: upload-section
if: always()
run: |
if [ -f "tests-section.md" ] && [ -s "tests-section.md" ]; then
echo "🧪 Test section found, uploading..."
ls -la tests-section.md
echo "📋 Content preview:"
head -5 tests-section.md
else
echo "⚠️ Test section file missing or empty, creating minimal section..."
echo "### 🧪 Test Results Section" > tests-section.md
echo "No test data available for this run." >> tests-section.md
fi
- name: 📤 Upload Test Artifact
uses: ./.github/actions/upload-statistics
with:
artifact-name: "tests-section"
artifact-path: "tests-section.md"
retention-days: "7"
if-no-files-found: "warn"
- name: 📋 Set Output Content
id: set-output
run: |
echo "content<<EOF" >> $GITHUB_OUTPUT
cat tests-section.md >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT