Skip to content

Commit 2a7f890

Browse files
committed
Move CI scripts to standard scripts directory
- Move test_performance_regression.py from temp/one-off-scripts/ to scripts/ - Move generate_final_report.py from temp/one-off-scripts/ to scripts/ - Move generate_assets.py from temp/one-off-scripts/ to scripts/ - Move generate_test_assets.py from temp/one-off-scripts/ to scripts/ - Update all GitHub Actions workflow references to use scripts/ directory This ensures CI scripts are in a standard, accessible location and fixes path issues in the GitHub Actions environment.
1 parent 1ab07a0 commit 2a7f890

File tree

5 files changed

+406
-7
lines changed

5 files changed

+406
-7
lines changed

.github/workflows/verify.yml

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ jobs:
5454
retention-days: 30
5555
- name: Generate Final Test Report
5656
run: |
57-
poetry run python temp/one-off-scripts/generate_final_report.py || true
57+
poetry run python scripts/generate_final_report.py || true
5858
- name: Upload Final Test Report
5959
uses: actions/upload-artifact@v4
6060
with:
@@ -107,7 +107,7 @@ jobs:
107107
108108
- name: Generate test fixtures
109109
run: |
110-
poetry run python temp/one-off-scripts/generate_test_assets.py || echo "Test assets generation failed, continuing..."
110+
poetry run python scripts/generate_test_assets.py || echo "Test assets generation failed, continuing..."
111111
112112
- name: Run E2E tests
113113
run: |
@@ -167,7 +167,7 @@ jobs:
167167
run: |
168168
mkdir -p tests/data
169169
mkdir -p test_chroma_db
170-
poetry run python temp/one-off-scripts/generate_assets.py || echo "Test assets generation failed, continuing..."
170+
poetry run python scripts/generate_assets.py || echo "Test assets generation failed, continuing..."
171171
- name: Run integration tests
172172
run: |
173173
poetry run pytest -n auto tests/ -m "integration" -v --tb=short --timeout=300
@@ -186,7 +186,7 @@ jobs:
186186
rm -rf tests/data/test_*
187187
- name: Generate Final Test Report
188188
run: |
189-
poetry run python temp/one-off-scripts/generate_final_report.py || true
189+
poetry run python scripts/generate_final_report.py || true
190190
- name: Upload Final Test Report
191191
uses: actions/upload-artifact@v4
192192
with:
@@ -233,10 +233,10 @@ jobs:
233233
# Check if any tests were actually run
234234
if echo "$TEST_OUTPUT" | grep -q "no tests ran"; then
235235
echo "No performance tests found, running fallback script..."
236-
poetry run python temp/one-off-scripts/test_performance_regression.py
236+
poetry run python scripts/test_performance_regression.py
237237
elif [ $TEST_EXIT_CODE -ne 0 ]; then
238238
echo "Performance tests failed, running fallback script..."
239-
poetry run python temp/one-off-scripts/test_performance_regression.py
239+
poetry run python scripts/test_performance_regression.py
240240
else
241241
echo "Performance tests completed successfully"
242242
fi
@@ -249,7 +249,7 @@ jobs:
249249
retention-days: 30
250250
- name: Generate Final Test Report
251251
run: |
252-
poetry run python temp/one-off-scripts/generate_final_report.py || true
252+
poetry run python scripts/generate_final_report.py || true
253253
- name: Check Final Test Report Exists
254254
run: |
255255
if [ ! -f final_test_report.md ]; then

scripts/generate_assets.py

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
import os
2+
from PIL import Image
3+
4+
def ensure_dir(directory):
    """Create *directory* (including parents) if it does not already exist."""
    # exist_ok=True is atomic and idempotent, avoiding the check-then-create
    # race of the original os.path.exists() + os.makedirs() pair.
    os.makedirs(directory, exist_ok=True)
7+
8+
def generate_assets(logo_path):
    """Generate brand image assets (full logo, favicons, social cards) from *logo_path*.

    Writes PNG files under assets/brand/logo, assets/brand/favicon and
    assets/brand/social, creating the directories as needed.
    """
    # Create output directories
    dirs = [
        'assets/brand/logo',
        'assets/brand/favicon',
        'assets/brand/social'
    ]
    for dir_path in dirs:
        ensure_dir(dir_path)

    # Load the source logo
    logo = Image.open(logo_path)

    # Save full logo
    logo.save('assets/brand/logo/elron-logo-full.png', 'PNG')

    # Generate favicons at the standard sizes
    favicon_sizes = {
        16: 'favicon-16x16.png',
        32: 'favicon-32x32.png',
        180: 'apple-touch-icon.png'
    }

    for size, filename in favicon_sizes.items():
        resized = logo.resize((size, size), Image.LANCZOS)
        # BUG FIX: the save path must interpolate the per-size filename;
        # without it every favicon would be written to the same file.
        resized.save(f'assets/brand/favicon/{filename}')

    # Generate social media images
    social_sizes = {
        'og-image.png': (1200, 630),
        'twitter-card.png': (1200, 600)
    }

    for filename, (width, height) in social_sizes.items():
        # Create social media image with logo
        social_img = Image.new('RGB', (width, height), (15, 23, 42))  # Dark background

        # Calculate dimensions for the logo (use 1/3 of the height)
        logo_height = height // 3
        aspect_ratio = logo.width / logo.height
        logo_width = int(logo_height * aspect_ratio)

        # Resize logo
        resized_logo = logo.resize((logo_width, logo_height), Image.LANCZOS)

        # Calculate position to center the logo
        x = (width - logo_width) // 2
        y = (height - logo_height) // 2

        # Paste the logo
        social_img.paste(resized_logo, (x, y))
        # BUG FIX: interpolate the card filename here as well.
        social_img.save(f'assets/brand/social/{filename}')
60+
61+
if __name__ == "__main__":
    # Script entry point: generate all brand assets from the repo's source logo.
    generate_assets('LOGO.jpg')

scripts/generate_final_report.py

Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,159 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Aggregate all test, coverage, LLM Judge, and performance results into a single Markdown report for CI/CD.
4+
"""
5+
import os
6+
import json
7+
from datetime import datetime
8+
9+
def read_json(path):
    """Load JSON from *path*, returning None (after printing a warning) on any failure."""
    try:
        with open(path, 'r') as handle:
            return json.load(handle)
    except FileNotFoundError:
        print(f"⚠️ File not found: {path}")
    except json.JSONDecodeError as exc:
        print(f"⚠️ Failed to decode JSON from {path}: {exc}")
    except Exception as exc:
        print(f"⚠️ Unexpected error reading {path}: {exc}")
    return None
22+
23+
def read_coverage():
    """Best-effort total-coverage lookup.

    Tries the htmlcov HTML report first, then coverage.xml (which, if present,
    overrides the HTML value). Returns {} when neither source yields a number.
    """
    summary = {}
    html_report = 'htmlcov/index.html'
    xml_report = 'coverage.xml'

    if os.path.exists(html_report):
        # Scrape the "TOTAL ... NN%" row out of the HTML with a simple regex.
        try:
            import re
            with open(html_report) as handle:
                match = re.search(r'TOTAL.*?(\d+)%', handle.read())
            if match:
                summary['total'] = int(match.group(1))
        except Exception:
            pass

    if os.path.exists(xml_report):
        # The Cobertura-style XML root carries an overall line-rate attribute.
        try:
            import xml.etree.ElementTree as ET
            root = ET.parse(xml_report).getroot()
            summary['total'] = float(root.attrib.get('line-rate', 0)) * 100
        except Exception:
            pass

    return summary
46+
47+
def read_pytest_results():
    """Parse the CI pytest log into {'collected', 'passed', 'skipped'} counts.

    Returns None when the log file is absent, a possibly-partial dict when it
    parses, and {} when parsing fails outright.
    """
    log_path = 'ci_build_doc_test.log'
    if not os.path.exists(log_path):
        return None

    summary = {}
    try:
        import re
        with open(log_path) as handle:
            for line in handle:
                # "collected N items" gives the total test count.
                if 'collected' in line and 'items' in line:
                    count_text = line.split('collected')[1].split('items')[0]
                    summary['collected'] = int(count_text.strip())
                # The final summary line carries both passed and skipped counts.
                if 'passed' in line and 'skipped' in line:
                    passed = re.findall(r'(\d+) passed', line)
                    if passed:
                        summary['passed'] = int(passed[0])
                    skipped = re.findall(r'(\d+) skipped', line)
                    if skipped:
                        summary['skipped'] = int(skipped[0])
        return summary
    except Exception as e:
        print(f"⚠️ Error parsing pytest log: {e}")
        return {}
71+
72+
def main():
    """Aggregate pytest, coverage, LLM Judge, and performance results into final_test_report.md."""
    # FIX: datetime.utcnow() is deprecated (and naive); use an aware UTC datetime.
    # The formatted output ('%Y-%m-%d %H:%M UTC') is byte-identical.
    from datetime import timezone
    now = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')
    report = [f"# 🧪 Final Test Report\n\n*Generated: {now}*\n"]

    # Test summary: None = no log at all; {} = log present but unparseable.
    pytest_results = read_pytest_results()
    if pytest_results is not None:
        if pytest_results:
            report.append("## ✅ Test Results Summary\n")
            report.append(f"- Total tests collected: {pytest_results.get('collected','?')}")
            report.append(f"- Passed: {pytest_results.get('passed','?')}")
            report.append(f"- Skipped: {pytest_results.get('skipped','?')}")
            report.append("")
        else:
            report.append("## ✅ Test Results Summary\n- ⚠️ Pytest log found but no summary could be parsed.\n")
    else:
        report.append("## ✅ Test Results Summary\n- ⚠️ No pytest summary found.\n")

    # Coverage summary
    coverage = read_coverage()
    if coverage and 'total' in coverage:
        report.append(f"## 📊 Coverage Summary\n- Total coverage: **{coverage['total']}%**\n")
    else:
        report.append("## 📊 Coverage Summary\n- ⚠️ No coverage data found.\n")

    # LLM Judge results (optional JSON artifact from the judge job)
    llm_judge = read_json('llm_judge_results.json')
    if llm_judge:
        report.append("## 🤖 LLM Judge Results\n")
        score = llm_judge.get('overall_score', '?')
        report.append(f"- Overall Score: **{score}/10**")
        if 'scores' in llm_judge:
            report.append("- Score Breakdown:")
            for k, v in llm_judge['scores'].items():
                # Each score may be a bare value or a {score, justification} dict.
                if isinstance(v, dict):
                    report.append(f" - {k}: {v.get('score','?')}/10 — {v.get('justification','')}")
                else:
                    report.append(f" - {k}: {v}")
        if 'recommendations' in llm_judge:
            report.append("- Recommendations:")
            for rec in llm_judge['recommendations']:
                report.append(f" - {rec}")
        if 'next_steps' in llm_judge:
            report.append("- Next Steps:")
            for step in llm_judge['next_steps']:
                report.append(f" - {step}")
        report.append("")
    else:
        report.append("## 🤖 LLM Judge Results\n- ⚠️ No LLM Judge results found.\n")

    # Performance metrics (optional JSON artifact from the performance job)
    perf = read_json('performance_metrics.json')
    if perf:
        report.append("## 🚦 Performance Metrics\n")
        for k in ['elapsed_seconds','memory_mb','threshold_seconds','threshold_mb','status']:
            if k in perf:
                report.append(f"- {k.replace('_',' ').title()}: {perf[k]}")
        report.append("")
    else:
        report.append("## 🚦 Performance Metrics\n- ⚠️ No performance metrics found.\n")

    # Recommendations (consolidated, no duplicates)
    report.append("## 📝 Recommendations\n")
    any_recommendation = False
    if llm_judge and 'recommendations' in llm_judge and llm_judge['recommendations']:
        for rec in llm_judge['recommendations']:
            report.append(f"- {rec}")
        any_recommendation = True
    if coverage and coverage.get('total',0) < 50:
        report.append("- 🚨 Coverage is below 50%. Add more tests!")
        any_recommendation = True
    if perf and perf.get('status') == 'FAIL':
        report.append("- 🚨 Performance regression detected. Optimize code or dependencies.")
        any_recommendation = True
    if not any_recommendation:
        report.append("- No additional recommendations.\n")
    report.append("")

    # Comparison to previous run (stub)
    report.append("## 🔄 Comparison to Previous Run\n- (Comparison feature coming soon)\n")

    # Save report
    with open('final_test_report.md','w') as f:
        f.write('\n'.join(report))
    print("✅ Final test report generated: final_test_report.md")
157+
158+
if __name__ == "__main__":
    # Script entry point: build and write the aggregated CI report.
    main()

scripts/generate_test_assets.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Generate lightweight test assets for integration tests
4+
"""
5+
6+
import os
7+
from pathlib import Path
8+
9+
def create_test_files():
    """Create minimal fixture files (PDF, text, PNG) under tests/data for integration tests."""
    data_dir = Path("tests/data")
    data_dir.mkdir(parents=True, exist_ok=True)

    # Minimal hand-written single-page PDF (catalog, page tree, one text stream, xref).
    pdf_bytes = b'%PDF-1.4\n1 0 obj\n<<\n/Type /Catalog\n/Pages 2 0 R\n>>\nendobj\n2 0 obj\n<<\n/Type /Pages\n/Kids [3 0 R]\n/Count 1\n>>\nendobj\n3 0 obj\n<<\n/Type /Page\n/Parent 2 0 R\n/MediaBox [0 0 612 792]\n/Contents 4 0 R\n>>\nendobj\n4 0 obj\n<<\n/Length 44\n>>\nstream\nBT\n/F1 12 Tf\n72 720 Td\n(Test PDF) Tj\nET\nendstream\nendobj\nxref\n0 5\n0000000000 65535 f \n0000000009 00000 n \n0000000058 00000 n \n0000000115 00000 n \n0000000204 00000 n \ntrailer\n<<\n/Size 5\n/Root 1 0 R\n>>\nstartxref\n297\n%%EOF'
    (data_dir / "test_document.pdf").write_bytes(pdf_bytes)

    # Plain-text fixture.
    (data_dir / "test_document.txt").write_text("This is a test document for integration testing.")

    # 1x1 pixel PNG fixture.
    png_bytes = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\tpHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x0cIDATx\x9cc```\x00\x00\x00\x04\x00\x01\xf6\x178U\x00\x00\x00\x00IEND\xaeB`\x82'
    (data_dir / "test_image.png").write_bytes(png_bytes)
29+
30+
if __name__ == "__main__":
    # Script entry point: write the fixture files, then report success.
    create_test_files()
    print("✅ Test assets generated successfully!")

0 commit comments

Comments
 (0)