diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000..a8a01cc
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,245 @@
+name: Push Workflow
+
+on:
+  push:
+
+jobs:
+  find-designs:
+    runs-on: ubuntu-latest
+    outputs:
+      designs: ${{ steps.set-matrix.outputs.designs }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Find design configurations
+        id: set-matrix
+        run: |
+          # Read designs from the configuration file, filtering out comments and empty lines
+          configs=$(grep -v '^#' designs_to_run.txt | grep -v '^$' | sort | jq -R -s -c 'split("\n")[:-1]')
+          echo "designs=$configs" >> "$GITHUB_OUTPUT"
+          echo "Found designs: $configs"
+
+          # Also validate that the config files exist; fail fast on a missing
+          # config instead of letting the matrix job discover it much later.
+          echo "Validating design configurations..."
+          while read config; do
+            if [ -f "$config" ]; then
+              echo "✓ Found: $config"
+            else
+              echo "✗ Missing: $config"; exit 1
+            fi
+          done < <(grep -v '^#' designs_to_run.txt | grep -v '^$')
+
+  build:
+    needs: find-designs
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        design: ${{ fromJson(needs.find-designs.outputs.designs) }}
+      fail-fast: false
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Run setup script
+        run: ./setup.sh
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+
+      - name: Determine ORFS tag
+        id: get_tag
+        run: |
+          cd OpenROAD-flow-scripts
+          tag=$(git describe --tags --abbrev=8 2>/dev/null || echo "latest")
+          echo "tag=$tag" >> "$GITHUB_OUTPUT"
+
+      - name: Run ORFS for design
+        run: |
+          tag="${{ steps.get_tag.outputs.tag }}"
+          design="${{ matrix.design }}"
+          echo "Running design: $design"
+
+          docker run --rm \
+            -v ${{ github.workspace }}/OpenROAD-flow-scripts/flow:/OpenROAD-flow-scripts/flow \
+            -v ${{ github.workspace }}:/OpenROAD-flow-scripts/UCSC_ML_suite \
+            -w /OpenROAD-flow-scripts \
+            openroad/orfs:$tag \
+            bash -c "
+              set -e
+              git config --global --add safe.directory '*'
+
+              echo 'Installing sv2v...'
+              apt-get update && apt-get install -y wget
+              wget -q https://github.com/zachjs/sv2v/releases/download/v0.0.13/sv2v-Linux.zip -O /tmp/sv2v.zip
+              cd /tmp && unzip -q sv2v.zip
+              chmod +x sv2v
+              mv sv2v /usr/local/bin/
+              echo 'sv2v installed successfully'
+              sv2v --version
+
+              cd /OpenROAD-flow-scripts/UCSC_ML_suite
+              echo 'Running design: $design'
+              make DESIGN_CONFIG='$design'
+            "
+
+      - name: Sanitize design name for artifact
+        id: sanitize
+        run: |
+          design="${{ matrix.design }}"
+          # Replace forward slashes with dashes and remove config.mk
+          sanitized=$(echo "$design" | sed 's|/|-|g' | sed 's|-config\.mk$||')
+          echo "sanitized_name=$sanitized" >> "$GITHUB_OUTPUT"
+          echo "Sanitized artifact name: $sanitized"
+
+      - name: Upload design logs for results
+        uses: actions/upload-artifact@v4
+        with:
+          name: design-logs-${{ steps.sanitize.outputs.sanitized_name }}
+          path: |
+            logs/
+            reports/
+            results/
+          retention-days: 30
+        if: success() || failure()
+
+      - name: Prepare image for upload
+        run: |
+          # Find and copy the final_all.webp image to a flat location
+          mkdir -p image_output
+          image_file=$(find reports -name "final_all.webp" | head -1)
+          if [ -n "$image_file" ]; then
+            cp "$image_file" "image_output/final_all.webp"
+            echo "Image found and copied: $image_file"
+          else
+            echo "No final_all.webp image found"
+          fi
+        if: success() || failure()
+
+      - name: Upload design image
+        id: upload-image
+        uses: actions/upload-artifact@v4
+        with:
+          name: design-image-${{ steps.sanitize.outputs.sanitized_name }}
+          path: image_output/final_all.webp
+          retention-days: 90
+        if: success() || failure()
+
+      - name: Save artifact info
+        run: |
+          # Create artifact info file with the design and artifact details
+          mkdir -p artifact_info
+          cat > artifact_info/${{ steps.sanitize.outputs.sanitized_name }}.json << EOF
+          {
+            "design": "${{ matrix.design }}",
+            "sanitized_name": "${{ steps.sanitize.outputs.sanitized_name }}",
+            "run_id": "${{ github.run_id }}",
+            "repo": "${{ github.repository }}"
+          }
+          EOF
+        if: success() || failure()
+
+      - name: Upload artifact info
+        uses: actions/upload-artifact@v4
+        with:
+          name: artifact-info-${{ steps.sanitize.outputs.sanitized_name }}
+          path: artifact_info/
+          retention-days: 30
+        if: success() || failure()
+
+  collect-results:
+    needs: [find-designs, build]
+    runs-on: ubuntu-latest
+    if: always()
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+
+      - name: Download logs
+        uses: actions/download-artifact@v4
+        with:
+          pattern: design-logs-*
+          merge-multiple: true
+
+      - name: Download artifact info
+        uses: actions/download-artifact@v4
+        with:
+          pattern: artifact-info-*
+          merge-multiple: true
+
+      - name: Get artifact IDs and generate URLs
+        run: |
+          echo "Getting artifact IDs for image download URLs..."
+
+          # Use GitHub API to get artifacts for this run
+          artifacts_response=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+            "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts")
+
+          echo "$artifacts_response" > artifacts.json
+          echo "Artifacts saved to artifacts.json for results script"
+
+      - name: Extract and update results
+        run: |
+          echo "Extracting results from OpenROAD runs..."
+
+          # Check if the script exists
+          if [ ! -f "extract_results.py" ]; then
+            echo "Error: extract_results.py not found"
+            exit 1
+          fi
+
+          # Run the extraction script with GitHub context
+          if python3 extract_results.py \
+            --base-dir . \
+            --output QOR.md \
+            --github-repo "${{ github.repository }}" \
+            --github-run-id "${{ github.run_id }}" \
+            --artifacts-file artifacts.json; then
+            echo "Results extraction completed successfully"
+          else
+            echo "Results extraction failed, but continuing with workflow"
+            echo "This may happen if no designs completed or there were format issues"
+          fi
+
+          # Show a preview of the results
+          echo "Generated results table preview:"
+          if grep -A 10 "OpenROAD Flow Results" QOR.md; then
+            echo "Results table successfully updated"
+          else
+            echo "No results table found in QOR.md"
+          fi
+
+          # Show summary statistics
+          if [ -d "logs" ]; then
+            total_reports=$(find logs -name "6_report.json" | wc -l)
+            echo "Found $total_reports completed design runs"
+          fi
+
+      - name: Commit and push results
+        run: |
+          echo "Current ref: ${{ github.ref }}"
+          echo "Branch: ${{ github.ref_name }}"
+
+          if [ "${{ github.ref }}" = "refs/heads/main" ]; then
+            echo "Running on main branch - will commit results"
+            git config --local user.email "action@github.com"
+            git config --local user.name "GitHub Action"
+
+            # Check if there are any changes to commit (porcelain also sees an
+            # untracked QOR.md, which `git diff --quiet` would silently miss)
+            if [ -z "$(git status --porcelain QOR.md)" ]; then
+              echo "No changes to commit"
+            else
+              git add QOR.md
+              git commit -m "Update OpenROAD flow results [skip ci]" || echo "No changes to commit"
+              git push
+            fi
+          else
+            echo "Skipping commit - not on main branch (current: ${{ github.ref }})"
+          fi
diff --git a/.github/workflows/test-single-design.yml b/.github/workflows/test-single-design.yml
new file mode 100644
index 0000000..049e37a
--- /dev/null
+++ b/.github/workflows/test-single-design.yml
@@ -0,0 +1,115 @@
+name: Test Single Design
+
+on:
+  workflow_dispatch:
+    inputs:
+      design_config:
+        description: 'Design config file to run (e.g., designs/nangate45/lfsr_top/config.mk)'
+        required: true
+        type: string
+
+jobs:
+  test-design:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Run setup script
+        run: ./setup.sh
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+
+      - name: Determine ORFS tag
+        id: get_tag
+        run: |
+          cd OpenROAD-flow-scripts
+          tag=$(git describe --tags --abbrev=8 2>/dev/null || echo "latest")
+          echo "tag=$tag" >> "$GITHUB_OUTPUT"
+
+      - name: Validate design config
+        run: |
+          design_config="${{ github.event.inputs.design_config }}"
+          echo "Validating design config: $design_config"
+
+          if [ ! -f "$design_config" ]; then
+            echo "Error: Design config file '$design_config' not found"
+            echo "Available design configs:"
+            find designs -name "config.mk" | sort
+            exit 1
+          fi
+
+          echo "Design config exists: $design_config"
+
+      - name: Run single design
+        run: |
+          tag="${{ steps.get_tag.outputs.tag }}"
+          design_config="${{ github.event.inputs.design_config }}"
+
+          echo "Running single design: $design_config"
+          echo "Using Docker image: openroad/orfs:$tag"
+
+          docker run --rm \
+            -v ${{ github.workspace }}/OpenROAD-flow-scripts/flow:/OpenROAD-flow-scripts/flow \
+            -v ${{ github.workspace }}:/OpenROAD-flow-scripts/UCSC_ML_suite \
+            -w /OpenROAD-flow-scripts \
+            openroad/orfs:$tag \
+            bash -c "
+              set +e
+              git config --global --add safe.directory '*'
+              cd UCSC_ML_suite
+
+              echo '===================='
+              echo 'Running design: $design_config'
+              echo '===================='
+
+              start_time=\$(date +%s)
+
+              if make DESIGN_CONFIG='$design_config'; then
+                end_time=\$(date +%s)
+                duration=\$((end_time - start_time))
+                echo ''
+                echo '===================='
+                echo 'SUCCESS: Design completed successfully'
+                echo \"Duration: \${duration} seconds\"
+                echo '===================='
+                exit 0
+              else
+                exit_code=\$?
+                end_time=\$(date +%s)
+                duration=\$((end_time - start_time))
+                echo ''
+                echo '===================='
+                echo \"FAILURE: Design failed (exit code: \$exit_code)\"
+                echo \"Duration: \${duration} seconds\"
+                echo '===================='
+                exit \$exit_code
+              fi
+            "
+
+      - name: Extract results
+        if: success()
+        run: |
+          echo "Extracting results for design: ${{ github.event.inputs.design_config }}"
+
+          # Check if the script exists
+          if [ ! -f "extract_results.py" ]; then
+            echo "Warning: extract_results.py not found, skipping results extraction"
+            exit 0
+          fi
+
+          # Run the extraction script
+          if python3 extract_results.py --base-dir . --output QOR-test.md; then
+            echo "Results extraction completed successfully"
+
+            # Show a preview of the results
+            echo "Generated results preview:"
+            if [ -f "QOR-test.md" ]; then
+              cat QOR-test.md
+            fi
+          else
+            echo "Results extraction failed, but test was successful"
+          fi
diff --git a/designs_to_run.txt b/designs_to_run.txt
new file mode 100644
index 0000000..5520365
--- /dev/null
+++ b/designs_to_run.txt
@@ -0,0 +1,18 @@
+# LFSR designs
+designs/nangate45/lfsr_top/config.mk
+designs/asap7/lfsr_top/config.mk
+designs/sky130hd/lfsr_top/config.mk
+
+# Minimax designs
+designs/nangate45/minimax/config.mk
+designs/asap7/minimax/config.mk
+designs/sky130hd/minimax/config.mk
+
+# LiteEth designs
+designs/nangate45/liteeth/liteeth_mac_axi_mii/config.mk
+designs/nangate45/liteeth/liteeth_mac_wb_mii/config.mk
+designs/sky130hd/liteeth/liteeth_mac_axi_mii/config.mk
+designs/sky130hd/liteeth/liteeth_mac_wb_mii/config.mk
+
+# NyuziProcessor design
+# designs/nangate45/NyuziProcessor/config.mk
diff --git a/extract_results.py b/extract_results.py
new file mode 100755
index 0000000..792bb98
--- /dev/null
+++ b/extract_results.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python3
+
+import json
+import os
+import sys
+from pathlib import Path
+from datetime import datetime
+import argparse
+
+
+def extract_key_metrics(report_path):
+    """Extract key metrics from a report json file."""
+    try:
+        with open(report_path, 'r') as f:
+            data = json.load(f)
+
+        metrics = {
+            'design_name': '',  # filled in by the caller from the logs path
+            'platform': '',  # filled in by the caller from the logs path
+            'total_area': data.get('finish__design__core__area', 'N/A'),
+            'utilization': f"{data.get('finish__design__instance__utilization', 0) * 100:.1f}%" if data.get('finish__design__instance__utilization') else 'N/A',
+            'instance_count': data.get('finish__design__instance__count', 'N/A'),
+            'setup_tns': data.get('finish__timing__setup__tns', 'N/A'),
+            'setup_wns': data.get('finish__timing__setup__ws', 'N/A'),
+            'hold_tns': data.get('finish__timing__hold__tns', 'N/A'),
+            'hold_wns': data.get('finish__timing__hold__ws', 'N/A'),
+            'total_power': f"{data.get('finish__power__total', 0) * 1000:.3f} mW" if data.get('finish__power__total') else 'N/A',
+            'clock_skew': data.get('finish__clock__skew__setup', 'N/A'),
+            'warnings': data.get('finish__flow__warnings__count', 'N/A'),
+            'errors': data.get('finish__flow__errors__count', 'N/A'),
+            'status': 'Pass' if data.get('finish__flow__errors__count', 1) == 0 else 'Fail'
+        }
+
+        return metrics
+    except (FileNotFoundError, json.JSONDecodeError, KeyError) as e:
+        print(f"Error reading {report_path}: {e}")
+        return None
+
+
+def find_report_files(base_dir):
+    """Find all 6_report.json files in the results directory structure."""
+    report_files = []
+    base_path = Path(base_dir)
+
+    for report_file in base_path.rglob("logs/**/6_report.json"):
+        parts = report_file.parts
+        if len(parts) >= 4:
+            # Extract platform and design from path (logs/<platform>/<design>/...)
+            logs_idx = next(i for i, part in enumerate(
+                parts) if part == 'logs')
+            if logs_idx + 3 < len(parts):
+                platform = parts[logs_idx + 1]
+                design = parts[logs_idx + 2]
+                report_files.append({
+                    'path': report_file,
+                    'platform': platform,
+                    'design': design
+                })
+
+    return report_files
+
+
+def generate_markdown_table(results, github_repo=None, github_run_id=None, artifacts_file=None):
+    """Generate a markdown table from the results."""
+    if not results:
+        return "No results found.\n"
+
+    # Load artifact information if available
+    artifact_map = {}
+    if artifacts_file and os.path.exists(artifacts_file):
+        try:
+            with open(artifacts_file, 'r') as f:
+                artifacts_data = json.load(f)
+            # Create mapping of artifact name to ID
+            for artifact in artifacts_data.get('artifacts', []):
+                artifact_map[artifact['name']] = artifact['id']
+        except Exception as e:
+            print(f"Warning: Could not load artifacts file: {e}")
+
+    # Table header - add Image column if GitHub info is provided
+    if github_repo and github_run_id:
+        table = """
+## OpenROAD Flow Results
+
+Last updated: {}
+
+| Design | Platform | Status | Area (μm²) | Utilization | Instances | Setup TNS | Setup WNS | Hold TNS | Hold WNS | Power | Clock Skew | Warnings | Errors | Image |
+|--------|----------|--------|------------|-------------|-----------|-----------|-----------|----------|----------|-------|------------|----------|--------|-------|
+""".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC"))
+    else:
+        table = """
+## OpenROAD Flow Results
+
+Last updated: {}
+
+| Design | Platform | Status | Area (μm²) | Utilization | Instances | Setup TNS | Setup WNS | Hold TNS | Hold WNS | Power | Clock Skew | Warnings | Errors |
+|--------|----------|--------|------------|-------------|-----------|-----------|-----------|----------|----------|-------|------------|----------|--------|
+""".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC"))
+
+    # Sort results by platform, then design name
+    sorted_results = sorted(results, key=lambda x: (
+        x['platform'], x['design_name']))
+
+    # Table rows
+    for result in sorted_results:
+        if github_repo and github_run_id:
+            # NOTE(review): must mirror the workflow's sanitize step; verify for nested design dirs (e.g. liteeth/*)
+            sanitized_name = f"designs-{result['platform']}-{result['design_name']}"
+            artifact_name = f"design-image-{sanitized_name}"
+
+            # Try to get the specific artifact ID for direct download
+            if artifact_name in artifact_map:
+                artifact_id = artifact_map[artifact_name]
+                artifact_url = f"https://github.com/{github_repo}/actions/runs/{github_run_id}/artifacts/{artifact_id}"
+                image_link = f"[Download Image]({artifact_url})"
+            else:
+                # Fallback to run page if artifact ID not found
+                artifact_url = f"https://github.com/{github_repo}/actions/runs/{github_run_id}"
+                image_link = f"[View Run]({artifact_url})"
+
+            table += f"| {result['design_name']} | {result['platform']} | {result['status']} | {result['total_area']} | {result['utilization']} | {result['instance_count']} | {result['setup_tns']} | {result['setup_wns']} | {result['hold_tns']} | {result['hold_wns']} | {result['total_power']} | {result['clock_skew']} | {result['warnings']} | {result['errors']} | {image_link} |\n"
+        else:
+            table += f"| {result['design_name']} | {result['platform']} | {result['status']} | {result['total_area']} | {result['utilization']} | {result['instance_count']} | {result['setup_tns']} | {result['setup_wns']} | {result['hold_tns']} | {result['hold_wns']} | {result['total_power']} | {result['clock_skew']} | {result['warnings']} | {result['errors']} |\n"
+
+    table += "\n"
+    return table
+
+
+def update_readme(readme_path, results_table):
+    """Update the README.md file with the results table."""
+    try:
+        with open(readme_path, 'r') as f:
+            content = f.read()
+
+        # Find the marker or add one; markers must be non-empty, otherwise
+        # find("") always matches at 0 and the table is duplicated on every run
+        start_marker = "<!-- OPENROAD_RESULTS_START -->"
+        end_marker = "<!-- OPENROAD_RESULTS_END -->"
+
+        if start_marker in content and end_marker in content:
+            # Replace existing table
+            start_idx = content.find(start_marker)
+            end_idx = content.find(end_marker) + len(end_marker)
+            new_content = (content[:start_idx] +
+                           start_marker + "\n" +
+                           results_table +
+                           end_marker +
+                           content[end_idx:])
+        else:
+            # Add table at the end
+            new_content = content + "\n" + start_marker + \
+                "\n" + results_table + end_marker + "\n"
+
+        with open(readme_path, 'w') as f:
+            f.write(new_content)
+
+        print(f"Updated {readme_path} with results table")
+
+    except Exception as e:
+        print(f"Error updating README: {e}")
+
+def main():
+    cli = argparse.ArgumentParser(
+        description='Extract OpenROAD metrics and update README')
+    cli.add_argument('--base-dir', default='.',
+                     help='Base directory to search for results (default: current directory)')
+    cli.add_argument('--readme', default='README.md',
+                     help='Path to README.md file to update (default: README.md)')
+    cli.add_argument('--output', default=None,
+                     help='Output file for results table (default: update README)')
+    cli.add_argument('--github-repo', default=None,
+                     help='GitHub repository (owner/repo) for artifact links')
+    cli.add_argument('--github-run-id', default=None,
+                     help='GitHub Actions run ID for artifact links')
+    cli.add_argument('--artifacts-file', default=None,
+                     help='JSON file containing GitHub artifacts information')
+
+    opts = cli.parse_args()
+
+    # Locate every 6_report.json under the base directory
+    reports = find_report_files(opts.base_dir)
+    print(f"Found {len(reports)} report files")
+
+    # Pull a metric row out of each report that parses cleanly
+    rows = []
+    for entry in reports:
+        print(
+            f"Processing {entry['platform']}/{entry['design']}...")
+        row = extract_key_metrics(entry['path'])
+        if row:
+            row['design_name'] = entry['design']
+            row['platform'] = entry['platform']
+            rows.append(row)
+
+    # Render the collected rows as a markdown table
+    md = generate_markdown_table(rows, opts.github_repo, opts.github_run_id, opts.artifacts_file)
+
+    if opts.output:
+        # Explicit output file requested on the command line
+        with open(opts.output, 'w') as fh:
+            fh.write(md)
+        print(f"Results written to {opts.output}")
+    else:
+        # Default behaviour: splice the table into the README
+        update_readme(opts.readme, md)
+
+    print(f"Processed {len(rows)} designs successfully")
+
+
+if __name__ == "__main__":
+    main()