diff --git a/.github/workflows/developer-guide-docs.yml b/.github/workflows/developer-guide-docs.yml
index bd8c1b34a8..b678ad0684 100644
--- a/.github/workflows/developer-guide-docs.yml
+++ b/.github/workflows/developer-guide-docs.yml
@@ -84,7 +84,30 @@ jobs:
       - name: Install Asciidoctor tooling
         run: |
-          gem install --no-document asciidoctor asciidoctor-pdf
+          gem install --no-document asciidoctor asciidoctor-pdf rouge
+
+      - name: Run Asciidoctor lint
+        run: |
+          set -euo pipefail
+          REPORT_DIR="build/developer-guide/reports"
+          REPORT_FILE="${REPORT_DIR}/asciidoc-lint-report.txt"
+          mkdir -p "$REPORT_DIR"
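+          # Drop -e while linting so a failing document doesn't abort the step;
+          # tee would otherwise mask asciidoctor's exit code, so PIPESTATUS[0]
+          # captures it for the summary step. The Vale step below uses the same
+          # capture pattern.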
+          set +e
+          asciidoctor \
+            --require rouge \
+            --failure-level WARN \
+            --verbose \
+            --trace \
+            -o /dev/null \
+            docs/developer-guide/developer-guide.asciidoc \
+            2>&1 | tee "$REPORT_FILE"
+          STATUS=${PIPESTATUS[0]}
+          set -e
+          echo "ASCII_DOC_LINT_REPORT=$REPORT_FILE" >> "$GITHUB_ENV"
+          echo "ASCII_DOC_LINT_STATUS=$STATUS" >> "$GITHUB_ENV"
+          if [ "$STATUS" -ne 0 ]; then
+            echo "Asciidoctor exited with status $STATUS" >&2
+          fi

       - name: Build Developer Guide HTML and PDF
         run: |
@@ -135,6 +158,76 @@ jobs:
             rm -f "$GENERATED_COVER_SVG"
           fi

+      - name: Install Vale
+        run: |
+          set -euo pipefail
+          VALE_VERSION="3.13.0"
+          VALE_ARCHIVE="vale_${VALE_VERSION}_Linux_64-bit.tar.gz"
+          curl -fsSL -o "$VALE_ARCHIVE" "https://github.com/errata-ai/vale/releases/download/v${VALE_VERSION}/${VALE_ARCHIVE}"
+          tar -xzf "$VALE_ARCHIVE"
+          sudo mv vale /usr/local/bin/vale
+          rm -f "$VALE_ARCHIVE"
+
+      - name: Sync Vale styles
+        run: |
+          set -euo pipefail
+          vale sync --config docs/developer-guide/.vale.ini
+
+      - name: Run Vale style linter
+        run: |
+          set -euo pipefail
+          REPORT_DIR="build/developer-guide/reports"
+          REPORT_FILE="${REPORT_DIR}/vale-report.json"
+          HTML_REPORT="${REPORT_DIR}/vale-report.html"
+          mkdir -p "$REPORT_DIR"
+          set +e
+          vale --config docs/developer-guide/.vale.ini --output=JSON docs/developer-guide > "$REPORT_FILE"
+          STATUS=$?
+          set -e
+          python3 scripts/developer-guide/vale_report_to_html.py --input "$REPORT_FILE" --output "$HTML_REPORT"
+          echo "VALE_REPORT=$REPORT_FILE" >> "$GITHUB_ENV"
+          echo "VALE_HTML_REPORT=$HTML_REPORT" >> "$GITHUB_ENV"
+          echo "VALE_STATUS=$STATUS" >> "$GITHUB_ENV"
+          if [ "$STATUS" -ne 0 ]; then
+            echo "Vale exited with status $STATUS" >&2
+          fi
+
+      - name: Check for unused developer guide images
+        run: |
+          set -euo pipefail
+          REPORT_DIR="build/developer-guide/reports"
+          JSON_REPORT="${REPORT_DIR}/unused-images.json"
+          TEXT_REPORT="${REPORT_DIR}/unused-images.txt"
+          mkdir -p "$REPORT_DIR"
+          python3 scripts/developer-guide/find_unused_images.py docs/developer-guide --output "$JSON_REPORT" | tee "$TEXT_REPORT"
+          echo "UNUSED_IMAGES_JSON=$JSON_REPORT" >> "$GITHUB_ENV"
+          echo "UNUSED_IMAGES_TEXT=$TEXT_REPORT" >> "$GITHUB_ENV"
+
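+      # The summarize steps append key=value pairs to $GITHUB_OUTPUT so the
+      # PR-comment script further down can read them as step outputs.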
+      - name: Summarize AsciiDoc linter findings
+        id: summarize_asciidoc_lint
+        run: |
+          python3 scripts/developer-guide/summarize_reports.py ascii \
+            --report "${ASCII_DOC_LINT_REPORT}" \
+            --status "${ASCII_DOC_LINT_STATUS:-0}" \
+            --output "${GITHUB_OUTPUT}"
+
+      - name: Summarize Vale findings
+        id: summarize_vale
+        run: |
+          python3 scripts/developer-guide/summarize_reports.py vale \
+            --report "${VALE_REPORT}" \
+            --status "${VALE_STATUS:-0}" \
+            --output "${GITHUB_OUTPUT}"
+
+      - name: Summarize unused image findings
+        id: summarize_unused_images
+        run: |
+          python3 scripts/developer-guide/summarize_reports.py unused-images \
+            --report "${UNUSED_IMAGES_JSON}" \
+            --output "${GITHUB_OUTPUT}" \
+            --details-key details \
+            --preview-limit 10
+
       - name: Upload HTML artifact
         uses: actions/upload-artifact@v4
         with:
@@ -149,9 +242,39 @@ jobs:
           path: build/developer-guide/pdf/developer-guide.pdf
           if-no-files-found: error

+      - name: Upload AsciiDoc linter report
+        uses: actions/upload-artifact@v4
+        with:
+          name: developer-guide-asciidoc-lint
+          path: ${{ env.ASCII_DOC_LINT_REPORT }}
+          if-no-files-found: warn
+
+      - name: Upload Vale report
+        uses: actions/upload-artifact@v4
+        with:
+          name: developer-guide-vale-report
+          path: |
+            ${{ env.VALE_REPORT }}
+            ${{ env.VALE_HTML_REPORT }}
+          if-no-files-found: warn
+
+      - name: Upload unused image report
+        uses: actions/upload-artifact@v4
+        with:
+          name: developer-guide-unused-images
+          path: |
+            ${{ env.UNUSED_IMAGES_JSON }}
+            ${{ env.UNUSED_IMAGES_TEXT }}
+          if-no-files-found: warn
+
       - name: Comment with artifact download links
         if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }}
         uses: actions/github-script@v7
+        env:
+          ASCII_SUMMARY: ${{ steps.summarize_asciidoc_lint.outputs.summary }}
+          VALE_SUMMARY: ${{ steps.summarize_vale.outputs.summary }}
+          UNUSED_SUMMARY: ${{ steps.summarize_unused_images.outputs.summary }}
+          UNUSED_DETAILS: ${{ steps.summarize_unused_images.outputs.details }}
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
@@ -183,14 +306,29 @@ jobs:
              per_page: 100
            });

-            const links = [];
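+            // Index every artifact's download URL by name; a link is emitted
+            // below only for artifacts that were actually uploaded.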
+            const artifactLinks = new Map();
             for (const artifact of artifacts.data.artifacts) {
-              if (artifact.name === 'developer-guide-html') {
-                links.push(`- [Developer Guide HTML package](https://github.com/${owner}/${repo}/actions/runs/${runId}/artifacts/${artifact.id})`);
-              }
-              if (artifact.name === 'developer-guide-pdf') {
-                links.push(`- [Developer Guide PDF](https://github.com/${owner}/${repo}/actions/runs/${runId}/artifacts/${artifact.id})`);
-              }
+              artifactLinks.set(
+                artifact.name,
+                `https://github.com/${owner}/${repo}/actions/runs/${runId}/artifacts/${artifact.id}`
+              );
+            }
+
+            const links = [];
+            if (artifactLinks.has('developer-guide-html')) {
+              links.push(`- [Developer Guide HTML package](${artifactLinks.get('developer-guide-html')})`);
+            }
+            if (artifactLinks.has('developer-guide-pdf')) {
+              links.push(`- [Developer Guide PDF](${artifactLinks.get('developer-guide-pdf')})`);
+            }
+            if (artifactLinks.has('developer-guide-asciidoc-lint')) {
+              links.push(`- [AsciiDoc linter report](${artifactLinks.get('developer-guide-asciidoc-lint')})`);
+            }
+            if (artifactLinks.has('developer-guide-vale-report')) {
+              links.push(`- [Vale report](${artifactLinks.get('developer-guide-vale-report')})`);
+            }
+            if (artifactLinks.has('developer-guide-unused-images')) {
+              links.push(`- [Unused image report](${artifactLinks.get('developer-guide-unused-images')})`);
             }

             if (!links.length) {
@@ -198,7 +336,46 @@ jobs:
               return;
             }

-            const body = `${marker}\nDeveloper Guide build artifacts are available for download from this workflow run:\n\n${links.join('\n')}\n`;
+            const qualityLines = [];
+            const asciiSummary = process.env.ASCII_SUMMARY?.trim();
+            const valeSummary = process.env.VALE_SUMMARY?.trim();
+            const unusedSummary = process.env.UNUSED_SUMMARY?.trim();
+            const asciiLink = artifactLinks.get('developer-guide-asciidoc-lint');
+            const valeLink = artifactLinks.get('developer-guide-vale-report');
+            const unusedLink = artifactLinks.get('developer-guide-unused-images');
+
+            if (asciiSummary) {
+              qualityLines.push(`- AsciiDoc linter: ${asciiSummary}${asciiLink ? ` ([report](${asciiLink}))` : ''}`);
+            }
+            if (valeSummary) {
+              qualityLines.push(`- Vale: ${valeSummary}${valeLink ? ` ([report](${valeLink}))` : ''}`);
+            }
+            if (unusedSummary) {
+              qualityLines.push(`- Image references: ${unusedSummary}${unusedLink ? ` ([report](${unusedLink}))` : ''}`);
+            }
+
+            let unusedDetails = process.env.UNUSED_DETAILS ? process.env.UNUSED_DETAILS.split('\n') : [];
+            unusedDetails = unusedDetails.filter(Boolean);
+            const detailsSection = unusedDetails.length
+              ? `\nUnused image preview:\n\n${unusedDetails.map(line => `  ${line}`).join('\n')}\n`
+              : '';
+
+            const sections = [
+              `${marker}`,
+              'Developer Guide build artifacts are available for download from this workflow run:',
+              '',
+              links.join('\n')
+            ];
+
+            if (qualityLines.length) {
+              sections.push('', 'Developer Guide quality checks:', '', qualityLines.join('\n'));
+            }
+
+            if (detailsSection) {
+              sections.push(detailsSection.trimEnd());
+            }
+
+            const body = sections.join('\n') + '\n';

             const comments = await github.rest.issues.listComments({
               owner,
               repo,
diff --git a/docs/developer-guide/.gitignore b/docs/developer-guide/.gitignore
index 7516c3134b..acec73ea5c 100644
--- a/docs/developer-guide/.gitignore
+++ b/docs/developer-guide/.gitignore
@@ -1,2 +1,3 @@
 book-cover.generated.svg
 book-cover.generated.png
+styles/
diff --git a/docs/developer-guide/.vale.ini b/docs/developer-guide/.vale.ini
new file mode 100644
index 0000000000..8b8cd0f341
--- /dev/null
+++ b/docs/developer-guide/.vale.ini
@@ -0,0 +1,6 @@
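+# Styles are fetched by `vale sync` into StylesPath (git-ignored as styles/).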
+StylesPath = styles
+MinAlertLevel = suggestion
+Packages = https://github.com/errata-ai/packages/releases/download/v0.2.0/Microsoft.zip, https://github.com/errata-ai/packages/releases/download/v0.2.0/proselint.zip, https://github.com/errata-ai/packages/releases/download/v0.2.0/write-good.zip
+
+[*.{adoc,asciidoc}]
+BasedOnStyles = Microsoft, proselint, write-good
diff --git a/docs/developer-guide/Config.groovy b/docs/developer-guide/Config.groovy
new file mode 100644
index 0000000000..2b04f9b241
--- /dev/null
+++ b/docs/developer-guide/Config.groovy
@@ -0,0 +1,11 @@
+outputPath = 'build'
+
+inputPath = '.'
+
+inputFiles = [
+    [file: 'developer-guide.asciidoc', formats: ['html']]
+]
+
+imageDirs = [
+    'img'
+]
diff --git a/scripts/developer-guide/find_unused_images.py b/scripts/developer-guide/find_unused_images.py
new file mode 100755
index 0000000000..cc0215ad2c
--- /dev/null
+++ b/scripts/developer-guide/find_unused_images.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""Identify unreferenced images in the developer guide."""
+from __future__ import annotations
+
+import argparse
+import json
+from pathlib import Path
+from typing import Iterable, List
+
+ASCIIDOC_EXTENSIONS = {".adoc", ".asciidoc"}
+
+
+def iter_text_files(root: Path) -> Iterable[Path]:
+    for path in root.rglob("*"):
+        if path.is_file() and path.suffix.lower() in ASCIIDOC_EXTENSIONS:
+            yield path
+
+
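+# Typical invocation, mirroring the workflow step:
+#   python3 scripts/developer-guide/find_unused_images.py docs/developer-guide \
+#       --output build/developer-guide/reports/unused-images.json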
+def main() -> None:
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument("doc_root", type=Path, help="Path to the developer guide root directory")
+    parser.add_argument(
+        "--image-dir",
+        type=Path,
+        default=None,
+        help="Directory containing images (defaults to <doc_root>/img)",
+    )
+    parser.add_argument(
+        "--output",
+        type=Path,
+        default=None,
+        help="Optional path to write a JSON report",
+    )
+    args = parser.parse_args()
+
+    doc_root = args.doc_root.resolve()
+    image_dir = (args.image_dir or (doc_root / "img")).resolve()
+
+    if not image_dir.exists():
+        raise SystemExit(f"Image directory '{image_dir}' does not exist")
+
+    adoc_files = list(iter_text_files(doc_root))
+    contents = [path.read_text(encoding="utf-8", errors="ignore") for path in adoc_files]
+
+    unused: List[str] = []
+    for image_path in sorted(image_dir.rglob("*")):
+        if not image_path.is_file():
+            continue
+        rel_path = image_path.relative_to(doc_root).as_posix()
+        if any(rel_path in text for text in contents):
+            continue
+        # Also fall back to checking just the file name to catch references that rely on imagesdir.
+        filename = image_path.name
+        if any(filename in text for text in contents):
+            continue
+        unused.append(rel_path)
+
+    report = {"unused_images": unused}
+
+    if args.output:
+        args.output.parent.mkdir(parents=True, exist_ok=True)
+        args.output.write_text(json.dumps(report, indent=2), encoding="utf-8")
+
+    if unused:
+        print("Unused images detected:")
+        for rel_path in unused:
+            print(f"  - {rel_path}")
+    else:
+        print("No unused images found.")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/developer-guide/summarize_reports.py b/scripts/developer-guide/summarize_reports.py
new file mode 100644
index 0000000000..e234035c4f
--- /dev/null
+++ b/scripts/developer-guide/summarize_reports.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python3
+"""Utilities for summarizing developer guide quality reports."""
+
+from __future__ import annotations
+
+import argparse
+import json
+import re
+from pathlib import Path
+from typing import Iterable
+
+
+def _is_alert(candidate: dict[str, object]) -> bool:
+    required_keys = {"Severity", "Check", "Message"}
+    return required_keys.issubset(candidate.keys())
+
+
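+# Vale's --output=JSON maps each file path to a list of alerts; this walker
+# also tolerates wrapper objects by recursing with a best-effort path hint.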
+def _collect_alerts(node: object, path_hint: str = "") -> list[dict[str, object]]:
+    alerts: list[dict[str, object]] = []
+
+    if isinstance(node, dict):
+        if _is_alert(node):
+            alert = dict(node)
+            if path_hint and not alert.get("Path"):
+                alert["Path"] = path_hint
+            alerts.append(alert)
+            return alerts
+
+        node_path = path_hint
+        path_value = node.get("Path")
+        if isinstance(path_value, str) and path_value:
+            node_path = path_value
+
+        alert_list = node.get("alerts")
+        if isinstance(alert_list, list):
+            alerts.extend(_collect_alerts(alert_list, node_path))
+
+        file_map = node.get("files")
+        if isinstance(file_map, dict):
+            for file_path, file_node in file_map.items():
+                hint = file_path if isinstance(file_path, str) else node_path
+                alerts.extend(_collect_alerts(file_node, hint))
+
+        for key, value in node.items():
+            if key in {"alerts", "files", "Path"}:
+                continue
+            hint = node_path
+            if isinstance(key, str) and ("/" in key or key.endswith(".adoc") or key.endswith(".asciidoc")):
+                hint = key
+            alerts.extend(_collect_alerts(value, hint))
+
+    elif isinstance(node, list):
+        for item in node:
+            alerts.extend(_collect_alerts(item, path_hint))
+
+    return alerts
+
+
+def write_outputs(entries: Iterable[tuple[str, str]], output: Path | None) -> None:
+    lines: list[str] = []
+    for key, value in entries:
+        delimiter = ""
+        if "\n" in value:
+            delimiter = "__GH_OUTPUT__"
+            while delimiter in value:
+                delimiter += "_X"
+            lines.append(f"{key}<<{delimiter}\n{value}\n{delimiter}")
+        else:
+            lines.append(f"{key}={value}")
+
+    content = "\n".join(lines) + "\n"
+    if output:
+        output.parent.mkdir(parents=True, exist_ok=True)
+        with output.open("a", encoding="utf-8") as fh:
+            fh.write(content)
+    else:
+        print(content, end="")
+
+
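+# Example: write_outputs([("summary", "3 alert(s)")], Path("out.txt")) appends
+# "summary=3 alert(s)"; values with embedded newlines use GitHub's delimited
+# "key<<DELIM ... DELIM" form so they survive as multiline step outputs.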
+def _normalize_status(status: str) -> str:
+    return status.strip()
+
+
+def _has_nonzero_status(status: str) -> bool:
+    status = _normalize_status(status)
+    return bool(status and status != "0")
+
+
+def summarize_asciidoc(report: Path, status: str, summary_key: str, output: Path | None) -> None:
+    text = ""
+    if report.is_file():
+        text = report.read_text(encoding="utf-8", errors="ignore")
+
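+    # Count severity tokens from diagnostic lines such as
+    #   asciidoctor: WARNING: ...
+    #   some/file.adoc:12:3: ERROR: ...
+    # (illustrative shapes; the pattern accepts either prefix).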
+    pattern = re.compile(
+        r"^\s*(?:[^:\n]+:\d+:\d+:\s+|asciidoctor:\s+)(ERROR|WARN(?:ING)?|INFO)\b",
+        re.MULTILINE,
+    )
+    matches = pattern.findall(text)
+
+    counts = {"error": 0, "warning": 0, "info": 0}
+    for severity in matches:
+        normalized = severity.upper()
+        if normalized == "ERROR":
+            counts["error"] += 1
+        elif normalized in {"WARN", "WARNING"}:
+            counts["warning"] += 1
+        elif normalized == "INFO":
+            counts["info"] += 1
+
+    total = sum(counts.values())
+
+    if total:
+        parts = [f"{counts['error']} errors"] if counts["error"] else []
+        if counts["warning"]:
+            parts.append(f"{counts['warning']} warnings")
+        if counts["info"]:
+            parts.append(f"{counts['info']} info")
+        detail = f" ({', '.join(parts)})" if parts else ""
+        summary = f"{total} issue(s) flagged{detail}"
+        if _has_nonzero_status(status):
+            summary += f" (exit code {_normalize_status(status)})"
+    elif _has_nonzero_status(status):
+        summary = f"Linter failed (exit code {_normalize_status(status)})"
+    else:
+        summary = "No issues found"
+
+    write_outputs([(summary_key, summary)], output)
+
+
+def summarize_vale(
+    report: Path, status: str, summary_key: str, output: Path | None
+) -> None:
+    alerts: list[dict[str, object]] = []
+    if report.is_file():
+        try:
+            data = json.loads(report.read_text(encoding="utf-8"))
+        except json.JSONDecodeError:
+            data = {}
+        alerts = _collect_alerts(data)
+
+    counts = {"error": 0, "warning": 0, "suggestion": 0}
+    total = 0
+    for alert in alerts:
+        if not isinstance(alert, dict):
+            continue
+        severity = str(alert.get("Severity", "")).lower()
+        if severity in counts:
+            counts[severity] += 1
+            total += 1
+
+    if total:
+        parts = [
+            f"{counts['error']} errors",
+            f"{counts['warning']} warnings",
+            f"{counts['suggestion']} suggestions",
+        ]
+        summary = f"{total} alert(s) ({', '.join(parts)})"
+        if _has_nonzero_status(status):
+            summary += f" (exit code {_normalize_status(status)})"
+    elif _has_nonzero_status(status):
+        summary = f"Vale failed (exit code {_normalize_status(status)})"
+    else:
+        summary = "No alerts found"
+
+    write_outputs([(summary_key, summary)], output)
+
+
+def summarize_unused_images(
+    report: Path,
+    summary_key: str,
+    details_key: str | None,
+    preview_limit: int,
+    output: Path | None,
+) -> None:
+    unused: list[str] = []
+    if report.is_file():
+        try:
+            data = json.loads(report.read_text(encoding="utf-8"))
+        except json.JSONDecodeError:
+            data = {}
+        if isinstance(data, dict):
+            value = data.get("unused_images", [])
+            if isinstance(value, list):
+                unused = [str(item) for item in value]
+
+    count = len(unused)
+    if count:
+        summary = f"{count} unused image(s) found"
+        preview = unused[: max(preview_limit, 0)] if preview_limit else []
+        lines = [f"- {path}" for path in preview]
+        if preview_limit and count > preview_limit:
+            lines.append(f"- ... and {count - preview_limit} more")
+    else:
+        summary = "No unused images detected"
+        lines = []
+
+    output_entries: list[tuple[str, str]] = [(summary_key, summary)]
+    if details_key and lines:
+        details_value = "\n".join(lines)
+        output_entries.append((details_key, details_value))
+
+    write_outputs(output_entries, output)
+
+
+def build_common_parser() -> argparse.ArgumentParser:
+    common = argparse.ArgumentParser(add_help=False)
+    common.add_argument(
+        "--output",
+        type=Path,
+        default=None,
+        help="File to append GitHub Actions outputs to (defaults to stdout).",
+    )
+    return common
+
+
+def parse_args() -> argparse.Namespace:
+    common = build_common_parser()
+    parser = argparse.ArgumentParser(description=__doc__, parents=[common])
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    ascii_parser = subparsers.add_parser(
+        "ascii",
+        help="Summarize Asciidoctor diagnostics.",
+        parents=[common],
+    )
+    ascii_parser.add_argument("--report", type=Path, required=True)
+    ascii_parser.add_argument("--status", default="0")
+    ascii_parser.add_argument("--summary-key", default="summary")
+
+    vale_parser = subparsers.add_parser(
+        "vale",
+        help="Summarize Vale style linter results.",
+        parents=[common],
+    )
+    vale_parser.add_argument("--report", type=Path, required=True)
+    vale_parser.add_argument("--status", default="0")
+    vale_parser.add_argument("--summary-key", default="summary")
+
+    unused_parser = subparsers.add_parser(
+        "unused-images",
+        help="Summarize unused image report results.",
+        parents=[common],
+    )
+    unused_parser.add_argument("--report", type=Path, required=True)
+    unused_parser.add_argument("--summary-key", default="summary")
+    unused_parser.add_argument("--details-key", default=None)
+    unused_parser.add_argument("--preview-limit", type=int, default=10)
+
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+    output = args.output
+    command = args.command
+    if command == "ascii":
+        summarize_asciidoc(args.report, args.status, args.summary_key, output)
+    elif command == "vale":
+        summarize_vale(args.report, args.status, args.summary_key, output)
+    elif command == "unused-images":
+        summarize_unused_images(
+            args.report,
+            args.summary_key,
+            args.details_key,
+            args.preview_limit,
+            output,
+        )
+    else:
+        raise ValueError(f"Unsupported command: {command}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/developer-guide/vale_report_to_html.py b/scripts/developer-guide/vale_report_to_html.py
new file mode 100755
index 0000000000..190365f579
--- /dev/null
+++ b/scripts/developer-guide/vale_report_to_html.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python3
+"""Convert a Vale JSON report into a standalone HTML file."""
+
+from __future__ import annotations
+
+import argparse
+import html
+import json
+from pathlib import Path
+from typing import Iterable
+
+
+def _is_alert(candidate: dict[str, object]) -> bool:
+    required_keys = {"Severity", "Check", "Message"}
+    return required_keys.issubset(candidate.keys())
+
+
+def _collect_alerts(node: object, path_hint: str = "") -> list[dict[str, object]]:
+    alerts: list[dict[str, object]] = []
+
+    if isinstance(node, dict):
+        if _is_alert(node):
+            alert = dict(node)
+            if path_hint and not alert.get("Path"):
+                alert["Path"] = path_hint
+            alerts.append(alert)
+            return alerts
+
+        node_path = path_hint
+        path_value = node.get("Path")
+        if isinstance(path_value, str) and path_value:
+            node_path = path_value
+
+        alert_list = node.get("alerts")
+        if isinstance(alert_list, list):
+            alerts.extend(_collect_alerts(alert_list, node_path))
+
+        file_map = node.get("files")
+        if isinstance(file_map, dict):
+            for file_path, file_node in file_map.items():
+                hint = file_path if isinstance(file_path, str) else node_path
+                alerts.extend(_collect_alerts(file_node, hint))
+
+        for key, value in node.items():
+            if key in {"alerts", "files", "Path"}:
+                continue
+            hint = node_path
+            if isinstance(key, str) and ("/" in key or key.endswith(".adoc") or key.endswith(".asciidoc")):
+                hint = key
+            alerts.extend(_collect_alerts(value, hint))
+
+    elif isinstance(node, list):
+        for item in node:
+            alerts.extend(_collect_alerts(item, path_hint))
+
+    return alerts
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument("--input", type=Path, required=True, help="Path to the Vale JSON report.")
+    parser.add_argument("--output", type=Path, required=True, help="Destination path for the HTML report.")
+    return parser.parse_args()
+
+
+def load_alerts(report: Path) -> list[dict[str, object]]:
+    if not report.is_file():
+        return []
+    try:
+        data = json.loads(report.read_text(encoding="utf-8"))
+    except json.JSONDecodeError:
+        return []
+
+    return _collect_alerts(data)
+
+
+def render_alert_rows(alerts: Iterable[dict[str, object]]) -> str:
+    normalized: list[dict[str, str]] = []
+    for alert in alerts:
+        if not isinstance(alert, dict):
+            continue
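+        # Vale nests positions under "Span"; accept both the
+        # {"Start": {"Line": ..., "Column": ...}} object form and a bare
+        # [line, column] pair.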
+        span = alert.get("Span")
+        line = column = ""
+        if isinstance(span, dict):
+            start = span.get("Start")
+            if isinstance(start, dict):
+                line = str(start.get("Line", ""))
+                column = str(start.get("Column", ""))
+        elif isinstance(span, list) and span:
+            line = str(span[0])
+            if len(span) > 1:
+                column = str(span[1])
+        normalized.append(
+            {
+                "file": str(alert.get("Path", "")),
+                "line": line,
+                "column": column,
+                "severity": str(alert.get("Severity", "")),
+                "rule": str(alert.get("Check", "")),
+                "message": str(alert.get("Message", "")),
+            }
+        )
+
+    if not normalized:
+        return '<tr><td colspan="6">No alerts found.</td></tr>'
+
+    def sort_key(entry: dict[str, str]) -> tuple:
+        def as_int(value: str) -> int:
+            try:
+                return int(value)
+            except (TypeError, ValueError):
+                return 0
+
+        return (
+            entry["file"],
+            as_int(entry["line"]),
+            as_int(entry["column"]),
+            entry["rule"],
+        )
+
+    normalized.sort(key=sort_key)
+
+    rows: list[str] = []
+    for entry in normalized:
+        severity_value = entry["severity"]
+        severity = html.escape(severity_value)
+        severity_class = f"severity-{severity_value.lower()}" if severity_value else ""
+        rows.append(
+            "<tr>"
+            f"<td>{html.escape(entry['file'])}</td>"
+            f"<td>{html.escape(entry['line'])}</td>"
+            f"<td>{html.escape(entry['column'])}</td>"
+            f'<td class="{severity_class}">{severity}</td>'
+            f"<td>{html.escape(entry['rule'])}</td>"
+            f"<td>{html.escape(entry['message'])}</td>"
+            "</tr>"
+        )
+
+    return "\n".join(rows)
+
+
+def main() -> None:
+    args = parse_args()
+    alerts = load_alerts(args.input)
+    counts = {"error": 0, "warning": 0, "suggestion": 0}
+    for alert in alerts:
+        if not isinstance(alert, dict):
+            continue
+        severity = str(alert.get("Severity", "")).lower()
+        if severity in counts:
+            counts[severity] += 1
+    args.output.parent.mkdir(parents=True, exist_ok=True)
+    table_rows = render_alert_rows(alerts)
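+    # Keep the page self-contained (inline CSS, no external assets) so the
+    # uploaded artifact renders when opened straight from a download.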
+    html_content = f"""<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Vale Report</title>
+<style>
+body {{ font-family: sans-serif; margin: 2rem; }}
+table {{ border-collapse: collapse; width: 100%; }}
+th, td {{ border: 1px solid #ccc; padding: 0.4rem 0.6rem; text-align: left; vertical-align: top; }}
+th {{ background: #f5f5f5; }}
+.severity-error {{ color: #b30000; }}
+.severity-warning {{ color: #8a6d00; }}
+.severity-suggestion {{ color: #00529b; }}
+</style>
+</head>
+<body>
+<h1>Vale Report</h1>
+<p>Total alerts: {len(alerts)} ({counts['error']} errors, {counts['warning']} warnings, {counts['suggestion']} suggestions)</p>
+<table>
+<thead>
+<tr>
+<th>File</th>
+<th>Line</th>
+<th>Column</th>
+<th>Severity</th>
+<th>Rule</th>
+<th>Message</th>
+</tr>
+</thead>
+<tbody>
+{table_rows}
+</tbody>
+</table>
+</body>
+</html>
+"""
+    args.output.write_text(html_content, encoding="utf-8")
+
+
+if __name__ == "__main__":
+    main()