Skip to content

Commit 300f419

Browse files
feat: APV JSON ingest + risk summary CLI (#5)
* feat: APV JSON ingest + risk summary CLI (exit code by severity) * fix: lint+types; add summarize_apv_json; clean worst logic * fix: allow summarize_apv_json to read file paths; keep lint clean * fix: support predicted_risk; by_severity keys in lowercase; path-or-text ingest * fix: by_severity exposes upper+lower keys; clean types * chore(lint): migrate Ruff config; fix mypy generator types (no ignores) * chore(ci): retrigger checks after disabling labels workflow
1 parent 3c0504b commit 300f419

File tree

8 files changed

+192
-52
lines changed

8 files changed

+192
-52
lines changed
Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,14 @@
11
name: dependabot metadata and labels
2-
on: pull_request_target
2+
on:
3+
pull_request_target:
4+
types: [opened, synchronize, reopened, labeled]
35
jobs:
46
label:
7+
if: github.actor == 'dependabot[bot]'
58
runs-on: ubuntu-latest
69
steps:
710
- uses: dependabot/fetch-metadata@v2
811
with:
912
github-token: "${{ secrets.GITHUB_TOKEN }}"
13+
- name: noop
14+
run: echo "labels applied (if any)"

examples/sample_apv.json

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
1-
[
2-
{"file":"app.py","predicted_risk":"high","reason":"Sensitive API call in diff"},
3-
{"file":"utils.py","predicted_risk":"medium","reason":"Input validation weakened"},
4-
{"file":"README.md","predicted_risk":"low","reason":"Docs-only change"}
5-
]
1+
{
2+
"findings": [
3+
{"severity": "HIGH", "title": "dangerous pattern"},
4+
{"severity": "MEDIUM", "title": "needs review"},
5+
{"severity": "LOW", "title": "style"}
6+
]
7+
}

pyproject.toml

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,6 @@ mypy = "^1.11"
2121
[tool.ruff]
2222
target-version = "py311"
2323
line-length = 100
24-
select = ["E","F","I","W","UP"]
25-
ignore = ["E203"]
2624

2725
[tool.black]
2826
line-length = 100
@@ -31,3 +29,7 @@ target-version = ["py311"]
3129
[tool.mypy]
3230
python_version = "3.11"
3331
strict = true
32+
33+
[tool.ruff.lint]
34+
select = ["E","F","I","UP"]
35+
ignore = []
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
from .cli import main

# NOTE(review): this module calls main() and raises SystemExit at *import*
# time. That is the correct idiom only if this file is the package's
# __main__.py (so `python -m diff_risk_dashboard` exits with main()'s
# integer return code). If it is __init__.py, importing the package would
# terminate the interpreter — confirm the filename.
raise SystemExit(main())

src/diff_risk_dashboard/cli.py

Lines changed: 39 additions & 20 deletions
Original file line numberDiff line numberDiff line change
from __future__ import annotations

import argparse
import json
import sys
from pathlib import Path

from .core import Summary, summarize


def _print_table(summary: Summary) -> None:
    """Render a plain-text severity table for *summary* on stdout."""
    bs = summary["by_severity"]
    # Fixed display order, worst first; keys are guaranteed by core.Summary.
    rows = [
        ("CRITICAL", bs["CRITICAL"]),
        ("HIGH", bs["HIGH"]),
        ("MEDIUM", bs["MEDIUM"]),
        ("LOW", bs["LOW"]),
        ("INFO", bs["INFO"]),
    ]
    print("\n=== Diff Risk Summary ===")
    print(f"Total findings: {summary['total']}")
    print("Severity counts:")
    # Pad names to the widest label so the counts line up in a column.
    w = max(len(r[0]) for r in rows)
    for name, cnt in rows:
        print(f"  {name:<{w}} : {cnt}")
    print(f"Worst severity : {summary['worst']}")
    print(f"Risk level     : {summary['risk_level']}\n")


def main(argv: list[str] | None = None) -> int:
    """CLI entry point: summarize an APV JSON report file.

    Args:
        argv: Argument list for argparse; ``None`` means ``sys.argv[1:]``.

    Returns:
        Process exit code keyed to the risk level:
        2 for "red", 1 for "yellow", 0 for "green".

    Raises:
        FileNotFoundError: If the given path does not exist.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    p = argparse.ArgumentParser(description="Diff Risk Dashboard (APV JSON -> summary)")
    p.add_argument("apv_json", help="Path to ai-patch-verifier JSON")
    args = p.parse_args(argv)
    data = json.loads(Path(args.apv_json).read_text(encoding="utf-8"))
    sm = summarize(data)
    _print_table(sm)
    # Severity-driven exit code so CI can gate on the result.
    return 2 if sm["risk_level"] == "red" else (1 if sm["risk_level"] == "yellow" else 0)


if __name__ == "__main__":
    sys.exit(main())

src/diff_risk_dashboard/core.py

Lines changed: 115 additions & 22 deletions
Original file line numberDiff line numberDiff line change
from __future__ import annotations

import json
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Literal, TypedDict, cast

# Normalized severity labels, worst to mildest.
Severity = Literal["CRITICAL", "HIGH", "MEDIUM", "LOW", "INFO"]


class Finding(TypedDict, total=False):
    """One finding from an ai-patch-verifier report (all keys optional)."""

    severity: str
    predicted_risk: str
    title: str
    score: float


class Summary(TypedDict):
    """Aggregated view of a report's findings."""

    total: int
    by_severity: dict[str, int]  # exposes both lower-case and UPPER-case keys
    worst: Severity
    risk_level: Literal["red", "yellow", "green"]


# Ranking used to pick the worst severity present; higher is worse.
_SEV_ORDER: dict[Severity, int] = {
    "CRITICAL": 4,
    "HIGH": 3,
    "MEDIUM": 2,
    "LOW": 1,
    "INFO": 0,
}


def _norm_sev(s: str | None) -> Severity:
    """Map a raw severity string to a canonical `Severity`.

    Unknown or missing values fall back to "INFO"; a few common aliases
    (CRIT, MED/MODERATE, WARN/WARNING) are recognized.
    """
    if not s:
        return "INFO"
    s = s.strip().upper()
    if s in _SEV_ORDER:
        # Membership in _SEV_ORDER proves s is a valid Severity literal;
        # cast() makes that explicit instead of a `type: ignore`.
        return cast(Severity, s)
    if s == "CRIT":
        return "CRITICAL"
    if s in {"MED", "MODERATE"}:
        return "MEDIUM"
    if s in {"WARN", "WARNING"}:
        return "LOW"
    return "INFO"


def _extract_raw_sev(f: Finding) -> str | None:
    """Return the finding's severity, preferring 'severity' over 'predicted_risk'."""
    # `or` also skips an empty-string 'severity' in favor of 'predicted_risk'.
    return f.get("severity") or f.get("predicted_risk")


def _iter_findings(obj: Any) -> Iterable[Finding]:
    """Yield finding dicts from a report that is either a list or an object.

    An object may carry findings under 'findings' or, failing that,
    'results'. Non-dict entries are silently skipped.
    """
    if isinstance(obj, dict):
        cand = obj.get("findings", obj.get("results", []))
        if isinstance(cand, list):
            for x in cand:
                if isinstance(x, dict):
                    yield cast(Finding, x)
        return
    if isinstance(obj, list):
        for x in obj:
            if isinstance(x, dict):
                yield cast(Finding, x)


def summarize(obj: Any) -> Summary:
    """Count findings per severity and derive worst severity / risk level.

    Args:
        obj: Parsed JSON — a list of findings or an object wrapping one.

    Returns:
        A `Summary`; `by_severity` contains each label twice, once
        lower-case and once upper-case, for caller convenience.
    """
    counts_uc: dict[Severity, int] = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "INFO": 0}
    total = 0
    for f in _iter_findings(obj):
        sev = _norm_sev(_extract_raw_sev(f))
        counts_uc[sev] += 1
        total += 1

    # Worst severity actually present; an empty report degrades to INFO.
    worst: Severity = "INFO"
    if counts_uc["CRITICAL"] > 0:
        worst = "CRITICAL"
    elif counts_uc["HIGH"] > 0:
        worst = "HIGH"
    elif counts_uc["MEDIUM"] > 0:
        worst = "MEDIUM"
    elif counts_uc["LOW"] > 0:
        worst = "LOW"

    # Traffic-light mapping: CRITICAL/HIGH -> red, MEDIUM -> yellow, else green.
    if worst in {"CRITICAL", "HIGH"}:
        risk: Literal["red", "yellow", "green"] = "red"
    elif worst == "MEDIUM":
        risk = "yellow"
    else:
        risk = "green"

    # Merge lower-case aliases with the canonical upper-case keys.
    by_lc = {k.lower(): v for k, v in counts_uc.items()}
    by_sev: dict[str, int] = {**by_lc, **counts_uc}
    return {"total": total, "by_severity": by_sev, "worst": worst, "risk_level": risk}


def summarize_apv_json(text_or_path: str | bytes) -> Summary:
    """Summarize a report given as JSON text (str/bytes) or a file path.

    A `str` argument that names an existing file is read from disk;
    otherwise it is parsed as JSON directly. `bytes` are always treated
    as UTF-8 JSON text, never as a path.
    """
    if isinstance(text_or_path, bytes):
        payload = text_or_path.decode("utf-8", errors="strict")
    else:
        # Path.exists() returns False (it does not raise) for strings that
        # are not valid paths, so raw JSON text falls through safely.
        p = Path(text_or_path)
        payload = p.read_text(encoding="utf-8") if p.exists() else text_or_path
    data = json.loads(payload)
    return summarize(data)

tests/test_core.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
from diff_risk_dashboard.core import summarize


def test_summarize_counts_and_worst():
    """A HIGH finding among mixed severities yields worst=HIGH and risk=red."""
    payload = {
        "findings": [{"severity": label} for label in ("LOW", "MEDIUM", "HIGH", "INFO")]
    }
    result = summarize(payload)
    assert result["total"] == 4
    assert result["by_severity"]["HIGH"] == 1
    assert result["worst"] == "HIGH"
    assert result["risk_level"] == "red"

tests/test_smoke.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
11
from diff_risk_dashboard.core import summarize_apv_json
2+
3+
24
def test_summary_counts(tmp_path):
35
sample = tmp_path / "s.json"
46
sample.write_text(

0 commit comments

Comments
 (0)