Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .github/workflows/dependabot-label.yml
Original file line number Diff line number Diff line change
@@ -1,9 +1,14 @@
name: dependabot metadata and labels
on: pull_request_target
on:
pull_request_target:
types: [opened, synchronize, reopened, labeled]
jobs:
label:
if: github.actor == 'dependabot[bot]'
runs-on: ubuntu-latest
steps:
- uses: dependabot/fetch-metadata@v2
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
- name: noop
run: echo "labels applied (if any)"
12 changes: 7 additions & 5 deletions examples/sample_apv.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
[
{"file":"app.py","predicted_risk":"high","reason":"Sensitive API call in diff"},
{"file":"utils.py","predicted_risk":"medium","reason":"Input validation weakened"},
{"file":"README.md","predicted_risk":"low","reason":"Docs-only change"}
]
{
"findings": [
{"severity": "HIGH", "title": "dangerous pattern"},
{"severity": "MEDIUM", "title": "needs review"},
{"severity": "LOW", "title": "style"}
]
}
6 changes: 4 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,6 @@ mypy = "^1.11"
[tool.ruff]
target-version = "py311"
line-length = 100
select = ["E","F","I","W","UP"]
ignore = ["E203"]

[tool.black]
line-length = 100
Expand All @@ -31,3 +29,7 @@ target-version = ["py311"]
[tool.mypy]
python_version = "3.11"
strict = true

[tool.ruff.lint]
select = ["E","F","I","UP"]
ignore = []
4 changes: 2 additions & 2 deletions src/diff_risk_dashboard/__main__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
from .cli import main
if __name__ == "__main__":
main()

raise SystemExit(main())
59 changes: 39 additions & 20 deletions src/diff_risk_dashboard/cli.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,41 @@
from __future__ import annotations

import argparse
from rich.console import Console
from rich.table import Table
from .core import summarize_apv_json

def main() -> None:
    """CLI entry point: render an ai-patch-verifier JSON report as a rich table.

    Reads the report path from argv, summarizes it, and prints a
    severity/count table plus the total number of findings.
    """
    parser = argparse.ArgumentParser(description="Diff Risk Dashboard (CLI)")
    parser.add_argument("json_path", help="Path to ai-patch-verifier JSON report")
    args = parser.parse_args()

    summary = summarize_apv_json(args.json_path)
    console = Console()
    table = Table(title="PR Risk Exposure")
    table.add_column("Severity", justify="left")
    table.add_column("Count", justify="right")

    # Fixed ordering keeps the table stable regardless of dict iteration order.
    for sev in ["high", "medium", "low"]:
        table.add_row(sev.capitalize(), str(summary["by_severity"][sev]))

    console.print(table)
    console.print(f"[bold]Total findings:[/bold] {summary['total']}")
import json
import sys
from pathlib import Path

from .core import Summary, summarize


def _print_table(summary: Summary) -> None:
bs = summary["by_severity"]
rows = [
("CRITICAL", bs["CRITICAL"]),
("HIGH", bs["HIGH"]),
("MEDIUM", bs["MEDIUM"]),
("LOW", bs["LOW"]),
("INFO", bs["INFO"]),
]
print("\n=== Diff Risk Summary ===")
print(f"Total findings: {summary['total']}")
print("Severity counts:")
w = max(len(r[0]) for r in rows)
for name, cnt in rows:
print(f" {name:<{w}} : {cnt}")
print(f"Worst severity : {summary['worst']}")
print(f"Risk level : {summary['risk_level']}\n")


def main(argv: list[str] | None = None) -> int:
    """CLI entry point: summarize an APV JSON report and print it.

    Returns a shell-friendly exit code: 2 for "red", 1 for "yellow",
    0 otherwise ("green").
    """
    parser = argparse.ArgumentParser(description="Diff Risk Dashboard (APV JSON -> summary)")
    parser.add_argument("apv_json", help="Path to ai-patch-verifier JSON")
    ns = parser.parse_args(argv)

    report = json.loads(Path(ns.apv_json).read_text(encoding="utf-8"))
    summary = summarize(report)
    _print_table(summary)

    # Map risk level to an exit code so CI can gate on the result.
    exit_codes = {"red": 2, "yellow": 1}
    return exit_codes.get(summary["risk_level"], 0)


if __name__ == "__main__":
sys.exit(main())
137 changes: 115 additions & 22 deletions src/diff_risk_dashboard/core.py
Original file line number Diff line number Diff line change
@@ -1,25 +1,118 @@
from __future__ import annotations
from typing import Dict, Any

import json
from collections import Counter

def summarize_apv_json(path: str) -> Dict[str, Any]:
'''
Expect a JSON array or object containing findings with a 'predicted_risk'
field in {'low','medium','high'} (interface compatible with ai-patch-verifier output).
'''
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)

items = data if isinstance(data, list) else data.get("findings", [])
risks = [str(i.get("predicted_risk", "")).lower() for i in items]
counts = Counter(risks)
total = sum(counts.values())
return {
"total": total,
"by_severity": {
"high": counts.get("high", 0),
"medium": counts.get("medium", 0),
"low": counts.get("low", 0),
},
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Literal, TypedDict, cast

# Canonical severity labels used throughout the summary pipeline.
Severity = Literal["CRITICAL", "HIGH", "MEDIUM", "LOW", "INFO"]


class Finding(TypedDict, total=False):
    """Shape of a single APV finding; every key is optional (total=False).

    "severity" is the primary label; "predicted_risk" is the fallback key
    read when "severity" is absent (see `_extract_raw_sev`).
    """

    severity: str
    predicted_risk: str
    title: str
    score: float


class Summary(TypedDict):
    """Aggregated result returned by `summarize`."""

    total: int
    by_severity: dict[str, int]  # includes both lower- and UPPER-case keys
    worst: Severity
    risk_level: Literal["red", "yellow", "green"]


# Severity ranking: a larger number means more severe. Used both to
# validate normalized labels and to compare severities.
_SEV_ORDER: dict[Severity, int] = {
    "CRITICAL": 4,
    "HIGH": 3,
    "MEDIUM": 2,
    "LOW": 1,
    "INFO": 0,
}


def _norm_sev(s: str | None) -> Severity:
if not s:
return "INFO"
s = s.strip().upper()
if s in _SEV_ORDER:
return s # type: ignore[return-value]
if s in {"CRIT"}:
return "CRITICAL"
if s in {"MED", "MODERATE"}:
return "MEDIUM"
if s in {"WARN", "WARNING"}:
return "LOW"
return "INFO"


def _extract_raw_sev(f: Finding) -> str | None:
return f.get("severity") or f.get("predicted_risk")


def _iter_findings(obj: Any) -> Iterable[Finding]:
if isinstance(obj, dict):
cand = obj.get("findings", obj.get("results", []))
if isinstance(cand, list):
for x in cand:
if isinstance(x, dict):
yield cast(Finding, x)
return
if isinstance(obj, list):
for x in obj:
if isinstance(x, dict):
yield cast(Finding, x)


def summarize(obj: Any) -> Summary:
    """Aggregate an APV payload into counts, worst severity, and risk level.

    The returned "by_severity" mapping carries both lower-case and
    UPPER-case keys so that both old and new consumers keep working.
    """
    tally: dict[Severity, int] = dict.fromkeys(_SEV_ORDER, 0)
    total = 0
    for finding in _iter_findings(obj):
        tally[_norm_sev(_extract_raw_sev(finding))] += 1
        total += 1

    # Highest-ranked severity with at least one hit; "INFO" when none.
    worst: Severity = "INFO"
    for candidate in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
        if tally[candidate] > 0:
            worst = cast(Severity, candidate)
            break

    # HIGH or worse -> red, MEDIUM -> yellow, anything else -> green.
    if _SEV_ORDER[worst] >= _SEV_ORDER["HIGH"]:
        risk: Literal["red", "yellow", "green"] = "red"
    elif worst == "MEDIUM":
        risk = "yellow"
    else:
        risk = "green"

    lower = {sev.lower(): count for sev, count in tally.items()}
    upper = {sev: count for sev, count in tally.items()}
    by_severity: dict[str, int] = {**lower, **upper}
    return {"total": total, "by_severity": by_severity, "worst": worst, "risk_level": risk}


def summarize_apv_json(text_or_path: str | bytes) -> Summary:
    """Summarize an APV report given raw JSON (str/bytes) or a file path.

    A str argument is treated as a filesystem path when such a file
    exists; otherwise it is parsed as the JSON text itself.
    """
    if isinstance(text_or_path, bytes):
        raw = text_or_path.decode("utf-8", errors="strict")
    else:
        candidate = Path(text_or_path)
        if candidate.exists():
            raw = candidate.read_text(encoding="utf-8")
        else:
            raw = text_or_path
    return summarize(json.loads(raw))
17 changes: 17 additions & 0 deletions tests/test_core.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from diff_risk_dashboard.core import summarize


def test_summarize_counts_and_worst():
    """summarize() tallies severities, picks HIGH as worst, and flags red."""
    payload = {
        "findings": [{"severity": sev} for sev in ("LOW", "MEDIUM", "HIGH", "INFO")]
    }
    result = summarize(payload)
    assert result["total"] == 4
    assert result["by_severity"]["HIGH"] == 1
    assert result["worst"] == "HIGH"
    assert result["risk_level"] == "red"
2 changes: 2 additions & 0 deletions tests/test_smoke.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
from diff_risk_dashboard.core import summarize_apv_json


def test_summary_counts(tmp_path):
sample = tmp_path / "s.json"
sample.write_text(
Expand Down