|
1 | 1 | import logging |
2 | 2 | from collections import defaultdict |
3 | 3 | from functools import lru_cache |
4 | | -from typing import Optional, List, Dict, Tuple, Callable, AsyncIterator, cast |
| 4 | +from typing import Optional, List, Dict, Tuple, Callable, AsyncIterator, cast, Set |
5 | 5 |
|
6 | 6 | from aiostream import stream, pipe |
7 | 7 | from aiostream.core import Stream |
@@ -267,11 +267,13 @@ async def perform_benchmarks( |
267 | 267 |
|
def account_failing(account_id: str) -> Json:
    """Summarize this account's failing benchmark results, grouped by severity.

    For each severity level, counts how many checks have at least one failing
    resource for *account_id*, and how many distinct resources (by node_id)
    are failing across all those checks. Reads `all_checks` from the
    enclosing scope.
    """
    checks_by_severity: Dict[ReportSeverity, int] = defaultdict(int)
    # Distinct node ids per severity, so a resource failing several checks
    # of the same severity is only counted once.
    resource_ids_by_severity: Dict[ReportSeverity, Set[str]] = defaultdict(set)
    for check_result in all_checks.values():
        failed = check_result.resources_failing_by_account.get(account_id, [])
        if not failed:
            continue
        severity = check_result.check.severity
        checks_by_severity[severity] += 1
        for resource in failed:
            resource_ids_by_severity[severity].add(resource["node_id"])
    # Every severity with a counted check has at least one resource id,
    # so the lookup below is always safe.
    return {
        severity.value: {
            "checks": check_count,
            "resources": len(resource_ids_by_severity[severity]),
        }
        for severity, check_count in checks_by_severity.items()
    }
|
0 commit comments