Skip to content

Commit

Permalink
enable collection of parity metrics in CI (#6441)
Browse files Browse the repository at this point in the history
  • Loading branch information
steffyP committed Aug 2, 2022
1 parent 1ef2ad0 commit d68eae7
Show file tree
Hide file tree
Showing 5 changed files with 342 additions and 7 deletions.
8 changes: 7 additions & 1 deletion .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,7 @@ jobs:
paths:
- repo/target/reports/
- repo/target/coverage/
- repo/target/metric_reports

report:
executor: ubuntu-machine-amd64
Expand All @@ -274,8 +275,13 @@ jobs:
coverage report || true
coverage html || true
coveralls || true
- run:
name: Parity metric aggregation
command: |
source .venv/bin/activate
python scripts/metric_aggregator.py . amd64
- store_artifacts:
path: htmlcov/
path: parity_metrics/

docker-push:
executor: ubuntu-machine-amd64
Expand Down
4 changes: 4 additions & 0 deletions localstack/aws/handlers/metric_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ class Metric:
"xfail",
"aws_validated",
"snapshot",
"snapshot_skipped_paths",
]

def __init__(
Expand All @@ -74,6 +75,7 @@ def __init__(
xfail: bool = False,
aws_validated: bool = False,
snapshot: bool = False,
snapshot_skipped_paths: str = "",
) -> None:
self.service = service
self.operation = operation
Expand All @@ -87,6 +89,7 @@ def __init__(
self.xfail = xfail
self.aws_validated = aws_validated
self.snapshot = snapshot
self.snapshot_skipped_paths = snapshot_skipped_paths

def __iter__(self):
return iter(
Expand All @@ -103,6 +106,7 @@ def __iter__(self):
self.xfail,
self.aws_validated,
self.snapshot,
self.snapshot_skipped_paths,
]
)

Expand Down
5 changes: 5 additions & 0 deletions localstack/testing/pytest/metric_collection.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,18 +44,23 @@ def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None:
xfail = False
aws_validated = False
snapshot = False
skipped = ""

for _ in item.iter_markers(name="xfail"):
xfail = True
for _ in item.iter_markers(name="aws_validated"):
aws_validated = True
if hasattr(item, "fixturenames") and "snapshot" in item.fixturenames:
snapshot = True
for sk in item.iter_markers(name="skip_snapshot_verify"):
skipped = sk.kwargs.get("paths", "all")

for metric in MetricHandler.metric_data:
metric.xfail = xfail
metric.aws_validated = aws_validated
metric.snapshot = snapshot
metric.node_id = node_id
metric.snapshot_skipped_paths = skipped

with open(FNAME_RAW_DATA_CSV, "a") as fd:
writer = csv.writer(fd)
Expand Down
232 changes: 232 additions & 0 deletions scripts/coverage_docs_utility.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,232 @@
import csv
import os
import sys
from pathlib import Path

from scripts.metric_aggregator import aggregate_recorded_raw_data

# Hugo front-matter block prepended to the generated coverage docs page;
# the trailing "\n\n" escape keeps blank lines between the front matter
# and the first service section.
DOCS_HEADER = """---
title: "LocalStack Coverage"
linkTitle: "LocalStack Coverage"
weight: 100
description: >
Overview of the implemented AWS APIs in LocalStack
---
\n\n
"""

# Shared <thead> fragment reused for every per-service HTML coverage table.
TABLE_HEADER = """
<thead>
<tr>
<th>Operation</th>
<th style="text-align:right">Implemented</th>
</tr>
</thead>"""


def create_simplified_metrics(metrics: dict, impl_details: dict):
    """Combine recorded test metrics with implementation details per operation.

    :param metrics: recorded metrics, {service: {operation: {...}, "service_attributes": {...}}}
    :param impl_details: {service: {operation: {"implemented": bool, "pro": bool}}}
    :return: {service: {operation: {"implemented", "tested", "aws_validated", "snapshot", "pro"}}}

    Operations without implementation details are skipped with a warning.
    Unlike the previous version, the input dicts are not mutated.
    """
    simplified_metric = {}
    for service, details in metrics.items():
        simplified_metric[service] = {}
        # guard against a service missing entirely from the implementation details
        service_impl = impl_details.get(service, {})
        for operation in sorted(details.keys()):
            # "service_attributes" is bookkeeping data, not an operation
            if operation == "service_attributes":
                continue
            op_details = details[operation]
            if service_impl.get(operation) is None:
                print(
                    f"------> WARNING: {service}.{operation} does not have implementation details"
                )
                continue
            simplified_metric[service][operation] = {
                "implemented": service_impl[operation]["implemented"],
                # "tested" means the operation was invoked at least once by the test suite
                "tested": op_details.get("invoked", 0) > 0,
                "aws_validated": op_details["aws_validated"],
                # TODO also consider 'snapshot_skipped_paths' once we use this in docs
                "snapshot": op_details["snapshot"],
                "pro": service_impl[operation]["pro"],
            }

    return simplified_metric


def create_metric_coverage_docs(file_name: str, metrics: dict, impl_details: dict):
    """Write the public coverage docs page (markdown + HTML tables) to *file_name*.

    One table is rendered per service: implemented operations first
    (operations covered by the integration test suite get a sparkle
    indicator), followed by a collapsible section listing operations that
    are not implemented yet.

    :param file_name: output path; an existing file is replaced
    :param metrics: recorded metrics (see create_simplified_metrics)
    :param impl_details: implementation details (see create_simplified_metrics)
    """
    simplified_metrics = create_simplified_metrics(metrics, impl_details)

    # start from a clean file; content is appended service by service below
    if os.path.exists(file_name):
        os.remove(file_name)

    output = DOCS_HEADER
    output += '<div class="coverage-report">\n\n'
    header = f"<table>{TABLE_HEADER}\n"
    for service in sorted(simplified_metrics.keys()):
        output += f"## {service} ##\n\n"
        output += header
        output += " <tbody>\n"
        details = simplified_metrics[service]

        implemented_ops = {
            operation[0]: operation[1]
            for operation in details.items()
            if operation[1]["implemented"]
        }

        tested_indicator = ' <a href="#misc" title="covered by our integration test suite">✨</a>'
        for operation in sorted(implemented_ops.keys()):
            tested = ""
            pro_info = ""
            op_info = implemented_ops[operation]
            if op_info.get("tested"):
                tested = tested_indicator
            if op_info.get("pro"):
                pro_info = " (Pro) "
            output += (
                " <tr>\n"
                f" <td>{operation}{pro_info}{tested}</td>\n"
                ' <td style="text-align:right">✅</td>\n'
                " </tr>\n"
            )
        output += " </tbody>\n"
        other_ops = {
            operation[0]: operation[1]
            for operation in details.items()
            if not operation[1]["implemented"]
        }
        if other_ops:
            # collapsed section so the (usually long) list of missing
            # operations does not dominate the page
            output += (
                " <tbody>"
                " <tr>\n"
                f""" <td><a data-toggle="collapse" href=".{service.lower()}-notimplemented">Show missing</a></td>\n"""
                ' <td style="text-align:right"></td>\n'
                " </tr>\n"
                " </tbody>\n"
                f""" <tbody class="collapse {service.lower()}-notimplemented"> """
            )
            for operation in sorted(other_ops.keys()):
                output += (
                    " <tr>\n"
                    f" <td>{operation}</td>\n"
                    ' <td style="text-align:right">-</td>\n'
                    " </tr>\n"
                )
            output += " </tbody>\n"

        output += " </table>\n\n"
        # flush per service to keep the in-memory buffer small
        with open(file_name, "a") as fd:
            fd.write(f"{output}\n")
            output = ""

    with open(file_name, "a") as fd:
        fd.write(f"{output}\n")
        fd.write(
            "## Misc ##\n\n" "Endpoints marked with ✨ are covered by our integration test suite."
        )
        fd.write("\n\n</div>")


def create_metric_coverage_docs_internal(
file_name: str, metrics: dict, impl_details: dict, coverage: dict
):
if os.path.exists(file_name):
os.remove(file_name)

output = DOCS_HEADER
header = """
| Operation | Implemented | Tested |
|----------------------------------------|-------------|--------|
"""
yes_indicator = "✅"
no_indicator = "❌"
for service in sorted(metrics.keys()):
output += f"## {service} ##\n\n"
if not impl_details.get(service):
print(f"--------> Missing implementation details for service: {service}")
continue
output += f"API returns a response for {coverage[service]}% of the operations.\n"
output += header
del metrics[service]["service_attributes"]
details = metrics[service]
for operation in sorted(details.keys()):
op_details = details[operation]
if impl_details[service].get(operation) is None:
print(
f"------> WARNING: {service}.{operation} does not have implementation details"
)
continue
implemented = yes_indicator if impl_details[service][operation] else no_indicator
tested = yes_indicator if op_details.get("invoked", 0) > 0 else no_indicator

output += (
f"| {operation}{' '*(39-len(operation))}| {implemented} | {tested} |\n"
)

output += "\n\n"
with open(file_name, "a") as fd:
fd.write(f"{output}\n")
output = ""


def main(path_to_implementation_details: str, path_to_raw_metrics: str):
    """Aggregate raw parity metrics and write the coverage docs page.

    Reads the community and pro implementation-coverage CSVs from
    *path_to_implementation_details*, merges them (operations implemented
    only in pro are flagged with "pro"), aggregates the raw metric CSVs
    from *path_to_raw_metrics*, and writes ``coverage.md`` next to them.
    """
    # community implementation details form the baseline
    impl_details = {}
    with open(
        f"{path_to_implementation_details}/community/implementation_coverage_full.csv", mode="r"
    ) as file:
        csv_reader = csv.DictReader(file)
        for row in csv_reader:
            service = impl_details.setdefault(row["service"], {})
            service[row["operation"]] = {
                "implemented": row["is_implemented"] == "True",
                "pro": False,
            }
    # overlay pro: an operation only implemented in pro is marked as such
    with open(
        f"{path_to_implementation_details}/pro/implementation_coverage_full.csv", mode="r"
    ) as file:
        csv_reader = csv.DictReader(file)
        for row in csv_reader:
            details = impl_details[row["service"]][row["operation"]]
            if not details["implemented"] and row["is_implemented"] == "True":
                details["implemented"] = True
                details["pro"] = True

    recorded_metrics = aggregate_recorded_raw_data(base_dir=path_to_raw_metrics)
    create_metric_coverage_docs(
        file_name=path_to_raw_metrics + "/coverage.md",
        metrics=recorded_metrics,
        impl_details=impl_details,
    )


def print_usage():
    """Print CLI usage instructions (called on invalid arguments)."""
    print("missing arguments")
    # fixed typo: "implementaiton" -> "implementation"
    print(
        "usage: python coverage_docs_utility.py <dir-to-implementation-details> <dir-to-raw-csv-metric>"
    )


if __name__ == "__main__":
    # expects exactly two arguments, both existing directories:
    # 1) implementation-details dir, 2) raw CSV metrics dir
    if len(sys.argv) != 3 or not Path(sys.argv[1]).is_dir() or not Path(sys.argv[2]).is_dir():
        print_usage()
    else:
        main(sys.argv[1], sys.argv[2])
Loading

0 comments on commit d68eae7

Please sign in to comment.