[REF-2284] Benchmark add extra info on publishing data #2864

Merged 7 commits on Mar 21, 2024
2 changes: 1 addition & 1 deletion .github/workflows/benchmarks.yml
@@ -123,4 +123,4 @@ jobs:
  --python-version "${{ matrix.python-version }}" --commit-sha "${{ github.sha }}"
  --benchmark-json "${{ env.OUTPUT_FILE }}" --pr-title "${{ github.event.pull_request.title }}"
  --db-url "${{ env.DATABASE_URL }}" --branch-name "${{ github.head_ref || github.ref_name }}"
- --event-type "${{ github.event_name }}" --actor "${{ github.actor }}"
+ --event-type "${{ github.event_name }}" --actor "${{ github.actor }}" --pr-id "${{ github.event.pull_request.id }}"
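
For orientation, a minimal sketch of the command-line interface these flags map onto. Only the flags visible in this hunk are listed, and the loop-based declaration is an illustration rather than the script's actual argparse setup (that follows in the next file):

import argparse

parser = argparse.ArgumentParser()
# Flags mirror the workflow invocation above; --pr-id is the new one.
for flag in (
    "--python-version",
    "--commit-sha",
    "--benchmark-json",
    "--pr-title",
    "--db-url",
    "--branch-name",
    "--event-type",
    "--actor",
    "--pr-id",
):
    parser.add_argument(flag, required=True)

args = parser.parse_args()
print(args.pr_id)  # receives github.event.pull_request.id
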
30 changes: 20 additions & 10 deletions scripts/simple_app_benchmark_upload.py
@@ -29,20 +29,21 @@ def extract_stats_from_json(json_file: str) -> list[dict]:

      # Iterate over each test in the 'benchmarks' list
      for test in data.get("benchmarks", []):
+         group = test.get("group", None)
          stats = test.get("stats", {})
+         full_name = test.get("fullname")
+         file_name = (
+             full_name.split("/")[-1].split("::")[0].strip(".py") if full_name else None
+         )
          test_name = test.get("name", "Unknown Test")
-         min_value = stats.get("min", None)
-         max_value = stats.get("max", None)
-         mean_value = stats.get("mean", None)
-         stdev_value = stats.get("stddev", None)
Comment on lines -34 to -37

Collaborator: so are we just not collecting this stuff anymore? or is it coming from somewhere else?

Author: we are still collecting them; they come from the benchmark JSON generated by pytest-benchmark, under the "stats" key of each benchmark run. I thought it would be cool to store the full thing in case we need some data in the future that we're currently not using.

This is an example of what the JSON file looks like:

{
    "machine_info": {
        "node": "Elijahs-MacBook-Pro.local",
        "processor": "arm",
        "machine": "arm64",
        "python_compiler": "Clang 13.0.0 (clang-1300.0.29.30)",
        "python_implementation": "CPython",
        "python_implementation_version": "3.11.3",
        "python_version": "3.11.3",
        "python_build": [
            "v3.11.3:f3909b8bc8",
            "Apr  4 2023 20:12:10"
        ],
        "release": "22.1.0",
        "system": "Darwin",
        "cpu": {
            "python_version": "3.11.3.final.0 (64 bit)",
            "cpuinfo_version": [
                9,
                0,
                0
            ],
            "cpuinfo_version_string": "9.0.0",
            "arch": "ARM_8",
            "bits": 64,
            "count": 10,
            "arch_string_raw": "arm64",
            "brand_raw": "Apple M2 Pro"
        }
    },
    "commit_info": {
        "id": "fb2c3606d951aab77e4f8b61a9d97ddc43a6a87d",
        "time": "2024-03-14T10:03:30-07:00",
        "author_time": "2024-03-14T10:03:30-07:00",
        "dirty": true,
        "project": "reflex",
        "branch": "main"
    },
    "benchmarks": [
        {
            "group": "Compile time of varying component numbers",
            "name": "test_app_10_compile_time_cold",
            "fullname": "benchmarks/test_benchmark_compile_components.py::test_app_10_compile_time_cold",
            "params": null,
            "param": null,
            "extra_info": {},
            "options": {
                "disable_gc": true,
                "timer": "perf_counter",
                "min_rounds": 5,
                "max_time": 1.0,
                "min_time": 5e-06,
                "warmup": false
            },
            "stats": {
                "min": 0.025916790997143835,
                "max": 0.02970566597650759,
                "mean": 0.02696435419784393,
                "stddev": 0.0012199914837186603,
                "rounds": 10,
                "median": 0.026633937493897974,
                "iqr": 0.0010124579712282866,
                "q1": 0.02608416700968519,
                "q3": 0.027096624980913475,
                "iqr_outliers": 1,
                "stddev_outliers": 2,
                "outliers": "2;1",
                "ld15iqr": 0.025916790997143835,
                "hd15iqr": 0.02970566597650759,
                "ops": 37.08599852467299,
                "total": 0.2696435419784393,
                "data": [
                    0.02970566597650759,
                    0.02608416700968519,
                    0.026580250007100403,
                    0.026032584020867944,
                    0.02610079199075699,
                    0.026687624980695546,
                    0.028431000013370067,
                    0.027096624980913475,
                    0.025916790997143835,
                    0.02700804200139828
                ],
                "iterations": 1
            }
        },
        {
            "group": "Compile time of varying page numbers",
            "name": "test_app_1_compile_time_cold",
            "fullname": "benchmarks/test_benchmark_compile_pages.py::test_app_1_compile_time_cold",
            "params": null,
            "param": null,
            "extra_info": {},
            "options": {
                "disable_gc": true,
                "timer": "perf_counter",
                "min_rounds": 5,
                "max_time": 1.0,
                "min_time": 5e-06,
                "warmup": false
            },
            "stats": {
                "min": 0.019124624988762662,
                "max": 0.020664624986238778,
                "mean": 0.01957777519710362,
                "stddev": 0.0006171553775501039,
                "rounds": 5,
                "median": 0.01937037499737926,
                "iqr": 0.0004436252565938048,
                "q1": 0.019275312748504803,
                "q3": 0.019718938005098607,
                "iqr_outliers": 1,
                "stddev_outliers": 1,
                "outliers": "1;1",
                "ld15iqr": 0.019124624988762662,
                "hd15iqr": 0.020664624986238778,
                "ops": 51.07832682377221,
                "total": 0.0978888759855181,
                "data": [
                    0.020664624986238778,
                    0.01937037499737926,
                    0.019124624988762662,
                    0.019325542001752183,
                    0.019403709011385217
                ],
                "iterations": 1
            }
        }
    ],
    "datetime": "2024-03-15T11:12:52.404109",
    "version": "4.0.0"
}
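
A minimal sketch (not part of the diff; benchmark_output.json is a placeholder name) of reading such a report and keeping each run's full stats block:

import json

# Placeholder path; pytest-benchmark writes this file via --benchmark-json.
with open("benchmark_output.json") as f:
    data = json.load(f)

for test in data.get("benchmarks", []):
    # The whole stats dict (min, max, mean, stddev, rounds, ops, ...) is
    # kept intact so fields that are unused today stay available later.
    stats = test.get("stats", {})
    print(test["name"], stats.get("mean"))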


          test_stats.append(
              {
                  "test_name": test_name,
-                 "min": min_value,
-                 "max": max_value,
-                 "mean": mean_value,
-                 "stdev": stdev_value,
+                 "group": group,
+                 "stats": stats,
+                 "full_name": full_name,
+                 "file_name": file_name,
              }
          )
      return test_stats
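
Applied to the first benchmark in the example JSON above, the updated function would emit a record shaped roughly like this (a sketch with the stats dict truncated, not captured output):

{
    "test_name": "test_app_10_compile_time_cold",
    "group": "Compile time of varying component numbers",
    "stats": {
        "min": 0.025916790997143835,
        "max": 0.02970566597650759,
        "mean": 0.02696435419784393,
        # ...remaining stats fields kept verbatim...
    },
    "full_name": "benchmarks/test_benchmark_compile_components.py::test_app_10_compile_time_cold",
    # last path segment of fullname, with the test id and ".py" stripped
    "file_name": "test_benchmark_compile_components",
}
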
@@ -58,6 +59,7 @@ def insert_benchmarking_data(
      branch_name: str,
      event_type: str,
      actor: str,
+     pr_id: str,
  ):
"""Insert the benchmarking data into the database.

@@ -71,6 +73,7 @@ def insert_benchmarking_data(
          branch_name: The name of the branch.
          event_type: Type of GitHub event (push, pull request, etc.).
          actor: Username of the user that triggered the run.
+         pr_id: ID of the PR.
      """
      # Serialize the JSON data
      simple_app_performance_json = json.dumps(performance_data)
@@ -81,8 +84,8 @@ def insert_benchmarking_data(
      # Connect to the database and insert the data
      with psycopg2.connect(db_connection_url) as conn, conn.cursor() as cursor:
          insert_query = """
-             INSERT INTO simple_app_benchmarks (os, python_version, commit_sha, time, pr_title, branch_name, event_type, actor, performance)
-             VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);
+             INSERT INTO simple_app_benchmarks (os, python_version, commit_sha, time, pr_title, branch_name, event_type, actor, performance, pr_id)
+             VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
          """
          cursor.execute(
              insert_query,
@@ -96,6 +99,7 @@ def insert_benchmarking_data(
                  event_type,
                  actor,
                  simple_app_performance_json,
+                 pr_id,
              ),
          )
          # Commit the transaction
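
The pattern above relies on psycopg2's parameterized queries and on the connection context manager committing on a clean exit. A self-contained sketch of the same pattern, with a placeholder connection URL and values, assuming the simple_app_benchmarks table already has a pr_id column:

import json

import psycopg2

# Placeholders: the connection URL and row values are illustrative only.
performance_json = json.dumps([{"test_name": "test_app_10_compile_time_cold"}])

with psycopg2.connect("postgresql://localhost/benchmarks") as conn, conn.cursor() as cursor:
    # %s placeholders let psycopg2 quote and escape the values safely.
    cursor.execute(
        "INSERT INTO simple_app_benchmarks (os, performance, pr_id) VALUES (%s, %s, %s);",
        ("linux", performance_json, "1234567890"),
    )
# Leaving the with-block commits the transaction; the connection itself
# stays open until closed explicitly.
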
@@ -144,6 +148,11 @@ def main():
help="Username of the user that triggered the run.",
required=True,
)
parser.add_argument(
"--pr-id",
help="ID of the PR.",
required=True,
)
args = parser.parse_args()

      # Get the results of pytest benchmarks
@@ -159,6 +168,7 @@ def main():
          branch_name=args.branch_name,
          event_type=args.event_type,
          actor=args.actor,
+         pr_id=args.pr_id,
      )

