Add benchmarking tests (#2143)
Alek99 committed Nov 9, 2023
1 parent 4d6fa9b commit dd982c5
Showing 8 changed files with 545 additions and 133 deletions.
@@ -1,4 +1,4 @@
-name: lighthouse-tests
+name: benchmarking

on:
push:
@@ -62,8 +62,14 @@ jobs:
run: |
# Check that npm is home
npm -v
-poetry run bash scripts/lighthouse.sh ./reflex-web prod
+poetry run bash scripts/benchmarks.sh ./reflex-web prod
env:
LHCI_GITHUB_APP_TOKEN: $
POSTHOG: $

- name: Run Benchmarks
working-directory: ./integration/benchmarks
run:
poetry run python benchmarks.py "$GITHUB_SHA" .lighthouseci
env:
GITHUB_SHA: ${{ github.sha }}
DATABASE_URL: ${{ secrets.DATABASE_URL }}
PR_TITLE: ${{ github.event.pull_request.title }}
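
For local debugging, the Run Benchmarks step above can be reproduced outside GitHub Actions by supplying the same two positional arguments and environment variables. The sketch below uses placeholder values for the database URL, PR title, and commit SHA; none of them come from this commit.

import os
import subprocess

# Placeholder values standing in for the CI-provided secrets and metadata.
env = dict(
    os.environ,
    DATABASE_URL="postgresql://user:pass@localhost:5432/benchmarks",  # assumed local DB
    PR_TITLE="local benchmark run",
)

# Mirrors: poetry run python benchmarks.py "$GITHUB_SHA" .lighthouseci
subprocess.run(
    ["poetry", "run", "python", "benchmarks.py", "abc1234", ".lighthouseci"],
    cwd="integration/benchmarks",
    env=env,
    check=True,
)
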
138 changes: 138 additions & 0 deletions integration/benchmarks/benchmarks.py
@@ -0,0 +1,138 @@
"""Runs the benchmarks and inserts the results into the database."""

import json
import os
import sys

import pytest
from helpers import insert_benchmarking_data


def get_lighthouse_scores(directory_path: str) -> dict:
"""Extracts the Lighthouse scores from the JSON files in the specified directory.
Args:
directory_path (str): The path to the directory containing the JSON files.
Returns:
dict: The Lighthouse scores.
"""
scores = {}

try:
for filename in os.listdir(directory_path):
if filename.endswith(".json") and filename != "manifest.json":
file_path = os.path.join(directory_path, filename)
with open(file_path, "r") as file:
data = json.load(file)
# Extract scores and add them to the dictionary with the filename as key
scores[data["finalUrl"].replace("http://localhost:3000/", "")] = {
"performance_score": data["categories"]["performance"]["score"],
"accessibility_score": data["categories"]["accessibility"][
"score"
],
"best_practices_score": data["categories"]["best-practices"][
"score"
],
"seo_score": data["categories"]["seo"]["score"],
"pwa_score": data["categories"]["pwa"]["score"],
}
except Exception as e:
print(e)
return {"error": "Error parsing JSON files"}

return scores


def run_pytest_and_get_results(test_path=None) -> dict:
"""Runs pytest and returns the results.
Args:
test_path: The path to the tests to run.
Returns:
dict: The results of the tests.
"""
# Set the default path to the current directory if no path is provided
if not test_path:
test_path = os.getcwd()
    # Ensure you have installed the pytest-benchmark plugin before running this
pytest_args = ["-v", "--benchmark-json=benchmark_report.json", test_path]

# Run pytest with the specified arguments
pytest.main(pytest_args)

# Print ls of the current directory
print(os.listdir())

with open("benchmark_report.json", "r") as file:
pytest_results = json.load(file)

return pytest_results


def extract_stats_from_json(json_data) -> list[dict]:
"""Extracts the stats from the JSON data and returns them as a list of dictionaries.
Args:
json_data: The JSON data to extract the stats from.
Returns:
list[dict]: The stats for each test.
"""
# Load the JSON data if it is a string, otherwise assume it's already a dictionary
data = json.loads(json_data) if isinstance(json_data, str) else json_data

# Initialize an empty list to store the stats for each test
test_stats = []

# Iterate over each test in the 'benchmarks' list
for test in data.get("benchmarks", []):
stats = test.get("stats", {})
test_name = test.get("name", "Unknown Test")
min_value = stats.get("min", None)
max_value = stats.get("max", None)
mean_value = stats.get("mean", None)
stdev_value = stats.get("stddev", None)

test_stats.append(
{
"test_name": test_name,
"min": min_value,
"max": max_value,
"mean": mean_value,
"stdev": stdev_value,
}
)

return test_stats


def main():
"""Runs the benchmarks and inserts the results into the database."""
# Get the commit SHA and JSON directory from the command line arguments
commit_sha = sys.argv[1]
json_dir = sys.argv[2]

# Get the PR title and database URL from the environment variables
pr_title = os.environ.get("PR_TITLE")
db_url = os.environ.get("DATABASE_URL")

if db_url is None or pr_title is None:
sys.exit("Missing environment variables")

# Run pytest and get the results
results = run_pytest_and_get_results()
cleaned_results = extract_stats_from_json(results)

# Get the Lighthouse scores
lighthouse_scores = get_lighthouse_scores(json_dir)

# Insert the data into the database
insert_benchmarking_data(
db_url, lighthouse_scores, cleaned_results, commit_sha, pr_title
)


if __name__ == "__main__":
main()
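
For reference, extract_stats_from_json consumes the report that pytest-benchmark writes via --benchmark-json. A minimal sketch of that layout, with invented numbers, and the result it would produce:

# Illustrative slice of benchmark_report.json (values are invented).
sample_report = {
    "benchmarks": [
        {
            "name": "test_mean_import_time",
            "stats": {"min": 0.41, "max": 0.52, "mean": 0.45, "stddev": 0.03},
        }
    ]
}

# extract_stats_from_json(sample_report) would return:
# [{"test_name": "test_mean_import_time", "min": 0.41, "max": 0.52,
#   "mean": 0.45, "stdev": 0.03}]
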
49 changes: 49 additions & 0 deletions integration/benchmarks/helpers.py
@@ -0,0 +1,49 @@
"""Helper functions for the benchmarking integration."""

import json
from datetime import datetime

import psycopg2


def insert_benchmarking_data(
db_connection_url: str,
lighthouse_data: dict,
performance_data: list[dict],
commit_sha: str,
pr_title: str,
):
"""Insert the benchmarking data into the database.
Args:
db_connection_url: The URL to connect to the database.
lighthouse_data: The Lighthouse data to insert.
performance_data: The performance data to insert.
commit_sha: The commit SHA to insert.
pr_title: The PR title to insert.
"""
# Serialize the JSON data
lighthouse_json = json.dumps(lighthouse_data)
performance_json = json.dumps(performance_data)

# Get the current timestamp
current_timestamp = datetime.now()

# Connect to the database and insert the data
with psycopg2.connect(db_connection_url) as conn, conn.cursor() as cursor:
insert_query = """
INSERT INTO benchmarks (lighthouse, performance, commit_sha, pr_title, time)
VALUES (%s, %s, %s, %s, %s);
"""
cursor.execute(
insert_query,
(
lighthouse_json,
performance_json,
commit_sha,
pr_title,
current_timestamp,
),
)
# Commit the transaction
conn.commit()
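
insert_benchmarking_data assumes a benchmarks table already exists; its schema is not part of this commit. A minimal sketch of a compatible table (the column types are assumptions, since the JSON payloads arrive as serialized strings) might be:

import psycopg2

# Hypothetical schema compatible with the INSERT above; not taken from this commit.
CREATE_BENCHMARKS_TABLE = """
CREATE TABLE IF NOT EXISTS benchmarks (
    id SERIAL PRIMARY KEY,
    lighthouse JSONB,      -- serialized Lighthouse scores (TEXT would also work)
    performance JSONB,     -- serialized pytest-benchmark stats
    commit_sha TEXT,
    pr_title TEXT,
    time TIMESTAMP
);
"""


def create_benchmarks_table(db_connection_url: str):
    """Create the benchmarks table if it does not already exist (hypothetical helper)."""
    with psycopg2.connect(db_connection_url) as conn, conn.cursor() as cursor:
        cursor.execute(CREATE_BENCHMARKS_TABLE)
        conn.commit()
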
121 changes: 121 additions & 0 deletions integration/benchmarks/test_compile_benchmark.py
@@ -0,0 +1,121 @@
"""Benchmark the time it takes to compile a reflex app."""

import importlib

import reflex

rx = reflex


class State(rx.State):
"""A simple state class with a count variable."""

count: int = 0

def increment(self):
"""Increment the count."""
self.count += 1

def decrement(self):
"""Decrement the count."""
self.count -= 1


class SliderVariation(State):
"""A simple state class with a count variable."""

value: int = 50

def set_end(self, value: int):
"""Increment the count.
Args:
value: The value of the slider.
"""
self.value = value


def sample_small_page() -> rx.Component:
"""A simple page with a button that increments the count.
Returns:
A reflex component.
"""
return rx.vstack(
*[rx.button(State.count, font_size="2em") for i in range(100)],
spacing="1em",
)


def sample_large_page() -> rx.Component:
"""A large page with a slider that increments the count.
Returns:
A reflex component.
"""
return rx.vstack(
*[
rx.vstack(
rx.heading(SliderVariation.value),
rx.slider(on_change_end=SliderVariation.set_end),
width="100%",
)
for i in range(100)
],
spacing="1em",
)


def add_small_pages(app: rx.App):
"""Add 10 small pages to the app.
Args:
app: The reflex app to add the pages to.
"""
for i in range(10):
app.add_page(sample_small_page, route=f"/{i}")


def add_large_pages(app: rx.App):
"""Add 10 large pages to the app.
Args:
app: The reflex app to add the pages to.
"""
for i in range(10):
app.add_page(sample_large_page, route=f"/{i}")


def test_mean_import_time(benchmark):
"""Test that the mean import time is less than 1 second.
Args:
benchmark: The benchmark fixture.
"""

def import_reflex():
importlib.reload(reflex)

# Benchmark the import
benchmark(import_reflex)


def test_mean_add_small_page_time(benchmark):
"""Test that the mean add page time is less than 1 second.
Args:
benchmark: The benchmark fixture.
"""
app = rx.App(state=State)
benchmark(add_small_pages, app)


def test_mean_add_large_page_time(benchmark):
"""Test that the mean add page time is less than 1 second.
Args:
benchmark: The benchmark fixture.
"""
app = rx.App(state=State)
results = benchmark(add_large_pages, app)
print(results)
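
To run just this module locally, one option is to mirror what benchmarks.py does and let pytest-benchmark write the same JSON report. This assumes pytest-benchmark is installed and the working directory is integration/benchmarks:

import pytest

# Run only the compile benchmarks and produce the report that benchmarks.py parses.
pytest.main(
    ["-v", "--benchmark-json=benchmark_report.json", "test_compile_benchmark.py"]
)
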