2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "redisbench-admin"
version = "0.4.13"
version = "0.4.14"
description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
authors = ["filipecosta90 <filipecosta.90@gmail.com>"]
readme = "README.md"
2 changes: 2 additions & 0 deletions redisbench_admin/compare/compare.py
@@ -50,6 +50,8 @@ def compare_command_logic(args, project_name, project_version):
testcases_setname,
tsname_project_total_failures,
tsname_project_total_success,
_,
_,
) = get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_env)
test_names = []
try:
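The two added underscores discard the extra values that get_overall_dashboard_keynames now returns; compare_command_logic does not need them. Annotated sketch of the new unpacking (the names in the comments come from the helper's return statement in redisbench_admin/utils/remote.py further down in this diff):

(
    prefix,
    testcases_setname,
    tsname_project_total_failures,
    tsname_project_total_success,
    _,  # running_platforms_setname, unused in compare_command_logic
    _,  # testcases_build_variant_setname, unused in compare_command_logic
) = get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_env)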
3 changes: 3 additions & 0 deletions redisbench_admin/run/common.py
@@ -247,6 +247,7 @@ def common_exporter_logic(
artifact_version="N/A",
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
per_version_time_series_dict = None
per_branch_time_series_dict = None
@@ -277,6 +278,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
if ok:
# push per-version data
@@ -295,6 +297,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
if ok:
# push per-branch data
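common_exporter_logic gains an optional running_platform argument that defaults to None, so existing callers keep working; the function only forwards it to the per-version and per-branch extraction helpers. A small self-contained sketch of that forwarding pattern, with hypothetical names and simplified signatures rather than the project's real ones:

def extract_timeseries(results, running_platform=None):
    # the real helpers only add a running_platform label/name segment when it is set
    return {"results": results, "running_platform": running_platform}

def exporter_logic(results, running_platform=None):
    # forwards the optional argument unchanged, as common_exporter_logic does
    return extract_timeseries(results, running_platform)

exporter_logic({"rps": 1000})                                 # old call sites, unchanged behaviour
exporter_logic({"rps": 1000}, running_platform="platform-x")  # new platform-aware call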
14 changes: 14 additions & 0 deletions redisbench_admin/run/redistimeseries.py
@@ -31,6 +31,7 @@ def redistimeseries_results_logic(
tf_triggering_env,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
# check which metrics to extract
exporter_timemetric_path, metrics = merge_default_and_config_metrics(
@@ -50,6 +51,7 @@
artifact_version,
metadata_tags,
build_variant_name,
running_platform,
)
return per_version_time_series_dict, per_branch_time_series_dict

@@ -67,6 +69,7 @@ def add_standardized_metric_bybranch(
tf_triggering_env,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
tsname_use_case_duration = get_ts_metric_name(
"by.branch",
@@ -80,6 +83,7 @@
None,
False,
build_variant_name,
running_platform,
)
labels = get_project_ts_tags(
tf_github_org,
@@ -88,6 +92,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
labels["branch"] = tf_github_branch
labels["test_name"] = str(test_name)
@@ -130,6 +135,7 @@ def add_standardized_metric_byversion(
tf_triggering_env,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
tsname_use_case_duration = get_ts_metric_name(
"by.version",
@@ -143,6 +149,7 @@
None,
False,
build_variant_name,
running_platform,
)
labels = get_project_ts_tags(
tf_github_org,
@@ -201,6 +208,7 @@ def timeseries_test_sucess_flow(
tsname_project_total_success,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
if push_results_redistimeseries:
logging.info("Pushing results to RedisTimeSeries.")
@@ -219,6 +227,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
try:
rts.redis.sadd(testcases_setname, test_name)
@@ -233,6 +242,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
),
)
if tf_github_branch is not None and tf_github_branch != "":
@@ -249,6 +259,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
add_standardized_metric_bybranch(
"dataset_load_duration",
@@ -263,6 +274,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
if artifact_version is not None and artifact_version != "":
add_standardized_metric_byversion(
@@ -278,6 +290,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
add_standardized_metric_byversion(
"dataset_load_duration",
@@ -292,6 +305,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
except redis.exceptions.ResponseError as e:
logging.warning(
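Every helper in this file now threads running_platform both into get_ts_metric_name, which places it in the time-series key, and into get_project_ts_tags, which attaches it as a label. A minimal self-contained replica of that label logic, mirroring the get_project_ts_tags hunk in redisbench_admin/utils/remote.py below (simplified signature, for illustration only):

def build_ts_labels(github_org, github_repo, triggering_env,
                    metadata_tags={}, build_variant_name=None, running_platform=None):
    tags = {
        "github_org": github_org,
        "github_repo": github_repo,
        "triggering_env": triggering_env,
    }
    if build_variant_name is not None:
        tags["build_variant"] = build_variant_name
    if running_platform is not None:
        # new in this change: present only when a platform is supplied
        tags["running_platform"] = running_platform
    for k, v in metadata_tags.items():
        tags[k] = str(v)
    return tags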
2 changes: 2 additions & 0 deletions redisbench_admin/run_remote/run_remote.py
@@ -147,6 +147,8 @@ def run_remote_command_logic(args, project_name, project_version):
testcases_setname,
tsname_project_total_failures,
tsname_project_total_success,
_,
_,
) = get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_env)
rts = None
if args.push_results_redistimeseries:
42 changes: 39 additions & 3 deletions redisbench_admin/utils/remote.py
@@ -498,6 +498,7 @@ def extract_perversion_timeseries_from_results(
tf_triggering_env: str,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
break_by_key = "version"
break_by_str = "by.{}".format(break_by_key)
@@ -515,6 +516,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
return True, branch_time_series_dict

@@ -533,6 +535,7 @@
tf_triggering_env,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
branch_time_series_dict = {}
for jsonpath in metrics:
@@ -568,6 +571,7 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
timeserie_tags[break_by_key] = project_version
timeserie_tags["test_name"] = str(test_name)
@@ -588,6 +592,7 @@
metric_context_path,
use_metric_context_path,
build_variant_name,
running_platform,
)
branch_time_series_dict[ts_name] = {
"labels": timeserie_tags.copy(),
@@ -607,6 +612,7 @@ def get_project_ts_tags(
tf_triggering_env: str,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
tags = {
"github_org": tf_github_org,
@@ -617,6 +623,8 @@
}
if build_variant_name is not None:
tags["build_variant"] = build_variant_name
if running_platform is not None:
tags["running_platform"] = running_platform
for k, v in metadata_tags.items():
tags[k] = str(v)
return tags
Expand All @@ -634,6 +642,7 @@ def extract_perbranch_timeseries_from_results(
tf_triggering_env: str,
metadata_tags={},
build_variant_name=None,
running_platform=None,
):
break_by_key = "branch"
break_by_str = "by.{}".format(break_by_key)
@@ -651,20 +660,45 @@
tf_triggering_env,
metadata_tags,
build_variant_name,
running_platform,
)
return True, branch_time_series_dict


def get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_env):
prefix = (
def get_overall_dashboard_keynames(
tf_github_org,
tf_github_repo,
tf_triggering_env,
build_variant_name=None,
running_platform=None,
):
build_variant_str = ""
if build_variant_name is not None:
build_variant_str = "/{}".format(build_variant_name)
running_platform_str = ""
if running_platform is not None:
running_platform_str = "/{}".format(running_platform)
sprefix = (
"ci.benchmarks.redislabs/"
+ "{triggering_env}/{github_org}/{github_repo}".format(
triggering_env=tf_triggering_env,
github_org=tf_github_org,
github_repo=tf_github_repo,
)
)
testcases_setname = "{}:testcases".format(prefix)
testcases_setname = "{}:testcases".format(sprefix)
running_platforms_setname = "{}:platforms".format(sprefix)
build_variant_prefix = "{sprefix}{build_variant_str}".format(
sprefix=sprefix,
build_variant_str=build_variant_str,
)
testcases_build_variant_setname = "{}:testcases:build_variants".format(
build_variant_prefix
)
prefix = "{build_variant_prefix}{running_platform_str}".format(
build_variant_prefix=build_variant_prefix,
running_platform_str=running_platform_str,
)
tsname_project_total_success = "{}:total_success".format(
prefix,
)
@@ -676,6 +710,8 @@ def get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_
testcases_setname,
tsname_project_total_failures,
tsname_project_total_success,
running_platforms_setname,
testcases_build_variant_setname,
)


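For reference, the key names the reworked get_overall_dashboard_keynames produces. The first two calls match the updated assertions in tests/test_run_remote.py; the third call, with a running platform supplied, uses an assumed value ("platform-x") to illustrate the fully qualified prefix:

from redisbench_admin.utils.remote import get_overall_dashboard_keynames

# no build variant, no platform (backwards compatible)
get_overall_dashboard_keynames("org", "repo", "env")
# prefix                          -> ci.benchmarks.redislabs/env/org/repo
# testcases_setname               -> ci.benchmarks.redislabs/env/org/repo:testcases
# running_platforms_setname       -> ci.benchmarks.redislabs/env/org/repo:platforms
# testcases_build_variant_setname -> ci.benchmarks.redislabs/env/org/repo:testcases:build_variants

# build variant only
get_overall_dashboard_keynames("org", "repo", "env", "build-1")
# prefix                          -> ci.benchmarks.redislabs/env/org/repo/build-1
# tsname_project_total_success    -> ci.benchmarks.redislabs/env/org/repo/build-1:total_success

# build variant plus running platform (assumed value)
get_overall_dashboard_keynames("org", "repo", "env", "build-1", "platform-x")
# prefix                          -> ci.benchmarks.redislabs/env/org/repo/build-1/platform-x
# tsname_project_total_failures   -> ci.benchmarks.redislabs/env/org/repo/build-1/platform-x:total_failures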
7 changes: 6 additions & 1 deletion redisbench_admin/utils/utils.py
@@ -221,23 +221,28 @@ def get_ts_metric_name(
metric_context_path=None,
use_metric_context_path=False,
build_variant_name=None,
running_platform=None,
):
if use_metric_context_path:
metric_name = "{}/{}".format(metric_name, metric_context_path)
build_variant_str = ""
if build_variant_name is not None:
build_variant_str = "{}/".format(str(build_variant_name))
running_platform_str = ""
if running_platform is not None:
running_platform_str = "{}/".format(str(running_platform))
ts_name = (
"ci.benchmarks.redislabs/{by}/"
"{triggering_env}/{github_org}/{github_repo}/"
"{test_name}/{build_variant_str}{deployment_type}/{by_value}/{metric}".format(
"{test_name}/{build_variant_str}{running_platform_str}{deployment_type}/{by_value}/{metric}".format(
by=by,
triggering_env=tf_triggering_env,
github_org=tf_github_org,
github_repo=tf_github_repo,
test_name=test_name,
deployment_type=deployment_type,
build_variant_str=build_variant_str,
running_platform_str=running_platform_str,
by_value=str(by_value),
metric=metric_name,
)
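To show where the new segment lands, here is the name template from this hunk evaluated with assumed placeholder values; the only change is that running_platform_str is inserted between the build variant and the deployment type:

ts_name = (
    "ci.benchmarks.redislabs/{by}/"
    "{triggering_env}/{github_org}/{github_repo}/"
    "{test_name}/{build_variant_str}{running_platform_str}{deployment_type}/{by_value}/{metric}".format(
        by="by.branch",
        triggering_env="ci",
        github_org="org",
        github_repo="repo",
        test_name="test-1",
        build_variant_str="build-1/",
        running_platform_str="platform-x/",
        deployment_type="oss-standalone",
        by_value="master",
        metric="rps",
    )
)
print(ts_name)
# ci.benchmarks.redislabs/by.branch/ci/org/repo/test-1/build-1/platform-x/oss-standalone/master/rps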
2 changes: 2 additions & 0 deletions tests/test_redistimeseries.py
@@ -47,6 +47,8 @@ def test_timeseries_test_sucess_flow():
testcases_setname,
tsname_project_total_failures,
tsname_project_total_success,
_,
_,
) = get_overall_dashboard_keynames(
tf_github_org, tf_github_repo, tf_triggering_env
)
30 changes: 30 additions & 0 deletions tests/test_run_remote.py
@@ -94,6 +94,8 @@ def test_get_overall_dashboard_keynames():
testcases_setname,
tsname_project_total_failures,
tsname_project_total_success,
_,
_,
) = get_overall_dashboard_keynames("org", "repo", "env")
assert "ci.benchmarks.redislabs/env/org/repo:testcases" == testcases_setname
assert "ci.benchmarks.redislabs/env/org/repo" == prefix
@@ -105,6 +107,34 @@
"ci.benchmarks.redislabs/env/org/repo:total_failures"
== tsname_project_total_failures
)
(
prefix,
testcases_setname,
tsname_project_total_failures,
tsname_project_total_success,
running_platforms_setname,
testcases_build_variant_setname,
) = get_overall_dashboard_keynames(
"org",
"repo",
"env",
"build-1",
)
assert "ci.benchmarks.redislabs/env/org/repo:testcases" == testcases_setname
assert "ci.benchmarks.redislabs/env/org/repo:platforms" == running_platforms_setname
assert (
"ci.benchmarks.redislabs/env/org/repo/build-1:testcases:build_variants"
== testcases_build_variant_setname
)
assert "ci.benchmarks.redislabs/env/org/repo/build-1" == prefix
assert (
"ci.benchmarks.redislabs/env/org/repo/build-1:total_success"
== tsname_project_total_success
)
assert (
"ci.benchmarks.redislabs/env/org/repo/build-1:total_failures"
== tsname_project_total_failures
)


def test_extract_tsbs_extra_links():