2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redisbench-admin"
-version = "0.2.10"
+version = "0.2.11"
 description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
 authors = ["filipecosta90 <filipecosta.90@gmail.com>"]
 readme = "README.md"
19 changes: 10 additions & 9 deletions redisbench_admin/profilers/perf.py
@@ -103,16 +103,20 @@ def generate_record_command(self, pid, output, frequency=None):
             cmd += ["--freq", "{}".format(frequency)]
         return cmd

-    def generate_report_command(self, input, dso=None):
+    def generate_report_command(self, tid, input, dso, percentage_mode):
         cmd = [self.perf, "report"]
         if dso is not None:
             cmd += ["--dso", dso]
         cmd += [
             "--header",
+            "--tid",
+            "{}".format(tid),
             "--no-children",
             "--stdio",
             "-g",
             "none,1.0,caller,function",
+            "--percentage",
+            percentage_mode,
             "--input",
             input,
         ]
@@ -293,12 +297,13 @@ def generate_outputs(self, use_case, **kwargs):
         if artifact_result is True:
             outputs["perf output"] = os.path.abspath(self.output)

+        tid = self.pid
         # generate perf report --stdio report
         logging.info("Generating perf report text outputs")
         perf_report_output = self.output + ".perf-report.top-cpu.txt"

         artifact_result, perf_report_artifact = self.run_perf_report(
-            perf_report_output, None
+            tid, perf_report_output, None, "absolute"
         )

         if artifact_result is True:
@@ -312,7 +317,7 @@ def generate_outputs(self, use_case, **kwargs):
         perf_report_output_dso = self.output + ".perf-report.top-cpu.dso.txt"

         artifact_result, perf_report_artifact = self.run_perf_report(
-            perf_report_output_dso, binary
+            tid, perf_report_output_dso, binary, "relative"
         )

         if artifact_result is True:
@@ -406,14 +411,10 @@ def generate_flame_graph(self, title="Flame Graph", subtitle="", filename=None):
     def get_collapsed_stacks(self):
         return self.collapsed_stacks

-    def run_perf_report(
-        self,
-        output,
-        dso,
-    ):
+    def run_perf_report(self, tid, output, dso, percentage_mode):
         status = False
         result_artifact = None
-        args = self.generate_report_command(self.output, dso)
+        args = self.generate_report_command(tid, self.output, dso, percentage_mode)
         logging.info("Running {} report with args {}".format(self.perf, args))
         try:
             stdout, _ = subprocess.Popen(
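
Note (reviewer sketch, not part of the diff): with these changes, generate_report_command emits a perf invocation like the one built below. All concrete values (tid, dso, input path, percentage mode) are hypothetical examples.

# Minimal runnable sketch mirroring the updated generate_report_command.
perf = "perf"
tid = 1234
dso = "redisearch.so"
input_file = "out.perf"
percentage_mode = "absolute"
cmd = [perf, "report"]
if dso is not None:
    cmd += ["--dso", dso]
cmd += [
    "--header",
    "--tid",
    "{}".format(tid),
    "--no-children",
    "--stdio",
    "-g",
    "none,1.0,caller,function",
    "--percentage",
    percentage_mode,
    "--input",
    input_file,
]
print(" ".join(cmd))
# perf report --dso redisearch.so --header --tid 1234 --no-children --stdio
#   -g none,1.0,caller,function --percentage absolute --input out.perf

--tid restricts the report to samples from the given thread (here the profiled pid), and --percentage absolute/relative controls whether percentages are computed over all samples or only over the filtered ones.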
14 changes: 14 additions & 0 deletions redisbench_admin/run_local/args.py
@@ -12,6 +12,7 @@
     PROFILE_FREQ_DEFAULT,
 )

+PUSH_S3 = bool(os.getenv("PUSH_S3", False))
 PROFILERS_ENABLED = os.getenv("PROFILE", 0)
 PROFILERS = os.getenv("PROFILERS", PROFILERS_DEFAULT)
 PROFILE_FREQ = os.getenv("PROFILE_FREQ", PROFILE_FREQ_DEFAULT)
@@ -38,6 +39,19 @@ def create_run_local_arguments(parser):
         help="path to the module file. "
         "You can use `--required-module` more than once",
     )
+    parser.add_argument(
+        "--s3_bucket_name",
+        type=str,
+        default="ci.benchmarks.redislabs",
+        help="S3 bucket name.",
+    )
+    parser.add_argument(
+        "--upload_results_s3",
+        default=PUSH_S3,
+        action="store_true",
+        help="uploads the result files and configuration file to public "
+        "'ci.benchmarks.redislabs' bucket. Proper credentials are required",
+    )
     parser.add_argument("--profilers", type=str, default=PROFILERS)
     parser.add_argument(
         "--enable-profilers",
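
Note (observation, not part of the diff): os.getenv returns a string whenever the variable is set, so bool(os.getenv("PUSH_S3", False)) evaluates to True for any non-empty value, including PUSH_S3=0. A quick runnable demonstration:

import os

os.environ["PUSH_S3"] = "0"  # hypothetical shell: export PUSH_S3=0
print(bool(os.getenv("PUSH_S3", False)))  # True -- any non-empty string is truthy
del os.environ["PUSH_S3"]
print(bool(os.getenv("PUSH_S3", False)))  # False -- falls back to the bool default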
27 changes: 26 additions & 1 deletion redisbench_admin/run_local/run_local.py
@@ -38,6 +38,7 @@
 )
 from redisbench_admin.run_remote.run_remote import (
     extract_module_semver_from_info_modules_cmd,
+    get_test_s3_bucket_path,
 )
 from redisbench_admin.utils.local import (
     spin_up_local_redis,
@@ -49,7 +50,11 @@
     extract_git_vars,
 )
 from redisbench_admin.utils.results import post_process_benchmark_results
-from redisbench_admin.utils.utils import decompress_file, get_decompressed_filename
+from redisbench_admin.utils.utils import (
+    decompress_file,
+    get_decompressed_filename,
+    upload_artifacts_to_s3,
+)


 def run_local_command_logic(args):
@@ -66,6 +71,7 @@ def run_local_command_logic(args):
     os.path.abspath(".")
     required_modules = args.required_module
     profilers_enabled = args.enable_profilers
+    s3_bucket_name = args.s3_bucket_name
     profilers_map = {}
     profilers_list = []
     if profilers_enabled:
@@ -119,6 +125,7 @@

     for test_name, benchmark_config in benchmark_definitions.items():
         redis_process = None
+
         # after we've spinned Redis, even on error we should always teardown
         # in case of some unexpected error we fail the test
         # noinspection PyBroadException
@@ -249,6 +256,13 @@
                     + "If that is not possible please change the profile frequency to an higher value."
                     + "via the env variable PROFILE_FREQ. NOTICE THAT THIS INCREASES OVERHEAD!!!"
                 )
+                s3_bucket_path = get_test_s3_bucket_path(
+                    s3_bucket_name,
+                    test_name,
+                    github_org_name,
+                    github_repo_name,
+                    "profiles",
+                )
                 for profiler_name, profiler_obj in profilers_map.items():
                     # Collect and fold stacks
                     logging.info(
@@ -278,6 +292,7 @@
                         len(profile_res_artifacts_map.values()),
                     )
                 )
+                artifact_paths = []
                 for (
                     artifact_name,
                     profile_artifact,
@@ -290,11 +305,21 @@
                             profile_artifact,
                         ]
                     )
+                    artifact_paths.append(profile_artifact)
                     logging.info(
                         "artifact {}: {}.".format(
                             artifact_name, profile_artifact
                         )
                     )
+                if args.upload_results_s3:
+                    logging.info(
+                        "Uploading results to s3. s3 bucket name: {}. s3 bucket path: {}".format(
+                            s3_bucket_name, s3_bucket_path
+                        )
+                    )
+                    upload_artifacts_to_s3(
+                        artifact_paths, s3_bucket_name, s3_bucket_path
+                    )

         post_process_benchmark_results(
             benchmark_tool,
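
Note (reviewer sketch, not part of the diff): the pieces above combine into the following standalone flow. The argparse values, org/repo/test names, and artifact paths are hypothetical; the print stands in for the real upload_artifacts_to_s3 call.

import argparse

args = argparse.Namespace(
    upload_results_s3=True, s3_bucket_name="ci.benchmarks.redislabs"
)
github_org_name, github_repo_name, test_name = "org", "repo", "tfidf"
# Equivalent of get_test_s3_bucket_path(..., "profiles"):
s3_bucket_path = "{}/{}/profiles/{}/".format(
    github_org_name, github_repo_name, test_name
)
artifact_paths = ["out.perf.flamegraph.svg", "out.perf-report.top-cpu.txt"]
if args.upload_results_s3:
    # run_local calls upload_artifacts_to_s3(artifact_paths, ...) here.
    print(
        "upload {} -> s3://{}/{}".format(
            artifact_paths, args.s3_bucket_name, s3_bucket_path
        )
    )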
7 changes: 5 additions & 2 deletions redisbench_admin/run_remote/run_remote.py
@@ -864,11 +864,14 @@ def extract_tsbs_extra_links(benchmark_config, benchmark_tool):
     return queries_file_link, remote_tool_link, tool_link


-def get_test_s3_bucket_path(s3_bucket_name, test_name, tf_github_org, tf_github_repo):
-    s3_bucket_path = "{github_org}/{github_repo}/results/{test_name}/".format(
+def get_test_s3_bucket_path(
+    s3_bucket_name, test_name, tf_github_org, tf_github_repo, folder="results"
+):
+    s3_bucket_path = "{github_org}/{github_repo}/{folder}/{test_name}/".format(
         github_org=tf_github_org,
         github_repo=tf_github_repo,
         test_name=test_name,
+        folder=folder,
     )
     return s3_bucket_path

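
Note: the new folder parameter defaults to "results", so existing callers keep their current paths. A runnable sketch (org/repo/test names are hypothetical):

def get_test_s3_bucket_path(
    s3_bucket_name, test_name, tf_github_org, tf_github_repo, folder="results"
):
    # Same body as the diff above; s3_bucket_name is accepted but unused.
    s3_bucket_path = "{github_org}/{github_repo}/{folder}/{test_name}/".format(
        github_org=tf_github_org,
        github_repo=tf_github_repo,
        test_name=test_name,
        folder=folder,
    )
    return s3_bucket_path


print(get_test_s3_bucket_path("ci.benchmarks.redislabs", "tfidf", "org", "repo"))
# -> org/repo/results/tfidf/
print(
    get_test_s3_bucket_path(
        "ci.benchmarks.redislabs", "tfidf", "org", "repo", "profiles"
    )
)
# -> org/repo/profiles/tfidf/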
3 changes: 2 additions & 1 deletion redisbench_admin/utils/utils.py
@@ -25,14 +25,15 @@


 def upload_artifacts_to_s3(artifacts, s3_bucket_name, s3_bucket_path):
-    print("-- uploading results to s3 -- ")
+    logging.info("-- uploading results to s3 -- ")
     s3 = boto3.resource("s3")
     bucket = s3.Bucket(s3_bucket_name)
     progress = tqdm(unit="files", total=len(artifacts))
     for artifact in artifacts:
         object_key = "{bucket_path}{filename}".format(
             bucket_path=s3_bucket_path, filename=artifact
         )
+
         bucket.upload_file(artifact, object_key)
         object_acl = s3.ObjectAcl(s3_bucket_name, object_key)
         object_acl.put(ACL="public-read")
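
Note: a usage sketch for the helper above, assuming boto3 can resolve AWS credentials (environment variables or ~/.aws/credentials) and the bucket permits public-read ACLs. The artifact file and key prefix are hypothetical.

from redisbench_admin.utils.utils import upload_artifacts_to_s3

upload_artifacts_to_s3(
    ["out.perf.flamegraph.svg"],  # hypothetical local artifact file
    "ci.benchmarks.redislabs",  # the new CLI default bucket
    "org/repo/profiles/tfidf/",  # key prefix from get_test_s3_bucket_path
)
# Each artifact is uploaded to "<prefix><artifact>" and its ACL is then set
# to public-read, matching object_acl.put(ACL="public-read") above.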