diff --git a/pyproject.toml b/pyproject.toml index a975e2d..10a34dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "redisbench-admin" -version = "0.2.10" +version = "0.2.11" description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )." authors = ["filipecosta90 "] readme = "README.md" diff --git a/redisbench_admin/profilers/perf.py b/redisbench_admin/profilers/perf.py index 5a69971..2eb34fc 100644 --- a/redisbench_admin/profilers/perf.py +++ b/redisbench_admin/profilers/perf.py @@ -103,16 +103,20 @@ def generate_record_command(self, pid, output, frequency=None): cmd += ["--freq", "{}".format(frequency)] return cmd - def generate_report_command(self, input, dso=None): + def generate_report_command(self, tid, input, dso, percentage_mode): cmd = [self.perf, "report"] if dso is not None: cmd += ["--dso", dso] cmd += [ "--header", + "--tid", + "{}".format(tid), "--no-children", "--stdio", "-g", "none,1.0,caller,function", + "--percentage", + percentage_mode, "--input", input, ] @@ -293,12 +297,13 @@ def generate_outputs(self, use_case, **kwargs): if artifact_result is True: outputs["perf output"] = os.path.abspath(self.output) + tid = self.pid # generate perf report --stdio report logging.info("Generating perf report text outputs") perf_report_output = self.output + ".perf-report.top-cpu.txt" artifact_result, perf_report_artifact = self.run_perf_report( - perf_report_output, None + tid, perf_report_output, None, "absolute" ) if artifact_result is True: @@ -312,7 +317,7 @@ def generate_outputs(self, use_case, **kwargs): perf_report_output_dso = self.output + ".perf-report.top-cpu.dso.txt" artifact_result, perf_report_artifact = self.run_perf_report( - perf_report_output_dso, binary + tid, perf_report_output_dso, binary, "relative" ) if artifact_result is True: @@ -406,14 +411,10 @@ def generate_flame_graph(self, 
title="Flame Graph", subtitle="", filename=None): def get_collapsed_stacks(self): return self.collapsed_stacks - def run_perf_report( - self, - output, - dso, - ): + def run_perf_report(self, tid, output, dso, percentage_mode): status = False result_artifact = None - args = self.generate_report_command(self.output, dso) + args = self.generate_report_command(tid, self.output, dso, percentage_mode) logging.info("Running {} report with args {}".format(self.perf, args)) try: stdout, _ = subprocess.Popen( diff --git a/redisbench_admin/run_local/args.py b/redisbench_admin/run_local/args.py index 2532f93..56fc095 100644 --- a/redisbench_admin/run_local/args.py +++ b/redisbench_admin/run_local/args.py @@ -12,6 +12,7 @@ PROFILE_FREQ_DEFAULT, ) +PUSH_S3 = bool(os.getenv("PUSH_S3", False)) PROFILERS_ENABLED = os.getenv("PROFILE", 0) PROFILERS = os.getenv("PROFILERS", PROFILERS_DEFAULT) PROFILE_FREQ = os.getenv("PROFILE_FREQ", PROFILE_FREQ_DEFAULT) @@ -38,6 +39,19 @@ def create_run_local_arguments(parser): help="path to the module file. " "You can use `--required-module` more than once", ) + parser.add_argument( + "--s3_bucket_name", + type=str, + default="ci.benchmarks.redislabs", + help="S3 bucket name.", + ) + parser.add_argument( + "--upload_results_s3", + default=PUSH_S3, + action="store_true", + help="uploads the result files and configuration file to public " + "'ci.benchmarks.redislabs' bucket. 
Proper credentials are required", + ) parser.add_argument("--profilers", type=str, default=PROFILERS) parser.add_argument( "--enable-profilers", diff --git a/redisbench_admin/run_local/run_local.py b/redisbench_admin/run_local/run_local.py index 9b63037..39e8604 100644 --- a/redisbench_admin/run_local/run_local.py +++ b/redisbench_admin/run_local/run_local.py @@ -38,6 +38,7 @@ ) from redisbench_admin.run_remote.run_remote import ( extract_module_semver_from_info_modules_cmd, + get_test_s3_bucket_path, ) from redisbench_admin.utils.local import ( spin_up_local_redis, @@ -49,7 +50,11 @@ extract_git_vars, ) from redisbench_admin.utils.results import post_process_benchmark_results -from redisbench_admin.utils.utils import decompress_file, get_decompressed_filename +from redisbench_admin.utils.utils import ( + decompress_file, + get_decompressed_filename, + upload_artifacts_to_s3, +) def run_local_command_logic(args): @@ -66,6 +71,7 @@ def run_local_command_logic(args): os.path.abspath(".") required_modules = args.required_module profilers_enabled = args.enable_profilers + s3_bucket_name = args.s3_bucket_name profilers_map = {} profilers_list = [] if profilers_enabled: @@ -119,6 +125,7 @@ def run_local_command_logic(args): for test_name, benchmark_config in benchmark_definitions.items(): redis_process = None + # after we've spinned Redis, even on error we should always teardown # in case of some unexpected error we fail the test # noinspection PyBroadException @@ -249,6 +256,13 @@ def run_local_command_logic(args): + "If that is not possible please change the profile frequency to an higher value." + "via the env variable PROFILE_FREQ. NOTICE THAT THIS INCREASES OVERHEAD!!!" 
) + s3_bucket_path = get_test_s3_bucket_path( + s3_bucket_name, + test_name, + github_org_name, + github_repo_name, + "profiles", + ) for profiler_name, profiler_obj in profilers_map.items(): # Collect and fold stacks logging.info( @@ -278,6 +292,7 @@ def run_local_command_logic(args): len(profile_res_artifacts_map.values()), ) ) + artifact_paths = [] for ( artifact_name, profile_artifact, @@ -290,11 +305,21 @@ def run_local_command_logic(args): profile_artifact, ] ) + artifact_paths.append(profile_artifact) logging.info( "artifact {}: {}.".format( artifact_name, profile_artifact ) ) + if args.upload_results_s3: + logging.info( + "Uploading results to s3. s3 bucket name: {}. s3 bucket path: {}".format( + s3_bucket_name, s3_bucket_path + ) + ) + upload_artifacts_to_s3( + artifact_paths, s3_bucket_name, s3_bucket_path + ) post_process_benchmark_results( benchmark_tool, diff --git a/redisbench_admin/run_remote/run_remote.py b/redisbench_admin/run_remote/run_remote.py index f28f126..8df283c 100644 --- a/redisbench_admin/run_remote/run_remote.py +++ b/redisbench_admin/run_remote/run_remote.py @@ -864,11 +864,14 @@ def extract_tsbs_extra_links(benchmark_config, benchmark_tool): return queries_file_link, remote_tool_link, tool_link -def get_test_s3_bucket_path(s3_bucket_name, test_name, tf_github_org, tf_github_repo): - s3_bucket_path = "{github_org}/{github_repo}/results/{test_name}/".format( +def get_test_s3_bucket_path( + s3_bucket_name, test_name, tf_github_org, tf_github_repo, folder="results" +): + s3_bucket_path = "{github_org}/{github_repo}/{folder}/{test_name}/".format( github_org=tf_github_org, github_repo=tf_github_repo, test_name=test_name, + folder=folder, ) return s3_bucket_path diff --git a/redisbench_admin/utils/utils.py b/redisbench_admin/utils/utils.py index e2b265f..c5c96c5 100644 --- a/redisbench_admin/utils/utils.py +++ b/redisbench_admin/utils/utils.py @@ -25,7 +25,7 @@ def upload_artifacts_to_s3(artifacts, s3_bucket_name, s3_bucket_path): - 
print("-- uploading results to s3 -- ") + logging.info("-- uploading results to s3 -- ") s3 = boto3.resource("s3") bucket = s3.Bucket(s3_bucket_name) progress = tqdm(unit="files", total=len(artifacts)) @@ -33,6 +33,7 @@ def upload_artifacts_to_s3(artifacts, s3_bucket_name, s3_bucket_path): object_key = "{bucket_path}{filename}".format( bucket_path=s3_bucket_path, filename=artifact ) + bucket.upload_file(artifact, object_key) object_acl = s3.ObjectAcl(s3_bucket_name, object_key) object_acl.put(ACL="public-read")