From 0d32b96487e19c553987dc65596cbc566ed22cc7 Mon Sep 17 00:00:00 2001
From: filipecosta90
Date: Tue, 1 Feb 2022 23:31:36 +0000
Subject: [PATCH 1/3] [fix] get_final_benchmark_config is error agnostic

---
 pyproject.toml                             |  2 +-
 redisbench_admin/utils/benchmark_config.py | 34 +++++++++++++++-------
 tests/test_common.py                       | 10 ++++++-
 3 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index f75927b..597ce26 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redisbench-admin"
-version = "0.6.16"
+version = "0.6.17"
 description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
 authors = ["filipecosta90 ","Redis Performance Group "]
 readme = "README.md"
diff --git a/redisbench_admin/utils/benchmark_config.py b/redisbench_admin/utils/benchmark_config.py
index 67996a4..904ae1f 100644
--- a/redisbench_admin/utils/benchmark_config.py
+++ b/redisbench_admin/utils/benchmark_config.py
@@ -62,10 +62,11 @@ def prepare_benchmark_definitions(args):
     ) = get_defaults(defaults_filename)
     for usecase_filename in files:
         with open(usecase_filename, "r") as stream:
-            benchmark_config, test_name = get_final_benchmark_config(
+            result, benchmark_config, test_name = get_final_benchmark_config(
                 default_kpis, stream, usecase_filename
             )
-            benchmark_definitions[test_name] = benchmark_config
+            if result:
+                benchmark_definitions[test_name] = benchmark_config
     return (
         benchmark_definitions,
         default_metrics,
@@ -109,15 +110,28 @@ def get_defaults(defaults_filename):
 
 
 def get_final_benchmark_config(default_kpis, stream, usecase_filename):
-    os.path.dirname(os.path.abspath(usecase_filename))
-    benchmark_config = yaml.safe_load(stream)
-    kpis_keyname = "kpis"
-    if default_kpis is not None:
-        merge_default_and_specific_properties_dict_type(
-            benchmark_config, default_kpis, kpis_keyname, usecase_filename
+    result = False
+    benchmark_config = None
+    test_name = None
+    try:
+        os.path.dirname(os.path.abspath(usecase_filename))
+        benchmark_config = yaml.safe_load(stream)
+        kpis_keyname = "kpis"
+        if default_kpis is not None:
+            merge_default_and_specific_properties_dict_type(
+                benchmark_config, default_kpis, kpis_keyname, usecase_filename
+            )
+        test_name = benchmark_config["name"]
+        result = True
+    except Exception as e:
+        logging.error(
+            "While loading file {} the following error was returned: {}".format(
+                usecase_filename, e.__str__()
+            )
         )
-    test_name = benchmark_config["name"]
-    return benchmark_config, test_name
+        pass
+
+    return result, benchmark_config, test_name
 
 
 def merge_default_and_specific_properties_dict_type(
diff --git a/tests/test_common.py b/tests/test_common.py
index fe1ed6c..5f39c45 100644
--- a/tests/test_common.py
+++ b/tests/test_common.py
@@ -369,7 +369,7 @@ def test_extract_test_feasible_setups():
     ) = get_defaults(defaults_filename)
     usecase_filename = "./tests/test_data/tsbs-devops-ingestion-scale100-4days-v2.yml"
     with open(usecase_filename, "r") as stream:
-        benchmark_config, test_name = get_final_benchmark_config(
+        _, benchmark_config, test_name = get_final_benchmark_config(
             default_kpis, stream, usecase_filename
         )
     assert cluster_config == {
@@ -409,6 +409,14 @@ def test_extract_test_feasible_setups():
        assert osscluster_setup_type == t
        assert osscluster_shard_count == c

+    # wrong read: file path does not exist
+    res, benchmark_config, test_name = get_final_benchmark_config(
+        default_kpis, stream, "dont exist"
+    )
+    assert res == False
+    assert benchmark_config == None
+    assert test_name == None
+

 def test_check_dbconfig_tool_requirement():
     with open(
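For context, a minimal sketch (not part of the patch) of the new calling convention that patch 1 introduces, mirroring the `prepare_benchmark_definitions()` hunk above; the `spec.yml` path and the `None` default KPIs are illustrative assumptions:

```python
# Sketch only: consuming the new (result, benchmark_config, test_name)
# return value of get_final_benchmark_config(). "spec.yml" is a
# hypothetical test spec; passing default_kpis=None skips the KPI merge.
from redisbench_admin.utils.benchmark_config import get_final_benchmark_config

benchmark_definitions = {}
with open("spec.yml", "r") as stream:
    result, benchmark_config, test_name = get_final_benchmark_config(
        None, stream, "spec.yml"
    )
if result:
    # only specs that parsed cleanly and carried a "name" key are kept
    benchmark_definitions[test_name] = benchmark_config
# on failure the error was already logged inside get_final_benchmark_config
```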
From 859ae713731c4e072bf174ca97a88396014ddd0a Mon Sep 17 00:00:00 2001
From: filipecosta90
Date: Thu, 10 Feb 2022 15:24:41 +0000
Subject: [PATCH 2/3] Fixed profiler-daemon startup code. Included
 documentation about perf-daemon

---
 README.md                            | 36 ++++++++++++++++++++++++++++
 docs/export.md                       |  2 +-
 pyproject.toml                       |  2 +-
 redisbench_admin/profilers/daemon.py |  2 +-
 redisbench_admin/utils/remote.py     |  6 ++---
 5 files changed, 42 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 11c40a7..5daa6f0 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,42 @@ python3 -m pip install redisbench-admin
 
 ## Profiler daemon
 
+You can use the profiler daemon by itself in the following manner.
+On the target machine do as follows:
+
+```bash
+pip3 install --upgrade pip
+pip3 install redisbench-admin --ignore-installed PyYAML
+
+# install perf
+apt install linux-tools-common linux-tools-generic linux-tools-`uname -r` -y
+
+# ensure perf is working
+perf --version
+
+# install awscli
+snap install aws-cli --classic
+
+
+# configure aws
+aws configure
+
+# start the perf-daemon
+perf-daemon start
+WARNING:root:Unable to detected github_actor. caught the following error: No section: 'user'
+Writting log to /tmp/perf-daemon.log
+Starting perf-daemon. PID file /tmp/perfdaemon.pid. Daemon workdir: /root/RedisGraph
+
+# check that the daemon is working appropriately
+curl localhost:5000/ping
+
+# start a profile
+curl -X POST localhost:5000/profiler/perf/start/
+
+# stop a profile
+curl -X POST -d '{"aws_access_key_id":$AWS_ACCESS_KEY_ID,"aws_secret_access_key":$AWS_SECRET_ACCESS_KEY}' localhost:5000/profiler/perf/stop/
+```
+
 ## Development
diff --git a/docs/export.md b/docs/export.md
index a851836..04acaef 100644
--- a/docs/export.md
+++ b/docs/export.md
@@ -27,7 +27,7 @@ Current supported benchmark tools to export data from:
 Installation is done using pip, the package installer for Python, in the following manner:
 
 ```bash
-python3 -m pip install redisbench-admin>=0.5.5
+python3 -m pip install redisbench-admin>=0.5.5 --ignore-installed PyYAML
 ```
 
 ## Required minimum arguments
diff --git a/pyproject.toml b/pyproject.toml
index 597ce26..443b40a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redisbench-admin"
-version = "0.6.17"
+version = "0.6.18"
 description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
 authors = ["filipecosta90 ","Redis Performance Group "]
 readme = "README.md"
diff --git a/redisbench_admin/profilers/daemon.py b/redisbench_admin/profilers/daemon.py
index 44dc732..3209741 100644
--- a/redisbench_admin/profilers/daemon.py
+++ b/redisbench_admin/profilers/daemon.py
@@ -19,7 +19,7 @@
 from redisbench_admin.cli import populate_with_poetry_data
 from redisbench_admin.profilers.perf import Perf
 from redisbench_admin.profilers.perf_daemon_caller import PERF_DAEMON_LOGNAME
-from redisbench_admin.profilers.profile_local import local_profilers_platform_checks
+from redisbench_admin.profilers.profilers_local import local_profilers_platform_checks
 from redisbench_admin.run.args import S3_BUCKET_NAME
 from redisbench_admin.run.common import get_start_time_vars
 from redisbench_admin.run.s3 import get_test_s3_bucket_path
diff --git a/redisbench_admin/utils/remote.py b/redisbench_admin/utils/remote.py
index aec0391..fd9bc63 100644
--- a/redisbench_admin/utils/remote.py
+++ b/redisbench_admin/utils/remote.py
@@ -332,7 +332,7 @@ def extract_git_vars(path=None, github_url=None):
         try:
             github_branch = github_repo.active_branch
         except TypeError as e:
-            logging.warning(
+            logging.debug(
                 "Unable to detected github_branch. caught the following error: {}".format(
                     e.__str__()
                 )
@@ -343,14 +343,14 @@ def extract_git_vars(path=None, github_url=None):
         try:
             github_actor = github_repo.config_reader().get_value("user", "name")
         except configparser.NoSectionError as e:
-            logging.warning(
+            logging.debug(
                 "Unable to detected github_actor. caught the following error: {}".format(
                     e.__str__()
                 )
             )
             github_branch_detached = True
     except git.exc.InvalidGitRepositoryError as e:
-        logging.warning(
+        logging.debug(
             "Unable to fill git vars. caught the following error: {}".format(
                 e.__str__()
             )
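The README snippet above drives the daemon with curl; the same HTTP endpoints can be exercised from Python. A sketch under the assumption that `requests` is available: the endpoint paths are copied verbatim from the README, and the credential payload mirrors the curl `-d` body:

```python
# Sketch only: driving the perf-daemon HTTP API documented in the README
# above from Python instead of curl. Assumes the daemon listens on
# localhost:5000 and that AWS credentials are exported in the environment.
import json
import os

import requests

BASE = "http://localhost:5000"

assert requests.get(BASE + "/ping").ok  # daemon liveness check

requests.post(BASE + "/profiler/perf/start/")  # begin profiling

# ... run the benchmark workload here ...

credentials = {
    "aws_access_key_id": os.environ["AWS_ACCESS_KEY_ID"],
    "aws_secret_access_key": os.environ["AWS_SECRET_ACCESS_KEY"],
}
requests.post(BASE + "/profiler/perf/stop/", data=json.dumps(credentials))
```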
From e0f0535eda3dc6e987fd622715fb0fd58621d460 Mon Sep 17 00:00:00 2001
From: filipecosta90
Date: Thu, 10 Feb 2022 16:30:58 +0000
Subject: [PATCH 3/3] In case of failure of DB spin-up, retrieve the log and
 push it to S3

---
 redisbench_admin/environments/oss_cluster.py | 10 +++--
 redisbench_admin/run/cluster.py              |  8 +++-
 redisbench_admin/run_remote/remote_db.py     | 45 +++++++++++++++-----
 redisbench_admin/run_remote/run_remote.py    |  2 +
 4 files changed, 51 insertions(+), 14 deletions(-)

diff --git a/redisbench_admin/environments/oss_cluster.py b/redisbench_admin/environments/oss_cluster.py
index 1c417b1..0c92ae9 100644
--- a/redisbench_admin/environments/oss_cluster.py
+++ b/redisbench_admin/environments/oss_cluster.py
@@ -28,7 +28,7 @@ def spin_up_local_redis_cluster(
     for master_shard_id in range(1, shard_count + 1):
         shard_port = master_shard_id + start_port - 1
 
-        command = generate_cluster_redis_server_args(
+        command, _ = generate_cluster_redis_server_args(
             dbdir,
             local_module_file,
             ip,
@@ -139,14 +139,18 @@ def generate_cluster_redis_server_args(
     configuration_parameters=None,
     daemonize="yes",
     modules_configuration_parameters_map={},
+    logname_prefix=None,
 ):
+    if logname_prefix is None:
+        logname_prefix = ""
+    logfile = "{}cluster-node-port-{}.log".format(logname_prefix, port)
     # start redis-server
     command = [
         "redis-server",
         "--appendonly",
         "no",
         "--logfile",
-        "cluster-node-port-{}.log".format(port),
+        logfile,
         "--cluster-enabled",
         "yes",
         "--daemonize",
@@ -186,7 +190,7 @@ def generate_cluster_redis_server_args(
         redis_server_config_module_part(
             command, mod, modules_configuration_parameters_map
         )
-    return command
+    return command, logfile
 
 
 def get_cluster_dbfilename(port):
diff --git a/redisbench_admin/run/cluster.py b/redisbench_admin/run/cluster.py
index 7486def..d802185 100644
--- a/redisbench_admin/run/cluster.py
+++ b/redisbench_admin/run/cluster.py
@@ -112,13 +112,16 @@ def spin_up_redis_cluster_remote_redis(
     start_port,
     ssh_port,
     modules_configuration_parameters_map,
+    logname,
 ):
     logging.info("Generating the remote redis-server command arguments")
     redis_process_commands = []
+    logfiles = []
+    logname_prefix = logname[: len(logname) - 4] + "-"
     for master_shard_id in range(1, shard_count + 1):
         shard_port = master_shard_id + start_port - 1
 
-        command = generate_cluster_redis_server_args(
+        command, logfile = generate_cluster_redis_server_args(
             dbdir_folder,
             remote_module_files,
             server_private_ip,
@@ -126,13 +129,16 @@ def spin_up_redis_cluster_remote_redis(
             redis_configuration_parameters,
             "yes",
             modules_configuration_parameters_map,
+            logname_prefix,
         )
         logging.error(
             "Remote primary shard {} command: {}".format(
                 master_shard_id, " ".join(command)
             )
         )
+        logfiles.append(logfile)
         redis_process_commands.append(" ".join(command))
     execute_remote_commands(
         server_public_ip, username, private_key, redis_process_commands, ssh_port
     )
+    return logfiles
diff --git a/redisbench_admin/run_remote/remote_db.py b/redisbench_admin/run_remote/remote_db.py
index dfb3cdf..eed171e 100644
--- a/redisbench_admin/run_remote/remote_db.py
+++ b/redisbench_admin/run_remote/remote_db.py
@@ -6,6 +6,8 @@
 import datetime
 import logging
 
+import redis
+
 from redisbench_admin.environments.oss_cluster import setup_redis_cluster_from_conns
 from redisbench_admin.run.cluster import (
     spin_up_redis_cluster_remote_redis,
@@ -24,6 +26,7 @@
     remote_dataset_folder,
 )
 from redisbench_admin.run_remote.remote_client import run_remote_client_tool
+from redisbench_admin.run_remote.remote_failures import failed_remote_run_artifact_store
 from redisbench_admin.run_remote.standalone import (
     cp_local_dbdir_to_remote,
     remote_module_files_cp,
@@ -84,6 +87,8 @@ def remote_db_spin(
     tf_github_sha,
     username,
     private_key,
+    s3_bucket_name,
+    s3_bucket_path,
 ):
     (
         _,
@@ -119,7 +124,7 @@ def remote_db_spin(
     redis_conns = []
     topology_setup_start_time = datetime.datetime.now()
     if setup_type == "oss-cluster":
-        spin_up_redis_cluster_remote_redis(
+        logfiles = spin_up_redis_cluster_remote_redis(
             server_public_ip,
             server_private_ip,
             username,
@@ -131,19 +136,39 @@ def remote_db_spin(
             cluster_start_port,
             db_ssh_port,
             modules_configuration_parameters_map,
+            logname,
         )
-
-        for p in range(cluster_start_port, cluster_start_port + shard_count):
-            local_redis_conn, ssh_tunnel = ssh_tunnel_redisconn(
-                p,
-                server_private_ip,
-                server_public_ip,
+        try:
+            for p in range(cluster_start_port, cluster_start_port + shard_count):
+                local_redis_conn, ssh_tunnel = ssh_tunnel_redisconn(
+                    p,
+                    server_private_ip,
+                    server_public_ip,
+                    username,
+                    db_ssh_port,
+                    private_key,
+                )
+                local_redis_conn.ping()
+                redis_conns.append(local_redis_conn)
+        except redis.exceptions.ConnectionError as e:
+            logging.error("An error occurred while spinning up the DB: {}".format(e.__str__()))
+            remote_file = "{}/{}".format(temporary_dir, logfiles[0])
+            logging.error(
+                "Trying to fetch the remote DB log {} into {}".format(
+                    remote_file, logfiles[0]
+                )
+            )
+            failed_remote_run_artifact_store(
+                True,
+                client_public_ip,
+                dirname,
+                remote_file,
+                logfiles[0],
+                s3_bucket_name,
+                s3_bucket_path,
                 username,
-                db_ssh_port,
                 private_key,
             )
-            local_redis_conn.ping()
-            redis_conns.append(local_redis_conn)
 
     if setup_type == "oss-standalone":
         full_logfile = spin_up_standalone_remote_redis(
diff --git a/redisbench_admin/run_remote/run_remote.py b/redisbench_admin/run_remote/run_remote.py
index 70eea62..894e4f0 100644
--- a/redisbench_admin/run_remote/run_remote.py
+++ b/redisbench_admin/run_remote/run_remote.py
@@ -317,6 +317,8 @@ def run_remote_command_logic(args, project_name, project_version):
                             tf_github_sha,
                             username,
                             private_key,
+                            s3_bucket_name,
+                            s3_bucket_path,
                         )
                         if benchmark_type == "read-only":
                             logging.info(
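To make the new logfile plumbing in patch 3 concrete, a worked sketch of how `logname_prefix` is derived and how the per-shard logfiles that the failure path uploads to S3 get their names; the `benchmark.log` input, start port, and shard count are illustrative:

```python
# Sketch only: the per-shard logfile naming introduced in patch 3.
# logname[: len(logname) - 4] strips the 4-character ".log" suffix
# before appending "-", as spin_up_redis_cluster_remote_redis does.
logname = "benchmark.log"  # hypothetical remote DB logname
logname_prefix = logname[: len(logname) - 4] + "-"  # -> "benchmark-"

start_port, shard_count = 12000, 3
logfiles = [
    "{}cluster-node-port-{}.log".format(logname_prefix, shard_port)
    for shard_port in range(start_port, start_port + shard_count)
]
print(logfiles)
# ['benchmark-cluster-node-port-12000.log',
#  'benchmark-cluster-node-port-12001.log',
#  'benchmark-cluster-node-port-12002.log']
```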