
Some fixes to make log info consistent with the mix-match workload change

After code was added to mix and match workloads, the stats of executed,
passed, and failed tests kept by each workload class became inconsistent.
It makes more sense to maintain only a count of the total number of tests
executed/passed/failed rather than a separate count per workload.

Change-Id: I4f84f4580ac29206e7ce660222a2a396e419cac8
smalleni committed Jan 10, 2018
1 parent e56bba7 commit 665d0ef7155a4549a21d0ea9e4e9cc3566626c55
Showing with 5 additions and 93 deletions.
  1. +3 −3 browbeat.py
  2. +0 −20 browbeat/perfkit.py
  3. +0 −20 browbeat/rally.py
  4. +0 −32 browbeat/shaker.py
  5. +2 −18 browbeat/workloadbase.py
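
With this change, every per-workload counter (test_count, pass_count, error_count,
scenario_count) and its update_* helper goes away; only the class-level totals kept
on WorkloadBase remain. A minimal sketch of the surviving pattern follows (not the
full browbeat/workloadbase.py: the attribute names total_pass_tests/total_fail_tests,
the logger name, and the last two summary log lines are assumptions inferred from the
update_total_* helpers and log messages visible in the hunks below):

import abc
import logging


class WorkloadBase(object):
    """Workload base class; test/scenario totals live once, at class level."""

    __metaclass__ = abc.ABCMeta

    # Logger name is illustrative only.
    logger = logging.getLogger('browbeat.workloadbase')

    # Shared by Perfkit, Rally and Shaker alike, so a mix-and-match run still
    # produces one consistent summary.
    total_scenarios = 0
    total_tests = 0
    total_pass_tests = 0
    total_fail_tests = 0

    @abc.abstractmethod
    def run_workload(self, workload, run_iteration):
        pass

    def update_total_scenarios(self):
        WorkloadBase.total_scenarios += 1

    def update_total_tests(self):
        WorkloadBase.total_tests += 1

    def update_total_pass_tests(self):
        WorkloadBase.total_pass_tests += 1

    def update_total_fail_tests(self):
        WorkloadBase.total_fail_tests += 1

    @staticmethod
    def display_summary():
        WorkloadBase.logger.info("Total scenarios executed:{}".format(
            WorkloadBase.total_scenarios))
        WorkloadBase.logger.info("Total tests executed:{}".format(WorkloadBase.total_tests))
        # The pass/fail lines below are assumed to follow the same pattern;
        # they are not part of the hunk excerpted at the end of this diff.
        WorkloadBase.logger.info("Total tests passed:{}".format(
            WorkloadBase.total_pass_tests))
        WorkloadBase.logger.info("Total tests failed:{}".format(
            WorkloadBase.total_fail_tests))

Each workload class then records results through self.update_total_tests(),
self.update_total_pass_tests() and friends (the calls left in place in the perfkit.py,
rally.py and shaker.py hunks), and main() logs one consolidated summary via
WorkloadBase.display_summary() and writes the report card with dump_report().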
browbeat.py
@@ -130,7 +130,7 @@ def main():
_logger.info("{} workload {} is enabled".format(workload["type"], workload["name"]))
tools.run_workload(workload, result_dir_ts, 0)
-browbeat.workloadbase.WorkloadBase.print_summary()
+browbeat.workloadbase.WorkloadBase.display_summary()
elif _config["browbeat"]["rerun_type"] == "complete":
# Complete rerun_type, reruns after all workloads have been run.
@@ -148,9 +148,9 @@ def main():
_logger.info("{} workload {} is enabled".format(workload["type"], workload["name"]))
tools.run_workload(workload, result_dir_ts, run_iteration)
-browbeat.workloadbase.WorkloadBase.print_summary()
+browbeat.workloadbase.WorkloadBase.display_summary()
-browbeat.workloadbase.WorkloadBase.print_report(results_path, result_dir_ts)
+browbeat.workloadbase.WorkloadBase.dump_report(results_path, result_dir_ts)
_logger.info("Saved browbeat result summary to {}"
.format(os.path.join(results_path, "{}.report".format(result_dir_ts))))
browbeat/perfkit.py
@@ -34,13 +34,9 @@ def __init__(self, config, result_dir_ts):
self.overcloudrc = get_overcloudrc()
self.config = config
self.result_dir_ts = result_dir_ts
-self.error_count = 0
self.tools = tools.Tools(self.config)
self.grafana = grafana.Grafana(self.config)
self.elastic = elastic.Elastic(self.config, self.__class__.__name__.lower())
-self.test_count = 0
-self.scenario_count = 0
-self.pass_count = 0
def string_to_dict(self, string):
"""Function for converting "|" quoted hash data into python dictionary."""
@@ -53,18 +49,6 @@ def string_to_dict(self, string):
dict_data[split_item[0]] = ast.literal_eval("'" + split_item[1] + "'")
return dict_data
-def update_tests(self):
-self.test_count += 1
-def update_pass_tests(self):
-self.pass_count += 1
-def update_fail_tests(self):
-self.error_count += 1
-def update_scenarios(self):
-self.scenario_count += 1
def get_error_details(self, result_dir):
error_details = []
with open('{}/pkb.stderr.log'.format(result_dir)) as perfkit_stderr:
@@ -182,7 +166,6 @@ def run_workload(self, workload, run_iteration):
self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
self.logger.info("Benchmark: {}".format(workload['name']))
-self.update_scenarios()
self.update_total_scenarios()
# Add default parameters as necessary
for default_item, value in self.config['perfkit']['default'].iteritems():
@@ -195,7 +178,6 @@ def run_workload(self, workload, run_iteration):
rerun_range = range(run_iteration, run_iteration + 1)
for run in rerun_range:
-self.update_tests()
self.update_total_tests()
result_dir = self.tools.create_results_dir(
results_path, self.result_dir_ts, workload['name'], str(run))
@@ -209,13 +191,11 @@ def run_workload(self, workload, run_iteration):
new_test_name = new_test_name[2:]
new_test_name = '-'.join(new_test_name)
if success:
-self.update_pass_tests()
self.update_total_pass_tests()
self.get_time_dict(to_ts, from_ts, workload['benchmarks'],
new_test_name, self.__class__.__name__, "pass",
index_success)
else:
-self.update_fail_tests()
self.update_total_fail_tests()
self.get_time_dict(to_ts, from_ts, workload['benchmarks'],
new_test_name, self.__class__.__name__, "fail",
browbeat/rally.py
@@ -37,10 +37,6 @@ def __init__(self, config, result_dir_ts):
self.tools = tools.Tools(self.config)
self.grafana = grafana.Grafana(self.config)
self.elastic = elastic.Elastic(self.config, self.__class__.__name__.lower())
-self.error_count = 0
-self.pass_count = 0
-self.test_count = 0
-self.scenario_count = 0
def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
self.logger.debug("--------------------------------")
@@ -76,18 +72,6 @@ def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmar
self.grafana.print_dashboard_url(test_name)
return (from_time, to_time)
-def update_tests(self):
-self.test_count += 1
-def update_pass_tests(self):
-self.pass_count += 1
-def update_fail_tests(self):
-self.error_count += 1
-def update_scenarios(self):
-self.scenario_count += 1
def get_task_id(self, test_name):
cmd = "grep \"rally task report [a-z0-9\-]* --out\" {}.log | awk '{{print $4}}'".format(
test_name)
@@ -230,7 +214,6 @@ def run_workload(self, workload, run_iteration):
self.logger.info("Running Scenario: {}".format(scenario["name"]))
self.logger.debug("Scenario File: {}".format(scenario["file"]))
-self.update_scenarios()
self.update_total_scenarios()
scenario_name = scenario["name"]
scenario_file = scenario["file"]
@@ -268,7 +251,6 @@ def run_workload(self, workload, run_iteration):
rerun_range = range(run_iteration, run_iteration + 1)
for run in rerun_range:
-self.update_tests()
self.update_total_tests()
concurrency_count_dict[concurrency] += 1
test_name = "{}-browbeat-{}-{}-{}-iteration-{}".format(
@@ -294,7 +276,6 @@ def run_workload(self, workload, run_iteration):
self.gen_scenario_html([task_id], test_name)
self.gen_scenario_json_file(task_id, test_name)
results.append(task_id)
-self.update_pass_tests()
self.update_total_pass_tests()
if self.config["elasticsearch"]["enabled"]:
# Start indexing
@@ -310,7 +291,6 @@ def run_workload(self, workload, run_iteration):
else:
self.logger.error("Cannot find task_id")
-self.update_fail_tests()
self.update_total_fail_tests()
self.get_time_dict(to_time, from_time, workload["name"], new_test_name,
self.__class__.__name__, "fail")
browbeat/shaker.py
@@ -38,10 +38,6 @@ def __init__(self, config, result_dir_ts):
self.tools = tools.Tools(self.config)
self.grafana = grafana.Grafana(self.config)
self.elastic = elastic.Elastic(self.config, self.__class__.__name__.lower())
-self.error_count = 0
-self.pass_count = 0
-self.test_count = 0
-self.scenario_count = 0
def shaker_checks(self):
cmd = "source {}; source {}; glance image-list | grep -w shaker-image".format(
@@ -72,32 +68,8 @@ def accommodation_to_list(self, accommodation):
accommodation_list.append(temp_dict)
return accommodation_list
-def final_stats(self, total):
-self.logger.info(
-"Total Shaker scenarios enabled by user: {}".format(total))
-self.logger.info(
-"Total number of Shaker tests executed: {}".format(
-self.test_count))
-self.logger.info(
-"Total number of Shaker tests passed: {}".format(self.pass_count))
-self.logger.info(
-"Total number of Shaker tests failed: {}".format(self.error_count))
-def update_tests(self):
-self.test_count += 1
-def update_pass_tests(self):
-self.pass_count += 1
-def update_fail_tests(self):
-self.error_count += 1
-def update_scenarios(self):
-self.scenario_count += 1
# Method to process JSON outputted by Shaker, model data in a format that can be consumed
# by ElasticSearch and ship the data to ES
def send_to_elastic(self, outputfile, browbeat_scenario,
shaker_uuid, es_ts, es_list, run, test_name, result_dir):
fname = outputfile
@@ -325,7 +297,6 @@ def error_update(self, result_dir, test_name, scenario, to_time, from_time,
self.logger.error("Failed Test: {}".format(scenario['name']))
self.logger.error("saved log to: {}.log".format(os.path.join(result_dir,
test_name)))
-self.update_fail_tests()
self.update_total_fail_tests()
self.get_time_dict(to_time, from_time, scenario['name'],
new_test_name, workload, "fail", index_status)
@@ -337,7 +308,6 @@ def success_update(self, result_dir, test_name, scenario, to_time,
format(os.path.join(result_dir, test_name)))
self.logger.info("saved log to: {}.log".format(os.path.join(result_dir,
test_name)))
-self.update_pass_tests()
self.update_total_pass_tests()
self.get_time_dict(to_time, from_time, scenario['name'],
new_test_name, workload, "pass", index_status)
@@ -381,7 +351,6 @@ def run_scenario(self, scenario, result_dir, test_name, filename,
from_time = time.time()
self.tools.run_cmd(cmd)
to_time = time.time()
-self.update_tests()
self.update_total_tests()
outputfile = os.path.join(result_dir, test_name + "." + "json")
if 'sleep_after' in self.config['shaker']:
@@ -406,7 +375,6 @@ def run_workload(self, workload, run_iteration):
venv = get_workload_venv('shaker', False)
self.shaker_checks()
-self.update_scenarios()
self.update_total_scenarios()
shaker_uuid = uuid.uuid4()
es_ts = datetime.datetime.utcnow()
browbeat/workloadbase.py
@@ -35,22 +35,6 @@ def __init__(self):
def run_workload(self, workload, run_iteration):
pass
-@abc.abstractmethod
-def update_fail_tests(self):
-pass
-@abc.abstractmethod
-def update_pass_tests(self):
-pass
-@abc.abstractmethod
-def update_scenarios(self):
-pass
-@abc.abstractmethod
-def update_tests(self):
-pass
def update_total_scenarios(self):
WorkloadBase.total_scenarios += 1
@@ -94,7 +78,7 @@ def get_time_dict(self, to_time, from_time, benchmark, test_name, workload, stat
'Elasticsearch Indexing': index_status})
@staticmethod
-def print_report(result_dir, time_stamp):
+def dump_report(result_dir, time_stamp):
with open(os.path.join(result_dir, time_stamp + '.' + 'report'), 'w') as yaml_file:
yaml_file.write("Browbeat Report Card\n")
if not WorkloadBase.browbeat:
@@ -104,7 +88,7 @@ def print_report(result_dir, time_stamp):
yaml.dump(WorkloadBase.browbeat, default_flow_style=False))
@staticmethod
-def print_summary():
+def display_summary():
WorkloadBase.logger.info("Total scenarios executed:{}".format(
WorkloadBase.total_scenarios))
WorkloadBase.logger.info("Total tests executed:{}".format(WorkloadBase.total_tests))
