Include flake8 execution in tox -e linters
This fixes several flake8 failures and integrates flake8 into linters
tox environment.

Rule W504 is disabled because at this moment there is no known way
to avoid flapping between W504 and W503.
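
For context, W503 and W504 flag the two opposite placements of a line break
around a binary operator, so any wrapped expression trips one of the pair
unless one rule is ignored. A minimal sketch (placeholder variable names for
illustration only, not code from this repository):

    # Flagged by W504: line break *after* the binary operator
    total = (first_value +
             second_value)

    # Flagged by W503: line break *before* the binary operator
    total = (first_value
             + second_value)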

This allows us to retire the openstack-tox-pep8 job because the more
generic openstack-tox-linters includes it. Still, developers will
be able to conveniently run only pep8 if they want.
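
For example (assuming the existing [testenv:pep8] section is kept, since the
linters environment reuses its commands), both invocations remain available:

    tox -e linters   # flake8 plus ansible-lint and the other linters
    tox -e pep8      # flake8 only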

Change-Id: I7da0f6f09a533dd1c4dc303029e8c587bc200f66
ssbarnea committed Oct 24, 2018
1 parent bac4e8a commit 90d9c1b
Showing 9 changed files with 59 additions and 53 deletions.
2 changes: 2 additions & 0 deletions browbeat.py
@@ -34,6 +34,7 @@ def handle_signal(signum, stack):
global terminate
terminate = True


signal.signal(signal.SIGINT, handle_signal)

def run_iteration(_config, _cli_args, result_dir_ts, _logger, tools):
@@ -204,5 +205,6 @@ def main():
_logger.info("Browbeat finished successfully, UUID: {}".format(browbeat.elastic.browbeat_uuid))
sys.exit(0)


if __name__ == '__main__':
sys.exit(main())
71 changes: 38 additions & 33 deletions browbeat/elastic.py
@@ -179,6 +179,7 @@ def get_software_metadata(self, index, role, browbeat_uuid):
this function will iterate through all the data points, combining the iteration
and rerun data points into a single 95%tile.
"""

def summarize_results(self, data, combined):
summary = {}
if combined:
@@ -240,6 +241,7 @@ def summarize_results(self, data, combined):

"""
"""

def compare_rally_results(self, data, uuids, combined, metadata=None):
missing = []
if len(data) < 2:
@@ -249,13 +251,13 @@ def compare_rally_results(self, data, uuids, combined, metadata=None):
self.logger.error("Not able to find UUID in data set")
return False
if combined:
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
print "{0:33} | {1:40} | {2:10} | {3:10} | {4:13} ".format("Scenario",
print("+{}+".format("-" * (33 + 44 + 10 + 10 + 23)))
print("{0:33} | {1:40} | {2:10} | {3:10} | {4:13} ".format("Scenario",
"Action",
uuids[0][-8:],
uuids[1][-8:],
"% Difference")
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
"% Difference"))
print("+{}+".format("-" * (33 + 44 + 10 + 10 + 23)))
for scenario in data[uuids[0]]:
if scenario not in data[uuids[1]]:
missing.append(scenario)
@@ -268,23 +270,23 @@ def compare_rally_results(self, data, uuids, combined, metadata=None):
perf1 = data[uuids[1]][scenario][action]
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100

print "{0:33} | {1:40} | {2:10.3f} | {3:10.3f} | {4:13.3f}".format(scenario,
print("{0:33} | {1:40} | {2:10.3f} | {3:10.3f} | {4:13.3f}".format(scenario,
action,
perf0,
perf1,
diff)
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 26))
diff))
print("+{}+".format("-" * (33 + 44 + 10 + 10 + 26)))
else:
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
print "{0:33} | {1:40} | {2:15} | {3:15} | {4:10} | {5:10} | {6:23}".format(
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
print("{0:33} | {1:40} | {2:15} | {3:15} | {4:10} | {5:10} | {6:23}".format(
"Scenario",
"Action",
"times",
"concurrency",
uuids[0][-8:],
uuids[1][-8:],
"% Difference")
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
"% Difference"))
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
for scenario in data[uuids[0]]:
if scenario not in data[uuids[1]]:
missing.append(scenario)
@@ -314,38 +316,39 @@ def compare_rally_results(self, data, uuids, combined, metadata=None):
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
output = "{0:33} | {1:40} | {2:15} | {3:15} "
output += "| {4:10.3f} | {5:10.3f} | {6:13.3f}"
print output.format(scenario,
print(output.format(scenario,
action,
times,
concurrency,
perf0,
perf1,
diff)
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
diff))
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
if metadata:
print "+{}+".format("-" * (40 + 20 + 20 + 33))
print "{0:40} | {1:20} | {2:20} | {3:20}".format("UUID", "Version", "Build",
"Number of runs")
print "+{}+".format("-" * (40 + 20 + 20 + 33))
print("+{}+".format("-" * (40 + 20 + 20 + 33)))
print("{0:40} | {1:20} | {2:20} | {3:20}".format("UUID", "Version", "Build",
"Number of runs"))
print("+{}+".format("-" * (40 + 20 + 20 + 33)))
for uuids in metadata:
print "{0:40} | {1:20} | {2:20} | {3:20}".format(uuids,
print("{0:40} | {1:20} | {2:20} | {3:20}".format(uuids,
metadata[uuids][
'version'],
metadata[uuids][
'build'],
metadata[uuids]['rerun'])
metadata[uuids]['rerun']))

print "+{}+".format("-" * (40 + 20 + 20 + 33))
print("+{}+".format("-" * (40 + 20 + 20 + 33)))
if len(missing) > 0:
print "+-------------------------------------+"
print "Missing Scenarios to compare results:"
print "+-------------------------------------+"
print("+-------------------------------------+")
print("Missing Scenarios to compare results:")
print("+-------------------------------------+")
for scenario in missing:
print " - {}".format(scenario)
print(" - {}".format(scenario))

"""
returns a list of dicts that contain 95%tile performance data.
"""

def get_result_data(self, index, browbeat_uuid):
results = []
data = []
@@ -395,6 +398,7 @@ def get_version_metadata(self, index, browbeat_uuid):
Currently this function will only compare two uuids. I (rook) am not convinced it is worth
the effort to engineer anything > 2.
"""

def compare_metadata(self, index, role, uuids):
meta = []
for browbeat_uuid in uuids:
@@ -490,24 +494,24 @@ def compare_metadata(self, index, role, uuids):
"Host [{}] Service [{}] {} [{}]".format(
uuids[1], host, service, options, key))

print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))
print("{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(
"Host",
"Service",
"Option",
"Key",
"Old Value",
"New Value")
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
for difference in differences :
"New Value"))
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))
for difference in differences:
value = difference.split("|")
print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(value[0],
print("{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(value[0],
value[1],
value[2],
value[3],
value[4],
value[5])
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
value[5]))
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))

def scroll(self, search, sid, scroll_size):
data = []
@@ -528,6 +532,7 @@ def scroll(self, search, sid, scroll_size):
index, however, this is quite expensive, and it might be quicker to
only look for errors for specific browbeat_uuids
"""

def get_errors(self, index, browbeat_id):
self.logger.info("Making query against {}".format(index))
page = self.es.search(
1 change: 1 addition & 0 deletions browbeat/metadata.py
@@ -135,5 +135,6 @@ def main():
metadata.write_metadata_file(
software_data, os.path.join(args.path, 'software-metadata.json'))


if __name__ == '__main__':
sys.exit(main())
6 changes: 3 additions & 3 deletions browbeat/rally.py
@@ -73,7 +73,7 @@ def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmar
return (from_time, to_time)

def get_task_id(self, test_name):
cmd = "grep \"rally task report [a-z0-9\-]* --out\" {}.log | awk '{{print $4}}'".format(
cmd = "grep \"rally task report [a-z0-9\\-]* --out\" {}.log | awk '{{print $4}}'".format(
test_name)
return self.tools.run_cmd(cmd)['stdout']

@@ -147,7 +147,7 @@ def json_parse(self, json_doc, metadata={}):
iteration = 1
workload_name = value
if value.find('(') is not -1:
iteration = re.findall('\d+', value)[0]
iteration = re.findall(r'\d+', value)[0]
workload_name = value.split('(')[0]
error = {'action': workload_name.strip(),
'iteration': iteration,
@@ -164,7 +164,7 @@ def json_parse(self, json_doc, metadata={}):
iteration = 1
workload_name = workload
if workload.find('(') is not -1:
iteration = re.findall('\d+', workload)[0]
iteration = re.findall(r'\d+', workload)[0]
workload_name = workload.split('(')[0]
rally_stats = {'action': workload_name.strip(),
'iteration': iteration,
3 changes: 1 addition & 2 deletions browbeat/shaker.py
@@ -383,8 +383,7 @@ def run_workload(self, workload, run_iteration):
test_time = workload.get("time", 60)
for interval in range(0, test_time + 9):
es_list.append(
datetime.datetime.utcnow() +
datetime.timedelta(0, interval))
datetime.datetime.utcnow() + datetime.timedelta(0, interval))

rerun_range = range(self.config["browbeat"]["rerun"])
if self.config["browbeat"]["rerun_type"] == "complete":
2 changes: 1 addition & 1 deletion browbeat/tools.py
@@ -143,7 +143,7 @@ def post_process(self, cli):

""" Capture per-workload results """
workload_results = {}
json = re.compile("\.json")
json = re.compile(r"\.json")
if len(results) > 0:
for path in results:
for regex in workloads:
15 changes: 6 additions & 9 deletions browbeat/yoda.py
@@ -244,9 +244,9 @@ def watch_introspecting_nodes(self, nodes, timeout, conn, results):
time.sleep(10)
continue
if node_obj is None:
self.logger.error("Can't find node " + node +
" Which existed at the start of introspection \
did you delete it manually?")
self.logger.error(
"Can't find node %s which existed at the start of "
"introspection did you delete it manually?", node)
continue

# == works here for string comparison because they are in fact
@@ -266,8 +266,7 @@ def watch_introspecting_nodes(self, nodes, timeout, conn, results):
node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])

times.append(
(datetime.datetime.utcnow() -
start_time).total_seconds())
(datetime.datetime.utcnow() - start_time).total_seconds())

elif (datetime.datetime.utcnow() - start_time) > timeout:
# return currently active node to the deque to be failed
@@ -533,8 +532,7 @@ def introspection_workload(
benchmark['timeout'], env_setup, conn)
else:
self.logger.error(
"Malformed YODA configuration for " +
benchmark['name'])
"Malformed YODA configuration for " + benchmark['name'])
exit(1)

self.get_stats()
@@ -607,8 +605,7 @@ def overcloud_workload(self, benchmark, run, results_dir, env_setup, conn):
benchmark)

results['total_time'] = (
datetime.datetime.utcnow() -
start_time).total_seconds()
datetime.datetime.utcnow() - start_time).total_seconds()
try:
stack_status = conn.orchestration.find_stack("overcloud")
except exceptions.SDKException:
5 changes: 2 additions & 3 deletions tests/bootstrap/test_bootstrap.py
@@ -12,11 +12,10 @@

import os
import sys
sys.path.append(os.path.abspath('ansible'))

import pytest
sys.path.append(os.path.abspath('ansible'))
import bootstrap # noqa

import bootstrap

def test_bootstrap_help(capsys):
"""Tests to see if bootstrap.py help text is correct and that it loads sample/tripleo plugins"""
7 changes: 5 additions & 2 deletions tox.ini
@@ -1,6 +1,6 @@
[tox]
minversion = 2.0
envlist = py27,py35,py36,pep8,linters
envlist = py27,py35,py36,linters
skipsdist = True

[testenv]
@@ -12,8 +12,11 @@ deps = -r{toxinidir}/test-requirements.txt
commands = python setup.py test

[testenv:linters]
# py3 linters are able to stop more than py2 ones
basepython = python3
whitelist_externals = bash
commands =
{[testenv:pep8]commands}
bash -c "cd ansible; find . -type f -regex '.*.y[a]?ml' -print0 | xargs -t -n1 -0 \
ansible-lint \
-x ANSIBLE0012,ANSIBLE0006,ANSIBLE0007,ANSIBLE0016,ANSIBLE0019" \
@@ -53,7 +56,7 @@ commands = oslo_debug_helper {posargs}
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
show-source = True
ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405
ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405,W504
max-line-length = 100
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*,.browbeat-venv,.perfkit-venv,.rally-venv,.shaker-venv
