
Include flake8 execution in tox -e linters

This fixes several flake8 failures and integrates flake8 into the
linters tox environment.

Rule W504 is disabled because there is currently no known way to avoid
flapping between W504 and W503.
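
For illustration, a minimal snippet (variable names invented) showing why
the two rules flap when both are enabled:

    # W504: line break after the binary operator ...
    total_time = (setup_time +
                  run_time)

    # ... and W503: line break before the binary operator, so every
    # wrapped binary expression trips one of the two warnings.
    total_time = (setup_time
                  + run_time)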

This allows us to retire the openstack-tox-pep8 job because the more
generic openstack-tox-linters job includes it. Developers will still be
able to conveniently run only pep8 if they want.
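
For example, after this change both invocations below should work
(a sketch of the expected workflow, assuming a standard tox setup):

    tox -e linters   # flake8 plus the ansible-lint checks
    tox -e pep8      # flake8 only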

Change-Id: I7da0f6f09a533dd1c4dc303029e8c587bc200f66
ssbarnea committed Oct 24, 2018
1 parent bac4e8a commit 90d9c1bc4156025dba2d3ef9c2fbe12e54978363
Showing with 59 additions and 53 deletions.
  1. +2 −0 browbeat.py
  2. +38 −33 browbeat/elastic.py
  3. +1 −0 browbeat/metadata.py
  4. +3 −3 browbeat/rally.py
  5. +1 −2 browbeat/shaker.py
  6. +1 −1 browbeat/tools.py
  7. +6 −9 browbeat/yoda.py
  8. +2 −3 tests/bootstrap/test_bootstrap.py
  9. +5 −2 tox.ini
@@ -34,6 +34,7 @@ def handle_signal(signum, stack):
     global terminate
     terminate = True

 signal.signal(signal.SIGINT, handle_signal)

+
 def run_iteration(_config, _cli_args, result_dir_ts, _logger, tools):
@@ -204,5 +205,6 @@ def main():
     _logger.info("Browbeat finished successfully, UUID: {}".format(browbeat.elastic.browbeat_uuid))
     sys.exit(0)

+
 if __name__ == '__main__':
     sys.exit(main())
@@ -179,6 +179,7 @@ def get_software_metadata(self, index, role, browbeat_uuid):
     this function will iterate through all the data points, combining the iteration
     and rerun data points into a single 95%tile.
     """
+
     def summarize_results(self, data, combined):
         summary = {}
         if combined:
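
As a hypothetical aside, the single 95%tile the docstring refers to can be
computed with numpy, which this module already uses (sample values invented):

    import numpy

    # Collapse repeated measurements of one action into a single
    # 95th-percentile value, as the docstring above describes.
    action_durations = [1.2, 1.4, 1.1, 5.0, 1.3]
    p95 = numpy.percentile(action_durations, 95)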
@@ -240,6 +241,7 @@ def summarize_results(self, data, combined):
     """
     """
+
     def compare_rally_results(self, data, uuids, combined, metadata=None):
         missing = []
         if len(data) < 2:
@@ -249,13 +251,13 @@ def compare_rally_results(self, data, uuids, combined, metadata=None):
                 self.logger.error("Not able to find UUID in data set")
                 return False
         if combined:
-            print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
-            print "{0:33} | {1:40} | {2:10} | {3:10} | {4:13} ".format("Scenario",
+            print("+{}+".format("-" * (33 + 44 + 10 + 10 + 23)))
+            print("{0:33} | {1:40} | {2:10} | {3:10} | {4:13} ".format("Scenario",
                                                                        "Action",
                                                                        uuids[0][-8:],
                                                                        uuids[1][-8:],
-                                                                       "% Difference")
-            print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
+                                                                       "% Difference"))
+            print("+{}+".format("-" * (33 + 44 + 10 + 10 + 23)))
             for scenario in data[uuids[0]]:
                 if scenario not in data[uuids[1]]:
                     missing.append(scenario)
@@ -268,23 +270,23 @@ def compare_rally_results(self, data, uuids, combined, metadata=None):
             perf1 = data[uuids[1]][scenario][action]
             diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
-            print "{0:33} | {1:40} | {2:10.3f} | {3:10.3f} | {4:13.3f}".format(scenario,
+            print("{0:33} | {1:40} | {2:10.3f} | {3:10.3f} | {4:13.3f}".format(scenario,
                                                                                action,
                                                                                perf0,
                                                                                perf1,
-                                                                               diff)
-            print "+{}+".format("-" * (33 + 44 + 10 + 10 + 26))
+                                                                               diff))
+            print("+{}+".format("-" * (33 + 44 + 10 + 10 + 26)))
         else:
-            print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
-            print "{0:33} | {1:40} | {2:15} | {3:15} | {4:10} | {5:10} | {6:23}".format(
+            print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
+            print("{0:33} | {1:40} | {2:15} | {3:15} | {4:10} | {5:10} | {6:23}".format(
                 "Scenario",
                 "Action",
                 "times",
                 "concurrency",
                 uuids[0][-8:],
                 uuids[1][-8:],
-                "% Difference")
-            print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
+                "% Difference"))
+            print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
             for scenario in data[uuids[0]]:
                 if scenario not in data[uuids[1]]:
                     missing.append(scenario)
@@ -314,38 +316,39 @@ def compare_rally_results(self, data, uuids, combined, metadata=None):
             diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
             output = "{0:33} | {1:40} | {2:15} | {3:15} "
             output += "| {4:10.3f} | {5:10.3f} | {6:13.3f}"
-            print output.format(scenario,
+            print(output.format(scenario,
                                 action,
                                 times,
                                 concurrency,
                                 perf0,
                                 perf1,
-                                diff)
-            print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
+                                diff))
+            print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
         if metadata:
-            print "+{}+".format("-" * (40 + 20 + 20 + 33))
-            print "{0:40} | {1:20} | {2:20} | {3:20}".format("UUID", "Version", "Build",
-                                                             "Number of runs")
-            print "+{}+".format("-" * (40 + 20 + 20 + 33))
+            print("+{}+".format("-" * (40 + 20 + 20 + 33)))
+            print("{0:40} | {1:20} | {2:20} | {3:20}".format("UUID", "Version", "Build",
+                                                             "Number of runs"))
+            print("+{}+".format("-" * (40 + 20 + 20 + 33)))
             for uuids in metadata:
-                print "{0:40} | {1:20} | {2:20} | {3:20}".format(uuids,
+                print("{0:40} | {1:20} | {2:20} | {3:20}".format(uuids,
                                                                  metadata[uuids][
                                                                      'version'],
                                                                  metadata[uuids][
                                                                      'build'],
-                                                                 metadata[uuids]['rerun'])
-            print "+{}+".format("-" * (40 + 20 + 20 + 33))
+                                                                 metadata[uuids]['rerun']))
+            print("+{}+".format("-" * (40 + 20 + 20 + 33)))
         if len(missing) > 0:
-            print "+-------------------------------------+"
-            print "Missing Scenarios to compare results:"
-            print "+-------------------------------------+"
+            print("+-------------------------------------+")
+            print("Missing Scenarios to compare results:")
+            print("+-------------------------------------+")
             for scenario in missing:
-                print " - {}".format(scenario)
+                print(" - {}".format(scenario))

     """
     returns a list of dicts that contain 95%tile performance data.
     """
+
     def get_result_data(self, index, browbeat_uuid):
         results = []
         data = []
@@ -395,6 +398,7 @@ def get_version_metadata(self, index, browbeat_uuid):
     Currently this function will only compare two uuids. I (rook) am not convinced it is worth
     the effort to engineer anything > 2.
     """
+
     def compare_metadata(self, index, role, uuids):
         meta = []
         for browbeat_uuid in uuids:
@@ -490,24 +494,24 @@ def compare_metadata(self, index, role, uuids):
                         "Host [{}] Service [{}] {} [{}]".format(
                             uuids[1], host, service, options, key))

-        print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
-        print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(
+        print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))
+        print("{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(
             "Host",
             "Service",
             "Option",
             "Key",
             "Old Value",
-            "New Value")
-        print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
-        for difference in differences :
+            "New Value"))
+        print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))
+        for difference in differences:
             value = difference.split("|")
-            print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(value[0],
+            print("{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(value[0],
                                                                                 value[1],
                                                                                 value[2],
                                                                                 value[3],
                                                                                 value[4],
-                                                                                value[5])
-        print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
+                                                                                value[5]))
+        print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))

     def scroll(self, search, sid, scroll_size):
         data = []
@@ -528,6 +532,7 @@ def scroll(self, search, sid, scroll_size):
     index, however, this is quite expensive, and it might be quicker to
     only look for errors for specific browbeat_uuids
     """
+
     def get_errors(self, index, browbeat_id):
         self.logger.info("Making query against {}".format(index))
         page = self.es.search(
@@ -135,5 +135,6 @@ def main():
     metadata.write_metadata_file(
         software_data, os.path.join(args.path, 'software-metadata.json'))

+
 if __name__ == '__main__':
     sys.exit(main())
@@ -73,7 +73,7 @@ def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmar
         return (from_time, to_time)

     def get_task_id(self, test_name):
-        cmd = "grep \"rally task report [a-z0-9\-]* --out\" {}.log | awk '{{print $4}}'".format(
+        cmd = "grep \"rally task report [a-z0-9\\-]* --out\" {}.log | awk '{{print $4}}'".format(
             test_name)
         return self.tools.run_cmd(cmd)['stdout']
@@ -147,7 +147,7 @@ def json_parse(self, json_doc, metadata={}):
                     iteration = 1
                     workload_name = value
                     if value.find('(') is not -1:
-                        iteration = re.findall('\d+', value)[0]
+                        iteration = re.findall(r'\d+', value)[0]
                         workload_name = value.split('(')[0]
                     error = {'action': workload_name.strip(),
                              'iteration': iteration,
@@ -164,7 +164,7 @@ def json_parse(self, json_doc, metadata={}):
                     iteration = 1
                     workload_name = workload
                     if workload.find('(') is not -1:
-                        iteration = re.findall('\d+', workload)[0]
+                        iteration = re.findall(r'\d+', workload)[0]
                         workload_name = workload.split('(')[0]
                     rally_stats = {'action': workload_name.strip(),
                                    'iteration': iteration,
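
The r'' prefixes above fix flake8's invalid-escape-sequence warning (W605)
without changing the pattern; a standalone example (sample string invented):

    import re

    # '\d' is not a recognized string escape, so the old '\d+' only
    # worked because Python passes unknown escapes through (with a
    # DeprecationWarning on newer versions); a raw string is explicit.
    assert re.findall(r'\d+', 'boot_server (12)') == ['12']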
@@ -383,8 +383,7 @@ def run_workload(self, workload, run_iteration):
         test_time = workload.get("time", 60)
         for interval in range(0, test_time + 9):
             es_list.append(
-                datetime.datetime.utcnow() +
-                datetime.timedelta(0, interval))
+                datetime.datetime.utcnow() + datetime.timedelta(0, interval))

         rerun_range = range(self.config["browbeat"]["rerun"])
         if self.config["browbeat"]["rerun_type"] == "complete":
@@ -143,7 +143,7 @@ def post_process(self, cli):
         """ Capture per-workload results """
         workload_results = {}
-        json = re.compile("\.json")
+        json = re.compile(r"\.json")
         if len(results) > 0:
             for path in results:
                 for regex in workloads:
@@ -244,9 +244,9 @@ def watch_introspecting_nodes(self, nodes, timeout, conn, results):
                 time.sleep(10)
                 continue
             if node_obj is None:
-                self.logger.error("Can't find node " + node +
-                                  " Which existed at the start of introspection \
-                                  did you delete it manually?")
+                self.logger.error(
+                    "Can't find node %s which existed at the start of "
+                    "introspection did you delete it manually?", node)
                 continue

             # == works here for string comparison because they are in fact
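
The rewritten call above also moves the node name out of string
concatenation and into logging's %-style arguments, which defers
interpolation until the record is actually emitted; a minimal illustration
(logger name and node value invented):

    import logging

    logger = logging.getLogger("browbeat.yoda")

    # "node-0" is only interpolated into the message if this record
    # passes the logger's level and filter checks.
    logger.error("Can't find node %s which existed at the start of "
                 "introspection did you delete it manually?", "node-0")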
@@ -266,8 +266,7 @@ def watch_introspecting_nodes(self, nodes, timeout, conn, results):
                     node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])
                 times.append(
-                    (datetime.datetime.utcnow() -
-                     start_time).total_seconds())
+                    (datetime.datetime.utcnow() - start_time).total_seconds())
             elif (datetime.datetime.utcnow() - start_time) > timeout:
                 # return currently active node to the deque to be failed
@@ -533,8 +532,7 @@ def introspection_workload(
                     benchmark['timeout'], env_setup, conn)
             else:
                 self.logger.error(
-                    "Malformed YODA configuration for " +
-                    benchmark['name'])
+                    "Malformed YODA configuration for " + benchmark['name'])
                 exit(1)

         self.get_stats()
@@ -607,8 +605,7 @@ def overcloud_workload(self, benchmark, run, results_dir, env_setup, conn):
                 benchmark)
             results['total_time'] = (
-                datetime.datetime.utcnow() -
-                start_time).total_seconds()
+                datetime.datetime.utcnow() - start_time).total_seconds()
             try:
                 stack_status = conn.orchestration.find_stack("overcloud")
             except exceptions.SDKException:
@@ -12,11 +12,10 @@
 import os
 import sys

+sys.path.append(os.path.abspath('ansible'))
 import pytest

-sys.path.append(os.path.abspath('ansible'))
-import bootstrap
+import bootstrap  # noqa


 def test_bootstrap_help(capsys):
     """Tests to see if bootstrap.py help text is correct and that it loads sample/tripleo plugins"""
@@ -1,6 +1,6 @@
 [tox]
 minversion = 2.0
-envlist = py27,py35,py36,pep8,linters
+envlist = py27,py35,py36,linters
 skipsdist = True

 [testenv]
@@ -12,8 +12,11 @@ deps = -r{toxinidir}/test-requirements.txt
 commands = python setup.py test

 [testenv:linters]
+# py3 linters are able to spot more than py2 ones
+basepython = python3
 whitelist_externals = bash
 commands =
+    {[testenv:pep8]commands}
     bash -c "cd ansible; find . -type f -regex '.*.y[a]?ml' -print0 | xargs -t -n1 -0 \
         ansible-lint \
         -x ANSIBLE0012,ANSIBLE0006,ANSIBLE0007,ANSIBLE0016,ANSIBLE0019"
@@ -53,7 +56,7 @@ commands = oslo_debug_helper {posargs}

 [flake8]
 # E123, E125 skipped as they are invalid PEP-8.
 show-source = True
-ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405
+ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405,W504
 max-line-length = 100
 builtins = _
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*,.browbeat-venv,.perfkit-venv,.rally-venv,.shaker-venv
