
Commit

[tools] Run autopep8 and apply fixes found. (#739)
JBakamovic authored and LebedevRI committed Dec 7, 2018
1 parent eafa34a commit eee8b05
Showing 3 changed files with 15 additions and 13 deletions.
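The commit message does not record the exact command used, so the following is only a sketch of how a cleanup like this might be reproduced with autopep8; the file path, options, and CLI form shown are assumptions, not taken from the commit.

# Sketch only, not the author's actual invocation: run autopep8's
# programmatic API over one of the touched files and write it back.
# Assumed CLI equivalent: autopep8 --in-place tools/gbench/util.py
import autopep8  # pip install autopep8

path = 'tools/gbench/util.py'
with open(path) as f:
    source = f.read()

# fix_code() applies pycodestyle-driven fixes (blank lines between
# top-level defs, comment spacing, operator spacing, and similar).
fixed = autopep8.fix_code(source)

with open(path, 'w') as f:
    f.write(fixed)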
6 changes: 2 additions & 4 deletions tools/compare.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python

+import unittest
 """
 compare.py - versatile benchmark output compare tool
 """
@@ -244,9 +245,6 @@ def main():
         print(ln)


-import unittest
-
-
 class TestParser(unittest.TestCase):
     def setUp(self):
         self.parser = create_parser()
@@ -402,7 +400,7 @@ def test_benchmarksfiltered_with_remainder_after_doubleminus(self):


 if __name__ == '__main__':
-    #unittest.main()
+    # unittest.main()
     main()

 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

7 changes: 3 additions & 4 deletions tools/gbench/report.py
@@ -1,3 +1,4 @@
+import unittest
 """report.py - Utilities for reporting statistics about benchmark results
 """
 import os
@@ -270,9 +271,6 @@ def get_color(res):
 # Unit tests


-import unittest
-
-
 class TestGetUniqueBenchmarkNames(unittest.TestCase):
     def load_results(self):
         import json
@@ -290,7 +288,7 @@ def test_basic(self):
             'BM_One',
             'BM_Two',
             'short',  # These two are not sorted
-            'medium', # These two are not sorted
+            'medium',  # These two are not sorted
         ]
         json = self.load_results()
         output_lines = get_unique_benchmark_names(json)
@@ -300,6 +298,7 @@ def test_basic(self):
         for i in range(0, len(output_lines)):
             self.assertEqual(expect_lines[i], output_lines[i])

+
 class TestReportDifference(unittest.TestCase):
     def load_results(self):
         import json

15 changes: 10 additions & 5 deletions tools/gbench/util.py
@@ -7,11 +7,13 @@
 import sys

 # Input file type enumeration
-IT_Invalid    = 0
-IT_JSON       = 1
+IT_Invalid = 0
+IT_JSON = 1
 IT_Executable = 2

 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+
+
 def is_executable_file(filename):
     """
     Return 'True' if 'filename' names a valid file which is likely
@@ -46,7 +48,7 @@ def is_json_file(filename):
         with open(filename, 'r') as f:
             json.load(f)
         return True
-    except:
+    except BaseException:
         pass
     return False

@@ -84,6 +86,7 @@ def check_input_file(filename):
         sys.exit(1)
     return ftype

+
 def find_benchmark_flag(prefix, benchmark_flags):
     """
     Search the specified list of flags for a flag matching `<prefix><arg>` and
@@ -97,6 +100,7 @@ def find_benchmark_flag(prefix, benchmark_flags):
             result = f[len(prefix):]
     return result

+
 def remove_benchmark_flags(prefix, benchmark_flags):
     """
     Return a new list containing the specified benchmark_flags except those
@@ -105,6 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     assert prefix.startswith('--') and prefix.endswith('=')
     return [f for f in benchmark_flags if not f.startswith(prefix)]

+
 def load_benchmark_results(fname):
     """
     Read benchmark output from a file and return the JSON object.
@@ -129,7 +134,7 @@ def run_benchmark(exe_name, benchmark_flags):
     thandle, output_name = tempfile.mkstemp()
     os.close(thandle)
     benchmark_flags = list(benchmark_flags) + \
-                      ['--benchmark_out=%s' % output_name]
+        ['--benchmark_out=%s' % output_name]

     cmd = [exe_name] + benchmark_flags
     print("RUNNING: %s" % ' '.join(cmd))
@@ -156,4 +161,4 @@ def run_or_load_benchmark(filename, benchmark_flags):
     elif ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
     else:
-        assert False # This branch is unreachable
+        assert False  # This branch is unreachable

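A side note on the bare `except:` change in tools/gbench/util.py above: pycodestyle reports a bare `except:` as E722, and autopep8's mechanical rewrite to `except BaseException:` catches exactly the same set of exceptions, so the behavior of is_json_file() is unchanged. A small illustrative sketch follows; the helper below is hypothetical and not part of the commit.

# Illustration only: a bare `except:` and `except BaseException:` are
# equivalent, which is why this autopep8 rewrite is purely cosmetic.
import json


def is_json_text(text):  # hypothetical helper mirroring is_json_file()
    try:
        json.loads(text)
        return True
    except BaseException:  # same catch set as a bare `except:`
        pass
    return False


print(is_json_text('{"a": 1}'))  # True
print(is_json_text('not json'))  # False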