
Fix up PEP8 prior to asking contributors to submit PEP8-compliant code; ignore E501: line too long errors for entire project.
1 parent 4bc1a30 · commit 471abdf9761d2c5660c3938aabb3fec803f4c559 · dbishop committed Feb 12, 2013
Showing with 91 additions and 60 deletions.
  1. +2 −0 .pep8
  2. +2 −0 CHANGELOG
  3. +13 −13 ssbench/master.py
  4. +49 −26 ssbench/tests/test_master.py
  5. +14 −14 ssbench/tests/test_scenario.py
  6. +10 −6 ssbench/tests/test_worker.py
  7. +1 −1 ssbench/worker.py
.pep8
@@ -0,0 +1,2 @@
+[pep8]
+ignore: E501
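With this file in place, E501 (line too long) is skipped project-wide. The pep8 tool of this vintage discovered a project-level .pep8 file automatically (later pycodestyle releases only read setup.cfg/tox.ini, so treat auto-discovery as version-dependent). A minimal sketch of the equivalent check driven from Python, assuming the StyleGuide API accepts an ignore list; the CLI analogue is pep8 --ignore=E501 ssbench/ :

    # Sketch: run the pep8 checker over the tree with E501 suppressed,
    # mirroring what the new .pep8 file configures.
    import pep8

    style = pep8.StyleGuide(ignore=['E501'])   # option spelling assumed
    report = style.check_files(['ssbench/'])   # walks the directory
    print('%d PEP8 violation(s) found' % report.total_errors)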
CHANGELOG
@@ -3,6 +3,8 @@ ssbench (0.0.12)
* Fixed #15: added CHANGELOG and AUTHORS; added "Contributing to ssbench"
section to README.
+ * Cleaned up PEP8 compliance and added .pep8 to ignore E501: line too long
+
ssbench (0.0.11)
* Added ssbench-master scenario op count override.
ssbench/master.py
@@ -82,7 +82,7 @@ def process_result_to(self, job, processor, label=''):
sys.stderr.write('O')
else:
sys.stderr.write('*')
- elif result.has_key('exception'):
+ elif 'exception' in result:
sys.stderr.write('X')
else:
sys.stderr.write('_')
@@ -173,11 +173,11 @@ def run_scenario(self, auth_url, user, key, storage_url, token, scenario,
# Enqueue initialization jobs
if not noop:
logging.info('Initializing cluster with stock data (up to %d '
- 'concurrent workers)', scenario.user_count)
+ 'concurrent workers)', scenario.user_count)
self.do_a_run(scenario.user_count, scenario.initial_jobs(),
- run_state.handle_initialization_result,
- ssbench.PRIORITY_SETUP, storage_url, token)
+ run_state.handle_initialization_result,
+ ssbench.PRIORITY_SETUP, storage_url, token)
logging.info('Starting benchmark run (up to %d concurrent '
'workers)', scenario.user_count)
@@ -202,10 +202,10 @@ def run_scenario(self, auth_url, user, key, storage_url, token, scenario,
if not noop:
logging.info('Deleting population objects from cluster')
self.do_a_run(scenario.user_count,
- run_state.cleanup_object_infos(),
- lambda *_: None,
- ssbench.PRIORITY_CLEANUP, storage_url, token,
- mapper_fn=_gen_cleanup_job)
+ run_state.cleanup_object_infos(),
+ lambda *_: None,
+ ssbench.PRIORITY_CLEANUP, storage_url, token,
+ mapper_fn=_gen_cleanup_job)
return run_state.run_results
@@ -274,7 +274,7 @@ def generate_scenario_report(self, scenario, stats):
'stop_time': datetime.utcfromtimestamp(
stats['time_series']['stop']).strftime(REPORT_TIME_FORMAT),
'duration': stats['time_series']['stop']
- - stats['time_series']['start_time'],
+ - stats['time_series']['start_time'],
}
return template.render(scenario=scenario, stats=stats, **tmpl_vars)
@@ -356,7 +356,7 @@ def calculate_scenario_stats(self, scenario, results, nth_pctile=95):
# 'last_byte_latency': 0.913769006729126,
# 'completed_at': 1324372892.360802,
#}
- #OR
+ # OR
# {
# 'worker_id': 1,
# 'type': 'get_object',
@@ -365,7 +365,7 @@ def calculate_scenario_stats(self, scenario, results, nth_pctile=95):
# }
logging.info('Calculating statistics for %d result items...',
len(results))
- agg_stats = dict(start=2**32, stop=0, req_count=0)
+ agg_stats = dict(start=2 ** 32, stop=0, req_count=0)
op_stats = {}
for crud_type in [ssbench.CREATE_OBJECT, ssbench.READ_OBJECT,
ssbench.UPDATE_OBJECT, ssbench.DELETE_OBJECT]:
@@ -376,7 +376,7 @@ def calculate_scenario_stats(self, scenario, results, nth_pctile=95):
req_completion_seconds = {}
start_time = 0
completion_time_max = 0
- completion_time_min = 2**32
+ completion_time_min = 2 ** 32
stats = dict(
nth_pctile=nth_pctile,
agg_stats=agg_stats,
@@ -530,4 +530,4 @@ def _rec_latency(self, stats_dict, result):
if worst_key not in stats_dict \
or result[latency_type] > stats_dict[worst_key][0]:
stats_dict[worst_key] = (round(result[latency_type], 6),
- result['trans_id'])
+ result['trans_id'])
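Most of the hunks above are whitespace-only, but the has_key change is substantive: dict.has_key() exists only in Python 2 and was removed in Python 3, while the in operator works on both and reads better. A self-contained illustration:

    result = {'exception': 'wacky!'}

    # Python 2 only -- removed in Python 3:
    #   result.has_key('exception')

    # Idiomatic on both Python 2 and 3:
    if 'exception' in result:
        print('found an exception entry')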
ssbench/tests/test_master.py
@@ -27,6 +27,7 @@
from ssbench.tests.test_scenario import ScenarioFixture
+
class TestMaster(ScenarioFixture, TestCase):
maxDiff = None
@@ -52,30 +53,45 @@ def setUp(self):
super(TestMaster, self).setUp()
self.stub_queue = flexmock()
- self.stub_queue.should_receive('watch').with_args(ssbench.STATS_TUBE).once
- self.stub_queue.should_receive('ignore').with_args(ssbench.DEFAULT_TUBE).once
+ self.stub_queue.should_receive(
+ 'watch').with_args(ssbench.STATS_TUBE).once
+ self.stub_queue.should_receive(
+ 'ignore').with_args(ssbench.DEFAULT_TUBE).once
self.master = Master(self.stub_queue)
self.result_index = 1 # for self.gen_result()
self.stub_results = [
- self.gen_result(1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0),
- self.gen_result(1, ssbench.READ_OBJECT, 'tiny', 103.0, 103.1, 103.8),
- self.gen_result(1, ssbench.CREATE_OBJECT, 'huge', 103.8, 105.0, 106.0),
- self.gen_result(1, ssbench.UPDATE_OBJECT, 'large', 106.1, 106.3, 106.4),
+ self.gen_result(
+ 1, ssbench.CREATE_OBJECT, 'small', 100.0, 101.0, 103.0),
+ self.gen_result(
+ 1, ssbench.READ_OBJECT, 'tiny', 103.0, 103.1, 103.8),
+ self.gen_result(
+ 1, ssbench.CREATE_OBJECT, 'huge', 103.8, 105.0, 106.0),
+ self.gen_result(
+ 1, ssbench.UPDATE_OBJECT, 'large', 106.1, 106.3, 106.4),
#
# exceptions should be ignored
- dict(worker_id=2, type=ssbench.UPDATE_OBJECT, completed_at=39293.2, exception='wacky!'),
- self.gen_result(2, ssbench.UPDATE_OBJECT, 'medium', 100.1, 100.9, 102.9),
- self.gen_result(2, ssbench.DELETE_OBJECT, 'large', 102.9, 103.0, 103.3),
- self.gen_result(2, ssbench.CREATE_OBJECT, 'tiny', 103.3, 103.4, 103.5),
- self.gen_result(2, ssbench.READ_OBJECT, 'small', 103.5, 103.7, 104.0),
+ dict(worker_id=2, type=ssbench.UPDATE_OBJECT,
+ completed_at=39293.2, exception='wacky!'),
+ self.gen_result(
+ 2, ssbench.UPDATE_OBJECT, 'medium', 100.1, 100.9, 102.9),
+ self.gen_result(
+ 2, ssbench.DELETE_OBJECT, 'large', 102.9, 103.0, 103.3),
+ self.gen_result(
+ 2, ssbench.CREATE_OBJECT, 'tiny', 103.3, 103.4, 103.5),
+ self.gen_result(
+ 2, ssbench.READ_OBJECT, 'small', 103.5, 103.7, 104.0),
#
- self.gen_result(3, ssbench.READ_OBJECT, 'tiny', 100.1, 101.1, 101.9),
+ self.gen_result(
+ 3, ssbench.READ_OBJECT, 'tiny', 100.1, 101.1, 101.9),
# worker 3 took a while (observer lower concurrency in second 102
- self.gen_result(3, ssbench.DELETE_OBJECT, 'small', 103.1, 103.6, 103.9),
- self.gen_result(3, ssbench.READ_OBJECT, 'medium', 103.9, 104.2, 104.3),
- self.gen_result(3, ssbench.UPDATE_OBJECT, 'tiny', 104.3, 104.9, 104.999),
+ self.gen_result(
+ 3, ssbench.DELETE_OBJECT, 'small', 103.1, 103.6, 103.9),
+ self.gen_result(
+ 3, ssbench.READ_OBJECT, 'medium', 103.9, 104.2, 104.3),
+ self.gen_result(
+ 3, ssbench.UPDATE_OBJECT, 'tiny', 104.3, 104.9, 104.999),
]
def tearDown(self):
@@ -99,8 +115,10 @@ def gen_result(self, worker_id, op_type, size_str, start, first_byte,
}
def test_calculate_scenario_stats_aggregate(self):
- first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1, 0.2, 1, 0.5, 0.3, 0.6]
- last_byte_latency_all = [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2, 0.5, 1.8, 0.8, 0.4, 0.699]
+ first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1,
+ 0.2, 1, 0.5, 0.3, 0.6]
+ last_byte_latency_all = [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2,
+ 0.5, 1.8, 0.8, 0.4, 0.699]
scen_stats = self.master.calculate_scenario_stats(self.scenario,
self.stub_results)
self.assertDictEqual(dict(
@@ -121,15 +139,17 @@ def test_calculate_scenario_stats_aggregate(self):
pctile='%7.3f' % 3.0,
std_dev='%7.3f' % stats.lsamplestdev(last_byte_latency_all),
median=' 0.749', # XXX why??
- #median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
+ # median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
),
worst_first_byte_latency=(1.2, 'txID004'),
worst_last_byte_latency=(3.0, 'txID002'),
), scen_stats['agg_stats'])
def test_calculate_scenario_stats_aggregate_low_pctile(self):
- first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1, 0.2, 1, 0.5, 0.3, 0.6]
- last_byte_latency_all = [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2, 0.5, 1.8, 0.8, 0.4, 0.699]
+ first_byte_latency_all = [1, 0.1, 1.2, 0.2, 0.8, 0.1, 0.1,
+ 0.2, 1, 0.5, 0.3, 0.6]
+ last_byte_latency_all = [3, 0.8, 2.2, 0.3, 2.8, 0.4, 0.2,
+ 0.5, 1.8, 0.8, 0.4, 0.699]
scen_stats = self.master.calculate_scenario_stats(self.scenario,
self.stub_results,
nth_pctile=20)
@@ -151,7 +171,7 @@ def test_calculate_scenario_stats_aggregate_low_pctile(self):
pctile='%7.3f' % sorted(last_byte_latency_all)[2],
std_dev='%7.3f' % stats.lsamplestdev(last_byte_latency_all),
median=' 0.749', # XXX why??
- #median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
+ # median='%7.3f' % stats.lmedianscore(last_byte_latency_all),
),
worst_first_byte_latency=(1.2, 'txID004'),
worst_last_byte_latency=(3.0, 'txID002'),
@@ -183,7 +203,8 @@ def test_calculate_scenario_stats_worker1(self):
),
worst_first_byte_latency=(float(max(w1_first_byte_latency)),
'txID004'),
- worst_last_byte_latency=(float(max(w1_last_byte_latency)), 'txID002'),
+ worst_last_byte_latency=(
+ float(max(w1_last_byte_latency)), 'txID002'),
), scen_stats['worker_stats'][1])
def test_calculate_scenario_stats_worker2(self):
@@ -212,7 +233,8 @@ def test_calculate_scenario_stats_worker2(self):
),
worst_first_byte_latency=(float(max(w2_first_byte_latency)),
'txID006'),
- worst_last_byte_latency=(float(max(w2_last_byte_latency)), 'txID006'),
+ worst_last_byte_latency=(
+ float(max(w2_last_byte_latency)), 'txID006'),
), scen_stats['worker_stats'][2])
def test_calculate_scenario_stats_worker3(self):
@@ -239,8 +261,10 @@ def test_calculate_scenario_stats_worker3(self):
std_dev='%7.3f' % stats.lsamplestdev(w3_last_byte_latency),
median='%7.3f' % stats.lmedianscore(w3_last_byte_latency),
),
- worst_first_byte_latency=(float(max(w3_first_byte_latency)), 'txID010'),
- worst_last_byte_latency=(float(max(w3_last_byte_latency)), 'txID010'),
+ worst_first_byte_latency=(
+ float(max(w3_first_byte_latency)), 'txID010'),
+ worst_last_byte_latency=(
+ float(max(w3_last_byte_latency)), 'txID010'),
), scen_stats['worker_stats'][3])
def test_calculate_scenario_stats_create(self):
@@ -688,7 +712,6 @@ def test_write_rps_histogram(self):
['6', '2'],
], list(reader))
-
def test_generate_scenario_report(self):
# Time series (reqs completed each second
scen_stats = self.master.calculate_scenario_stats(self.scenario,
ssbench/tests/test_scenario.py
@@ -22,6 +22,7 @@
import ssbench
from ssbench.scenario import Scenario
+
class ScenarioFixture(object):
def setUp(self):
superclass = super(ScenarioFixture, self)
@@ -44,12 +45,12 @@ def setUp(self):
# shortcut in the definition of scenarios.
operation_count=5000,
# C R U D
- crud_profile=[6, 0, 0, 1], # maybe make this a dict?
+ crud_profile=[6, 0, 0, 1], # maybe make this a dict?
user_count=1,
)
self.write_scenario_file()
self.scenario = Scenario(self.stub_scenario_file)
-
+
def tearDown(self):
try:
os.unlink(self.stub_scenario_file)
@@ -61,15 +62,15 @@ def tearDown(self):
def write_scenario_file(self):
"""Generates a scenario file on disk (in /tmp).
-
+
The tearDown() method will delete the created file. Note that
only one scenario file created by this method can exist at any
time (a static path is reused). Change this behavior if needed.
-
+
:**contents: Contents of the JSON object which is the scenario data.
:returns: (nothing)
"""
-
+
fp = open(self.stub_scenario_file, 'w')
json.dump(self.scenario_dict, fp)
@@ -86,7 +87,7 @@ def test_basic_instantiation(self):
assert_dict_equal(self.scenario_dict, self.scenario._scenario_data)
def test_crud_pcts(self):
- assert_list_equal([6.0/7*100,0.0,0.0,1.0/7*100],
+ assert_list_equal([6.0 / 7 * 100, 0.0, 0.0, 1.0 / 7 * 100],
self.scenario.crud_pcts)
def test_bench_jobs(self):
@@ -98,19 +99,19 @@ def test_bench_jobs(self):
# Expect count of sizes to be +/- 10% of expected proportions (which are
# derived from the initial counts; 30%, 30%, 30%, 10% in this case)
size_counter = Counter([_['size_str'] for _ in jobs])
- assert_almost_equal(1500, size_counter['tiny'], delta=0.10*1500)
- assert_almost_equal(1500, size_counter['small'], delta=0.10*1500)
- assert_almost_equal(1500, size_counter['medium'], delta=0.10*1500)
- assert_almost_equal(500, size_counter['large'], delta=0.10*500)
+ assert_almost_equal(1500, size_counter['tiny'], delta=0.10 * 1500)
+ assert_almost_equal(1500, size_counter['small'], delta=0.10 * 1500)
+ assert_almost_equal(1500, size_counter['medium'], delta=0.10 * 1500)
+ assert_almost_equal(500, size_counter['large'], delta=0.10 * 500)
assert_not_in('huge', size_counter)
# From the CRUD profile, we should have 85.7% Create (6/7), and 14.3%
# Delete (1/7).
type_counter = Counter([_['type'] for _ in jobs])
assert_almost_equal(6 * 5000 / 7, type_counter[ssbench.CREATE_OBJECT],
- delta=0.10*6*5000/7)
+ delta=0.10 * 6 * 5000 / 7)
assert_almost_equal(5000 / 7, type_counter[ssbench.DELETE_OBJECT],
- delta=0.10*5000/7)
+ delta=0.10 * 5000 / 7)
def test_bench_job_0(self):
bench_job = self.scenario.bench_job('small', 0, 31)
@@ -186,12 +187,11 @@ def test_initial_jobs(self):
assert_dict_equal({
'type': ssbench.CREATE_OBJECT,
'size_str': 'tiny',
- 'name': 'tiny_000002', # <Usage><Type>######
+ 'name': 'tiny_000002', # <Usage><Type>######
}, jobs[4])
size_counter = Counter([_['size_str'] for _ in jobs])
assert_equal(300, size_counter['tiny'])
assert_equal(300, size_counter['small'])
assert_equal(300, size_counter['medium'])
assert_equal(100, size_counter['large'])
-
ssbench/tests/test_worker.py
@@ -107,7 +107,6 @@ def test_handle_delete_object(self):
trans_id='9bjkk', completed_at=self.stub_time)),
).once
self.mock_worker.handle_delete_object(object_info)
-
def test_handle_update_object(self):
object_info = {
@@ -201,23 +200,28 @@ def test_dispatching_value_error_exception(self):
def test_dispatching_upload_object(self):
# CREATE_OBJECT = 'upload_object' # includes obj name
info = {'type': ssbench.CREATE_OBJECT, 'a': 1}
- self.mock_worker.should_receive('handle_upload_object').with_args(info).once
+ self.mock_worker.should_receive(
+ 'handle_upload_object').with_args(info).once
self.mock_worker.handle_job(info)
def test_dispatching_get_object(self):
# READ_OBJECT = 'get_object' # does NOT include obj name to get
info = {'type': ssbench.READ_OBJECT, 'b': 2}
- self.mock_worker.should_receive('handle_get_object').with_args(info).once
+ self.mock_worker.should_receive(
+ 'handle_get_object').with_args(info).once
self.mock_worker.handle_job(info)
def test_dispatching_update_object(self):
# UPDATE_OBJECT = 'update_object' # does NOT include obj name to update
info = {'type': ssbench.UPDATE_OBJECT, 'c': 3}
- self.mock_worker.should_receive('handle_update_object').with_args(info).once
+ self.mock_worker.should_receive(
+ 'handle_update_object').with_args(info).once
self.mock_worker.handle_job(info)
def test_dispatching_delete_object(self):
- # DELETE_OBJECT = 'delete_object' # may or may not include obj name to delete
+ # DELETE_OBJECT = 'delete_object' # may or may not include obj name to
+ # delete
info = {'type': ssbench.DELETE_OBJECT, 'd': 4}
- self.mock_worker.should_receive('handle_delete_object').with_args(info).once
+ self.mock_worker.should_receive(
+ 'handle_delete_object').with_args(info).once
self.mock_worker.handle_job(info)
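The long flexmock setup lines above are re-wrapped to satisfy pep8's continuation-line checks (the E12x family; exact codes are version-dependent). A minimal, hypothetical example of the two wrapping styles this commit uses:

    def frobnicate(a, b, c, d):  # hypothetical helper for illustration
        return a + b + c + d

    # Visual indent: continuation lines align under the opening paren
    # (misalignment here is what E127/E128 report).
    total = frobnicate(1, 2,
                       3, 4)

    # Hanging indent: break right after the open paren, then one
    # uniform extra indent -- the style used for the long mock chains.
    total = frobnicate(
        1, 2, 3, 4)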
ssbench/worker.py
@@ -263,7 +263,7 @@ def handle_delete_object(self, object_info):
def handle_get_object(self, object_info):
headers, body_iter = self.ignoring_http_responses(
(404, 503), client.get_object, object_info,
- resp_chunk_size=2**16, toss_body=True)
+ resp_chunk_size=2 ** 16, toss_body=True)
# Having passed in toss_body=True, we don't need to "read" body_iter
# (which will actually just be an empty-string), and we'll have an
# accurate last_byte_latency in the headers.
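The change from 2**16 to 2 ** 16 here (and 2**32 in master.py) adds whitespace around arithmetic operators, which the pep8 checker of this era reported (the E225/E226 family; the exact code is version-dependent, so treat that as an assumption). For example:

    chunk = 2**16        # flagged: missing whitespace around operator
    chunk = 2 ** 16      # the spelling this commit standardizes on

    delta = 0.10*1500    # flagged for the same reason
    delta = 0.10 * 1500  # compliant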
