
Reformatting by autopep8

1 parent e20af67 commit 8eb2b0d9a0503b19920867e364a83f1281b2d83f @svanoort committed Oct 11, 2015
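
autopep8 applies only PEP 8 presentation fixes: spacing around operators and after commas and colons, wrapping long lines to 79 columns, and blank lines between top-level definitions, so the hunks below change layout but not behavior. As a rough sketch of how a change set like this can be produced programmatically (the exact command used for this commit is not recorded, so the invocation and sample string below are assumptions), autopep8's documented fix_code API can be called directly:

    import autopep8  # pip install autopep8

    # Hypothetical messy input; default fixes add PEP 8 spacing only.
    messy = "x={'total_time':['total','mean']}\n"
    print(autopep8.fix_code(messy))
    # Should print: x = {'total_time': ['total', 'mean']}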
@@ -14,4 +14,4 @@
mycurl.close()
returnval = os.system('git --version > /dev/null')
-assert returnval == 0
+assert returnval == 0
@@ -14,4 +14,4 @@
mycurl.close()
returnval = os.system('git --version > /dev/null')
-assert returnval == 0
+assert returnval == 0
@@ -14,4 +14,4 @@
mycurl.close()
returnval = os.system('git --version > /dev/null')
-assert returnval == 0
+assert returnval == 0
@@ -6,6 +6,7 @@
import cProfile
-cProfile.run('resttest.command_line_run(["http://localhost:8000","pyresttest/content-test.yaml"])', sort='tottime')
+cProfile.run(
+ 'resttest.command_line_run(["http://localhost:8000","pyresttest/content-test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["http://localhost:8000","schema_test.yaml"])', sort='tottime')
-#cProfile.run('resttest.command_line_run(["https://api.github.com","github_api_test.yaml"])', sort='tottime')
+#cProfile.run('resttest.command_line_run(["https://api.github.com","github_api_test.yaml"])', sort='tottime')
@@ -13,7 +13,7 @@
test.benchmark_runs = 1000
test.raw_metrics = set()
test.metrics = {'total_time'}
-test.aggregated_metrics = {'total_time': ['total','mean']}
+test.aggregated_metrics = {'total_time': ['total', 'mean']}
# Basic get test
test.url = 'http://localhost:8000/api/person/'
@@ -28,10 +28,10 @@
test.headers = {'Content-Type': 'application/json'}
handler = ContentHandler()
handler.setup('{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "$id"}',
- is_template_content=True)
+ is_template_content=True)
test.body = handler
context = Context()
context.add_generator('gen', factory_generate_ids(starting_id=10)())
-test.generator_binds = {'id':'gen'}
+test.generator_binds = {'id': 'gen'}
print 'Running templated PUT test'
-cProfile.run('resttest.run_benchmark(test, context=context)', sort='cumtime')
+cProfile.run('resttest.run_benchmark(test, context=context)', sort='cumtime')
@@ -1 +1,2 @@
-__all__ = ["resttest","generators","binding","parsing","validators","contenthandling","benchmarks","tests"]
+__all__ = ["resttest", "generators", "binding", "parsing",
+ "validators", "contenthandling", "benchmarks", "tests"]
@@ -3,31 +3,31 @@
import timeit
# Test basic pycurl create/delete, time is ~2.5 microseconds
-time = timeit.timeit("mycurl=Curl(); mycurl.close()", setup="from pycurl import Curl", number=1000000)
-print('Curl create/destroy runtime for 1M runs (s)'+str(time))
+time = timeit.timeit("mycurl=Curl(); mycurl.close()",
+ setup="from pycurl import Curl", number=1000000)
+print('Curl create/destroy runtime for 1M runs (s)' + str(time))
# Test test interpret/build & configuration speeds for resttest
# Runtime is 36.29 sec, so 36 microseconds per run, or 0.036 ms
time = timeit.timeit("mytest=Test.parse_test('', input); mycurl=mytest.configure_curl(); mycurl.close()",
- setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}',
- number=1000000)
-print('Test interpret/configure test config for 1M runs (s)'+str(time))
+ setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}',
+ number=1000000)
+print('Test interpret/configure test config for 1M runs (s)' + str(time))
# Just configuring the curl object from a pre-built test
# 10s/1M runs, or 0.01 ms per
time = timeit.timeit("mycurl=mytest.configure_curl(); mycurl.close()",
- setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}; mytest=Test.parse_test("", input);',
- number=1000000)
-print('Test configure curl for 1M runs (s)'+str(time))
+ setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}; mytest=Test.parse_test("", input);',
+ number=1000000)
+print('Test configure curl for 1M runs (s)' + str(time))
# Time for full curl execution on Django testing rest app
# Time: 41.4s for 10k runs, or about 4.14 ms per
timeit.timeit("mycurl=mytest.configure_curl(); mycurl.setopt(pycurl.WRITEFUNCTION, lambda x: None); mycurl.perform(); mycurl.close()",
- setup='import pycurl; from resttest import Test; input = {"url": "/api/person/", "NAME":"foo", "group":"bar"}; mytest=Test.parse_test("http://localhost:8000", input);',
- number=10000)
+ setup='import pycurl; from resttest import Test; input = {"url": "/api/person/", "NAME":"foo", "group":"bar"}; mytest=Test.parse_test("http://localhost:8000", input);',
+ number=10000)
# Github perf test, 27 s for 100 runs = 270 ms per
timeit.timeit("mycurl=mytest.configure_curl(); mycurl.setopt(pycurl.WRITEFUNCTION, lambda x: None); mycurl.perform(); mycurl.close()",
- setup='import pycurl; from resttest import Test; input = {"url": "/search/users?q=jewzaam", "NAME":"foo", "group":"bar"}; mytest=Test.parse_test("https://api.github.com", input);',
- number=100)
-
+ setup='import pycurl; from resttest import Test; input = {"url": "/search/users?q=jewzaam", "NAME":"foo", "group":"bar"}; mytest=Test.parse_test("https://api.github.com", input);',
+ number=100)
@@ -26,85 +26,94 @@
"""
-#Curl metrics for benchmarking, key is name in config file, value is pycurl variable
-#Taken from pycurl docs, this is libcurl variable minus the CURLINFO prefix
+# Curl metrics for benchmarking, key is name in config file, value is pycurl variable
+# Taken from pycurl docs, this is libcurl variable minus the CURLINFO prefix
# Descriptions of the timing variables are taken from libcurl docs:
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
METRICS = {
- #Timing info, precisely in order from start to finish
- #The time it took from the start until the name resolving was completed.
- 'namelookup_time' : pycurl.NAMELOOKUP_TIME,
-
- #The time it took from the start until the connect to the remote host (or proxy) was completed.
- 'connect_time' : pycurl.CONNECT_TIME,
-
- #The time it took from the start until the SSL connect/handshake with the remote host was completed.
- 'appconnect_time' : pycurl.APPCONNECT_TIME,
-
- #The time it took from the start until the file transfer is just about to begin.
- #This includes all pre-transfer commands and negotiations that are specific to the particular protocol(s) involved.
- 'pretransfer_time' : pycurl.PRETRANSFER_TIME,
-
- #The time it took from the start until the first byte is received by libcurl.
- 'starttransfer_time' : pycurl.STARTTRANSFER_TIME,
-
- #The time it took for all redirection steps include name lookup, connect, pretransfer and transfer
- # before final transaction was started. So, this is zero if no redirection took place.
- 'redirect_time' : pycurl.REDIRECT_TIME,
-
- #Total time of the previous request.
- 'total_time' : pycurl.TOTAL_TIME,
-
-
- #Transfer sizes and speeds
- 'size_download' : pycurl.SIZE_DOWNLOAD,
- 'size_upload' : pycurl.SIZE_UPLOAD,
- 'request_size' : pycurl.REQUEST_SIZE,
- 'speed_download' : pycurl.SPEED_DOWNLOAD,
- 'speed_upload' : pycurl.SPEED_UPLOAD,
-
- #Connection counts
- 'redirect_count' : pycurl.REDIRECT_COUNT,
- 'num_connects' : pycurl.NUM_CONNECTS
+ # Timing info, precisely in order from start to finish
+ # The time it took from the start until the name resolving was completed.
+ 'namelookup_time': pycurl.NAMELOOKUP_TIME,
+
+ # The time it took from the start until the connect to the remote host (or
+ # proxy) was completed.
+ 'connect_time': pycurl.CONNECT_TIME,
+
+ # The time it took from the start until the SSL connect/handshake with the
+ # remote host was completed.
+ 'appconnect_time': pycurl.APPCONNECT_TIME,
+
+ # The time it took from the start until the file transfer is just about to begin.
+ # This includes all pre-transfer commands and negotiations that are
+ # specific to the particular protocol(s) involved.
+ 'pretransfer_time': pycurl.PRETRANSFER_TIME,
+
+ # The time it took from the start until the first byte is received by
+ # libcurl.
+ 'starttransfer_time': pycurl.STARTTRANSFER_TIME,
+
+ # The time it took for all redirection steps include name lookup, connect, pretransfer and transfer
+ # before final transaction was started. So, this is zero if no redirection
+ # took place.
+ 'redirect_time': pycurl.REDIRECT_TIME,
+
+ # Total time of the previous request.
+ 'total_time': pycurl.TOTAL_TIME,
+
+
+ # Transfer sizes and speeds
+ 'size_download': pycurl.SIZE_DOWNLOAD,
+ 'size_upload': pycurl.SIZE_UPLOAD,
+ 'request_size': pycurl.REQUEST_SIZE,
+ 'speed_download': pycurl.SPEED_DOWNLOAD,
+ 'speed_upload': pycurl.SPEED_UPLOAD,
+
+ # Connection counts
+ 'redirect_count': pycurl.REDIRECT_COUNT,
+ 'num_connects': pycurl.NUM_CONNECTS
}
-#Map statistical aggregate to the function to use to perform the aggregation on an array
+# Map statistical aggregate to the function to use to perform the
+# aggregation on an array
AGGREGATES = {
- 'mean_arithmetic': #AKA the average, good for many things
- lambda x: float(sum(x))/float(len(x)),
+ 'mean_arithmetic': # AKA the average, good for many things
+ lambda x: float(sum(x)) / float(len(x)),
'mean': # Alias for arithmetic mean
- lambda x: float(sum(x))/float(len(x)),
- 'mean_harmonic': #Harmonic mean, better predicts average of rates: http://en.wikipedia.org/wiki/Harmonic_mean
- lambda x: 1.0/( sum([1.0/float(y) for y in x]) / float(len(x))),
- 'median': lambda x: median(x),
+ lambda x: float(sum(x)) / float(len(x)),
+ 'mean_harmonic': # Harmonic mean, better predicts average of rates: http://en.wikipedia.org/wiki/Harmonic_mean
+ lambda x: 1.0 / (sum([1.0 / float(y) for y in x]) / float(len(x))),
+ 'median': lambda x: median(x),
'std_deviation': lambda x: std_deviation(x),
- 'sum' : lambda x: sum(x),
- 'total' : lambda x: sum(x)
+ 'sum': lambda x: sum(x),
+ 'total': lambda x: sum(x)
}
OUTPUT_FORMATS = [u'csv', u'json']
+
def median(array):
""" Get the median of an array """
sorted = [x for x in array]
sorted.sort()
- middle = len(sorted)/2 #Gets the middle element, if present
- if len(sorted) % 2 == 0: #Even, so need to average together the middle two values
- return float((sorted[middle]+sorted[middle-1]))/2
+ middle = len(sorted) / 2 # Gets the middle element, if present
+ if len(sorted) % 2 == 0: # Even, so need to average together the middle two values
+ return float((sorted[middle] + sorted[middle - 1])) / 2
else:
return sorted[middle]
+
def std_deviation(array):
""" Compute the standard deviation of an array of numbers """
if not array or len(array) == 1:
return 0
average = AGGREGATES['mean_arithmetic'](array)
- variance = map(lambda x: (x-average)**2,array)
+ variance = map(lambda x: (x - average)**2, array)
stdev = AGGREGATES['mean_arithmetic'](variance)
return math.sqrt(stdev)
+
class Benchmark(Test):
""" Extends test with configuration for benchmarking
warmup_runs and benchmark_runs behave like you'd expect
@@ -115,16 +124,18 @@ class Benchmark(Test):
- list contains aggregagate name from AGGREGATES
- value of 'all' returns everything
"""
- warmup_runs = 10 #Times call is executed to warm up
- benchmark_runs = 100 #Times call is executed to generate benchmark results
+ warmup_runs = 10 # Times call is executed to warm up
+ benchmark_runs = 100 # Times call is executed to generate benchmark results
output_format = u'csv'
output_file = None
- #Metrics to gather, both raw and aggregated
+ # Metrics to gather, both raw and aggregated
metrics = set()
raw_metrics = set() # Metrics that do not have any aggregation performed
- aggregated_metrics = dict() # Metrics where an aggregate is computed, maps key(metric name) -> list(aggregates to use)
+ # Metrics where an aggregate is computed, maps key(metric name) ->
+ # list(aggregates to use)
+ aggregated_metrics = dict()
def ninja_copy(self):
""" Optimization: limited, fast copy of benchmark, overrides Test parent method """
@@ -142,22 +153,24 @@ def add_metric(self, metric_name, aggregate=None):
clean_metric = metric_name.lower().strip()
if clean_metric.lower() not in METRICS:
- raise Exception("Metric named: " + metric_name + " is not a valid benchmark metric.")
+ raise Exception("Metric named: " + metric_name +
+ " is not a valid benchmark metric.")
self.metrics.add(clean_metric)
if not aggregate:
self.raw_metrics.add(clean_metric)
elif aggregate.lower().strip() in AGGREGATES:
# Add aggregate to this metric
clean_aggregate = aggregate.lower().strip()
- current_aggregates = self.aggregated_metrics.get(clean_metric, list())
+ current_aggregates = self.aggregated_metrics.get(
+ clean_metric, list())
current_aggregates.append(clean_aggregate)
- self.aggregated_metrics[clean_metric] = current_aggregates
+ self.aggregated_metrics[clean_metric] = current_aggregates
else:
- raise Exception("Aggregate function " + aggregate + " is not a legal aggregate function name");
-
- return self;
+ raise Exception("Aggregate function " + aggregate +
+ " is not a legal aggregate function name")
+ return self
def __init__(self):
self.metrics = set()
@@ -168,23 +181,29 @@ def __init__(self):
def __str__(self):
return json.dumps(self, default=safe_to_json)
+
def realize_partial(self, context=None):
""" Attempt to template out what is possible for this benchmark """
if not self.is_dynamic():
return self
if self.is_context_modifier():
- # Enhanceme - once extract is done, check if variables already bound, in that case template out
+ # Enhanceme - once extract is done, check if variables already bound,
+ # in that case template out
return self
else:
copyout = copy.cop
pass
+
def configure_curl(self, timeout=tests.DEFAULT_TIMEOUT, context=None, curl_handle=None):
- curl = super().configure_curl(self, timeout=timeout, context=context, curl_handle=curl_handle)
- curl.setopt(pycurl.FORBID_REUSE, 1) # Simulate results from different users hitting server
+ curl = super().configure_curl(self, timeout=timeout,
+ context=context, curl_handle=curl_handle)
+ # Simulate results from different users hitting server
+ curl.setopt(pycurl.FORBID_REUSE, 1)
return curl
+
def parse_benchmark(base_url, node):
""" Try building a benchmark configuration from deserialized configuration root node """
node = lowercase_keys(flatten_dictionaries(node)) # Make it usable
@@ -211,32 +230,39 @@ def parse_benchmark(base_url, node):
raise Exception("Invalid output file format")
benchmark.output_file = value
elif key == u'metrics':
- if isinstance(value, unicode) or isinstance(value,str):
+ if isinstance(value, unicode) or isinstance(value, str):
# Single value
benchmark.add_metric(unicode(value, 'UTF-8'))
elif isinstance(value, list) or isinstance(value, set):
- # List of single values or list of {metric:aggregate, ...}
+ # List of single values or list of {metric:aggregate, ...}
for metric in value:
if isinstance(metric, dict):
for metricname, aggregate in metric.items():
if not isinstance(metricname, basestring):
- raise Exception("Invalid metric input: non-string metric name")
+ raise Exception(
+ "Invalid metric input: non-string metric name")
if not isinstance(aggregate, basestring):
- raise Exception("Invalid aggregate input: non-string aggregate name")
+ raise Exception(
+ "Invalid aggregate input: non-string aggregate name")
# TODO unicode-safe this
- benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))
+ benchmark.add_metric(
+ unicode(metricname, 'UTF-8'), unicode(aggregate, 'UTF-8'))
elif isinstance(metric, unicode) or isinstance(metric, str):
- benchmark.add_metric(unicode(metric,'UTF-8'))
+ benchmark.add_metric(unicode(metric, 'UTF-8'))
elif isinstance(value, dict):
# Dictionary of metric-aggregate pairs
for metricname, aggregate in value.items():
if not isinstance(metricname, basestring):
- raise Exception("Invalid metric input: non-string metric name")
+ raise Exception(
+ "Invalid metric input: non-string metric name")
if not isinstance(aggregate, basestring):
- raise Exception("Invalid aggregate input: non-string aggregate name")
- benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))
+ raise Exception(
+ "Invalid aggregate input: non-string aggregate name")
+ benchmark.add_metric(
+ unicode(metricname, 'UTF-8'), unicode(aggregate, 'UTF-8'))
else:
- raise Exception("Invalid benchmark metric datatype: "+str(value))
+ raise Exception(
+ "Invalid benchmark metric datatype: " + str(value))
- return benchmark
+ return benchmark
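
The parse_benchmark hunk above accepts the 'metrics' key as a single name, a list mixing bare names with {metric: aggregate} pairs, or a flat dict of metric to aggregate; each entry ultimately goes through Benchmark.add_metric. A minimal sketch of the equivalent direct calls, using names taken from the METRICS and AGGREGATES tables in this diff (the import path is an assumption; the codebase targets Python 2, as the unicode/basestring checks above imply):

    from pyresttest.benchmarks import Benchmark  # assumed module path

    bench = Benchmark()
    bench.add_metric('total_time')              # raw samples, no aggregation
    bench.add_metric('total_time', 'mean')      # arithmetic mean aggregate
    bench.add_metric('connect_time', 'median')  # median aggregate
    # bench.aggregated_metrics is now:
    # {'total_time': ['mean'], 'connect_time': ['median']}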