Permalink
Switch branches/tags
Nothing to show
Find file Copy path
Fetching contributors…
Cannot retrieve contributors at this time
executable file 150 lines (125 sloc) 5.17 KB
#!/usr/bin/env python
from time import time, sleep
from urllib.request import Request, urlopen
from statistics import mean
import os
from optparse import OptionParser
from multiprocessing.pool import ThreadPool
from datetime import datetime, timedelta
import threading
# Base URL and API key for the benchmarked REST API; both are required
# environment variables (KeyError at import time if missing). The key is
# sent as the "x-api-key" header on every request.
base_url = os.environ["REST_API_BASE"]
api_key = os.environ["REST_API_KEY"]
# Endpoints to benchmark: [path template, language label, vpc flag].
# "MEMORY" in the path is substituted with each value from memory_sizes.
# Paths suggest serverless functions per runtime (js/python/cljs), with and
# without a VPC — presumably AWS Lambda; confirm against the deployed API.
endpoints = [
    ["/js/MEMORY", "js", 0],
    ["/python/MEMORY", "python", 0],
    ["/cljs/MEMORY", "cljs", 0],
    ["/vpc/js/MEMORY", "js", 1],
    ["/vpc/python/MEMORY", "python", 1],
    ["/vpc/cljs/MEMORY", "cljs", 1]
]
# Function memory allocations (MB) tried for every endpoint.
memory_sizes = [128, 256, 512, 768, 1024, 1280, 1536]
# Idle delays (seconds) before each batch — probes how container reuse
# decays with inactivity (up to one hour).
sleep_times = [0, 5, 10, 15, 30, 60, 300, 600, 900, 1800, 2700, 3600]
def request_duration(path):
    """Issue one GET to base_url + path and time the round trip.

    Returns [duration_ms, reused] where reused is 1 if the endpoint body
    was the JSON string "reused" (warm container) and 0 for "new"
    (cold start). Any other body or any exception yields [-1, 0].
    """
    try:
        start = time()
        req = Request(base_url + path, None, {"x-api-key": api_key})
        # Fix: close the response (original leaked it — one open response
        # per request across the whole thread pool). The `with` also
        # guarantees cleanup if read()/decode() raises.
        with urlopen(req) as resp:
            # Stop the clock when the response arrives; the body read is
            # deliberately excluded from the measurement.
            end = time()
            s = resp.read().decode("utf-8")
        if s == "\"reused\"":
            return [round((end - start) * 1000), 1]
        elif s == "\"new\"":
            return [round((end - start) * 1000), 0]
        else:
            print("- invalid response: {}".format(s))
            return [-1, 0]
    except Exception as e:
        # Broad catch on purpose: one failed request must not kill the
        # worker pool; the sentinel is counted as a failure by the caller.
        print(" - invalid response, exception:", e)
        return [-1, 0]
def benchmark_endpoint(endpoint, sleep_time, workers, requests):
    """Benchmark one endpoint across every configured memory size.

    Fires `requests` parallel GETs (`workers` threads) per memory size and
    returns a list of result dicts: summary stats plus raw per-request
    [duration_ms, reused] pairs.
    """
    collected = []
    route_template, lang, vpc = endpoint
    for memory_size in memory_sizes:
        path = route_template.replace("MEMORY", str(memory_size))
        print("lang: {} vpc: {} path: {}".format(lang, vpc, path))
        batch_start = time()
        print("issuing {} requests with {} workers".format(requests, workers))
        with ThreadPool(workers) as pool:
            pending = pool.map_async(request_duration, [path] * requests)
            pool.close()
            try:
                # Hard 300s cap so a hung endpoint cannot stall the run.
                durations = pending.get(300)
            except Exception as e:
                print("pool exception", e)
                failure = [-1, 0]
                durations = [failure] * requests
        print("threads: {}".format(threading.active_count()))
        batch_end = time()
        print("took: {}ms".format(round((batch_end - batch_start) * 1000)))
        failed = sum(1 for d in durations if d[0] == -1)
        reused = sum(1 for d in durations if d[1] == 1)
        print("failures: {} reused containers: {}".format(failed, reused))
        ok_durations = [d[0] for d in durations if d[0] != -1]
        r = {
            "sleep": sleep_time,
            "path": path,
            "lang": lang,
            "vpc": vpc,
            "start": str(datetime.now()),
            "memory": memory_size,
            "workers": workers,
            # Sentinel -1 marks a batch where every request failed.
            "min": min(ok_durations) if ok_durations else -1,
            "max": max(ok_durations) if ok_durations else -1,
            "mean": round(mean(ok_durations)) if ok_durations else -1,
            "durations": durations,
        }
        collected.append(r)
        print("min: {} max: {} mean: {}".format(r["min"], r["max"], r["mean"]))
    return collected
def csv_write_header(filename, requests):
    """(Over)write `filename` with the CSV header row.

    Fixed summary columns come first, followed by one d{i},r{i} pair
    (duration / reuse flag) per individual request.
    """
    columns = ["sleep", "start", "path", "lang", "vpc",
               "memory", "workers", "min", "max", "mean"]
    for i in range(1, requests + 1):
        columns.append("d{}".format(i))
        columns.append("r{}".format(i))
    with open(filename, "w") as out:
        out.write(",".join(columns) + "\n")
def csv_append_results(filename, results):
    """Append one CSV row per result dict to `filename`.

    Row layout matches csv_write_header: the ten summary fields, then the
    [duration, reused] pair of every individual request.
    """
    summary_keys = ("sleep", "start", "path", "lang", "vpc",
                    "memory", "workers", "min", "max", "mean")
    with open(filename, "a") as out:
        for result in results:
            fields = [str(result[key]) for key in summary_keys]
            for duration_ms, reused in result["durations"]:
                fields.append(str(duration_ms))
                fields.append(str(reused))
            out.write(",".join(fields) + "\n")
def benchmark(options):
    """Top-level driver.

    Writes the CSV header, then for every configured idle delay sleeps
    that long and benchmarks every endpoint, appending results to the
    CSV as each endpoint finishes.
    """
    csv_write_header(options.filename, options.requests)
    for sleep_time in sleep_times:
        batch_time = datetime.now()
        next_batch = batch_time + timedelta(seconds=sleep_time)
        print("---------------")
        print("now: {} sleep time: {}s next batch at: {}".format(
            batch_time, sleep_time, next_batch))
        sleep(sleep_time)
        for endpoint in endpoints:
            rows = benchmark_endpoint(endpoint, sleep_time,
                                      options.workers, options.requests)
            csv_append_results(options.filename, rows)
if __name__ == "__main__":
    # CLI entry point. NOTE(review): optparse is deprecated in favour of
    # argparse but is kept here to leave the script's behaviour untouched.
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename", default="data.csv",
                      metavar="FILE", help="Output CSV file name")
    parser.add_option("-w", "--workers", dest="workers", type=int, default=10,
                      metavar="NUMBER",
                      help="Number of workers (parallel requests)")
    parser.add_option("-r", "--requests", dest="requests", type=int, default=30,
                      metavar="NUMBER",
                      help="Number of requests per endpoint to issue")
    options, _args = parser.parse_args()
    benchmark(options)