[perf-test] Add in the Benchmark_DTrace driver.
This and the associated *.d file can be used to determine dynamic
retain/release counts over the perf test suite.
gottesmm committed Feb 8, 2016
1 parent d4a95ee commit 1c2f40e
Showing 4 changed files with 167 additions and 10 deletions.
111 changes: 111 additions & 0 deletions benchmark/scripts/Benchmark_DTrace.in
@@ -0,0 +1,111 @@
#!/usr/bin/env python

# ===--- Benchmark_DTrace.in ----------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===----------------------------------------------------------------------===//

import os
import sys
import subprocess
import argparse

DRIVER_LIBRARY_PATH = "@PATH_TO_DRIVER_LIBRARY@"
sys.path.append(DRIVER_LIBRARY_PATH)
DTRACE_PATH = os.path.join(DRIVER_LIBRARY_PATH, 'swift_stats.d')

import perf_test_driver

# Regexes for the XFAIL_LIST. Matches against '([Onone|O|Ounchecked],TestName)'
XFAIL_LIST = [
]

class DTraceResult(perf_test_driver.Result):

def __init__(self, name, status, output, csv_output):
perf_test_driver.Result.__init__(self, name, status, output, XFAIL_LIST)
self.csv_output = csv_output

@classmethod
def data_headers(cls):
return ['Name', 'Result', 'strong_retain', 'strong_retain/iter', 'strong_release', 'strong_release/iter']

@classmethod
def data_format(cls, max_test_len):
non_name_headers = DTraceResult.data_headers()[1:]
fmt = ('{:<%d}' % (max_test_len+5)) + ''.join(['{:<%d}' % (len(h)+2) for h in non_name_headers])
return fmt

@classmethod
def print_data_header(cls, max_test_len, csv_output):
headers = cls.data_headers()
if csv_output:
print(','.join(headers))
return
print(cls.data_format(max_test_len).format(*headers))

def print_data(self, max_test_len):
result = [self.get_name(), self.get_result()] + map(str, self.output)
if self.csv_output:
print(','.join(result))
return

print(DTraceResult.data_format(max_test_len).format(*result))

class DTracePerfTestDriver(perf_test_driver.PerfTestDriver):
def __init__(self, binary, xfail_list, csv_output):
perf_test_driver.PerfTestDriver.__init__(self, binary, xfail_list,
enable_parallel=False,
opt_levels = ['O'])
self.csv_output = csv_output

def print_data_header(self, max_test_len):
DTraceResult.print_data_header(max_test_len, self.csv_output)

def prepare_input(self, name):
return {}

def process_input(self, data):
test_name = '({}_{})'.format(data['opt'], data['test_name'])
print "Running {}...".format(test_name)
sys.stdout.flush()

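        # Run the test under dtrace twice, with --num-iters=2 and then 3;
        # the caller below differences the two sets of counters so that
        # one-time setup and teardown costs cancel out.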
def get_results_with_iters(iters):
            p = subprocess.Popen(
                ['sudo', 'dtrace', '-s', DTRACE_PATH, '-c',
                 '%s %s --num-iters=%d' % (data['path'], data['test_name'], iters)],
                stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'))
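            # swift_stats.d prints a 'DTRACE RESULTS' marker followed by one
            # 'probefunc,count' line per counter; keep just the counts.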
results = [x for x in p.communicate()[0].split("\n") if len(x) > 0]
return [x.split(',')[1] for x in results[results.index('DTRACE RESULTS')+1:]]
iter_2_results = get_results_with_iters(2)
iter_3_results = get_results_with_iters(3)

results = []
for x in zip(iter_2_results, iter_3_results):
results.append(x[1])
results.append(int(x[1]) - int(x[0]))

return DTraceResult(test_name, 0, results, self.csv_output)

SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))

def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-filter', type=str, default=None,
help='Filter out any test that does not match the given regex')
parser.add_argument('-csv', default=False, action='store_true',
help="Emit csv output", dest='csv_output')
return parser.parse_args()

if __name__ == "__main__":
args = parse_args()
g = DTracePerfTestDriver(SWIFT_BIN_DIR, XFAIL_LIST, args.csv_output)
if g.run(args.filter):
sys.exit(0)
else:
sys.exit(-1)
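
How the per-iteration numbers fall out: because get_results_with_iters is run
with both 2 and 3 iterations, subtracting the two aggregate counters cancels
whatever retains and releases happen once at startup and shutdown, leaving the
count for a single iteration. A minimal sketch of that arithmetic, using
made-up counter values rather than real dtrace output:

    # Hypothetical aggregates after 2 and then 3 iterations (illustrative
    # values only): strong_retain first, then strong_release.
    iter_2_results = ['1050', '1040']
    iter_3_results = ['1550', '1540']

    results = []
    for two, three in zip(iter_2_results, iter_3_results):
        results.append(three)                  # total at 3 iterations
        results.append(int(three) - int(two))  # per-iteration delta
    print(results)  # ['1550', 500, '1540', 500]

This matches the data_headers order above: total, then per-iteration count,
for each of strong_retain and strong_release.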
9 changes: 9 additions & 0 deletions benchmark/scripts/CMakeLists.txt
@@ -8,6 +8,10 @@ configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/Benchmark_RuntimeLeaksRunner.in
${CMAKE_CURRENT_BINARY_DIR}/Benchmark_RuntimeLeaksRunner
@ONLY)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/Benchmark_DTrace.in
${CMAKE_CURRENT_BINARY_DIR}/Benchmark_DTrace
@ONLY)
set(PATH_TO_DRIVER_LIBRARY)

file(COPY ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_GuardMalloc
@@ -20,6 +24,11 @@ file(COPY ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_RuntimeLeaksRunner
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)

file(COPY ${CMAKE_CURRENT_BINARY_DIR}/Benchmark_DTrace
DESTINATION ${CMAKE_BINARY_DIR}/bin
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)

file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/Benchmark_Driver
DESTINATION ${CMAKE_BINARY_DIR}/bin
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
30 changes: 20 additions & 10 deletions benchmark/scripts/perf_test_driver/perf_test_driver.py
@@ -53,28 +53,39 @@ def merge_in_extra_data(self, d):
         """Rather than modifying the extra data dict, just return it as a no-op"""
         return d
 
+    def print_data(self, max_test_len):
+        fmt = '{:<%d}{:}' % (max_test_len + 5)
+        print(fmt.format(self.get_name(), self.get_result()))
+
 def _unwrap_self(args):
     return type(args[0]).process_input(*args)
 
-class BenchmarkDriver(object):
-    OptLevels = ['Onone', 'O', 'Ounchecked']
+PerfTestDriver_OptLevels = ['Onone', 'O', 'Ounchecked']
+
+class PerfTestDriver(object):
 
-    def __init__(self, binary_dir, xfail_list, enable_parallel=False):
-        self.targets = [(os.path.join(binary_dir, 'Benchmark_%s' % o), o) for o in BenchmarkDriver.OptLevels]
+    def __init__(self, binary_dir, xfail_list, enable_parallel=False, opt_levels=PerfTestDriver_OptLevels):
+        self.targets = [(os.path.join(binary_dir, 'PerfTests_%s' % o), o) for o in opt_levels]
         self.xfail_list = xfail_list
         self.enable_parallel = enable_parallel
         self.data = None
 
+    def print_data_header(self, max_test_len):
+        fmt = '{:<%d}{:}' % (max_test_len + 5)
+        print(fmt.format('Name', 'Result'))
+
     def prepare_input(self, name, opt_level):
         raise RuntimeError("Abstract method")
 
     def process_input(self, data):
         raise RuntimeError("Abstract method")
 
-    def run_for_opt_level(self, binary, opt_level):
+    def run_for_opt_level(self, binary, opt_level, test_filter):
         print("testing driver at path: %s" % binary)
         names = [n.strip() for n in subprocess.check_output([binary, "--list"]).split()[2:]]
+        if test_filter:
+            regex = re.compile(test_filter)
+            names = [n for n in names if regex.match(n)]
 
         def prepare_input_wrapper(name):
             x = {'opt': opt_level, 'path': binary, 'test_name': name}
@@ -101,15 +112,14 @@ def reduce_results(acc, r):
 
     def print_data(self, data, max_test_len):
         print("Results:")
-        fmt = '{:<%d}{:}' % (max_test_len + 5)
+        self.print_data_header(max_test_len)
         for d in data:
             for r in d['result']:
-                print(fmt.format(r.get_name(), r.get_result()))
+                r.print_data(max_test_len)
 
-    def run(self):
-        self.data = [self.run_for_opt_level(binary, opt_level) for binary, opt_level in self.targets]
+    def run(self, test_filter=None):
+        self.data = [self.run_for_opt_level(binary, opt_level, test_filter) for binary, opt_level in self.targets]
         max_test_len = reduce(max, [d['max_test_len'] for d in self.data])
         has_failure = reduce(max, [d['has_failure'] for d in self.data])
         self.print_data(self.data, max_test_len)
         return not has_failure

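PerfTestDriver is a small template-method base class: subclasses supply
prepare_input and process_input, and may override print_data_header for
custom columns, exactly as DTracePerfTestDriver does above. Below is a
minimal sketch of that subclass contract; the base classes come from
perf_test_driver, while the driver name and the empty metric are invented
for illustration:

    import perf_test_driver

    class SketchResult(perf_test_driver.Result):
        # Inherits the base-class reporting; nothing extra needed here.
        pass

    class SketchPerfTestDriver(perf_test_driver.PerfTestDriver):
        def __init__(self, binary_dir, xfail_list):
            perf_test_driver.PerfTestDriver.__init__(
                self, binary_dir, xfail_list,
                enable_parallel=True,   # safe when tests share no state
                opt_levels=['O'])       # any subset of Onone/O/Ounchecked

        def prepare_input(self, name):
            # Extra per-test data merged into the work item; none needed here.
            return {}

        def process_input(self, data):
            # The base class hands each worker 'opt', 'path', and 'test_name'.
            test_name = '({}_{})'.format(data['opt'], data['test_name'])
            # ... run data['path'] here and collect whatever is measured ...
            return SketchResult(test_name, 0, [], [])
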
27 changes: 27 additions & 0 deletions benchmark/scripts/perf_test_driver/swift_stats.d
@@ -0,0 +1,27 @@
/*===--- swift_stats.d ----------------------------------------------------===//
*
* This source file is part of the Swift.org open source project
*
* Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
* Licensed under Apache License v2.0 with Runtime Library Exception
*
* See http://swift.org/LICENSE.txt for license information
* See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
*
*===----------------------------------------------------------------------===*/

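/* Count every call the target process makes into the runtime's retain and
   release entry points, keyed by probe function name. */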
pid$target:*:swift_retain:entry
{
@counts[probefunc] = count();
}

pid$target:*:swift_release:entry
{
@counts[probefunc] = count();
}

END
{
printf("\nDTRACE RESULTS\n");
printa("%s,%@u\n", @counts);
}
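
The END clause emits a 'DTRACE RESULTS' marker followed by one
'probefunc,count' line per aggregation entry, which is exactly what
Benchmark_DTrace's get_results_with_iters scans for on the child's stdout.
A self-contained sketch of that parsing, fed a fabricated transcript rather
than real dtrace output:

    # Fabricated stdout for illustration; a real run also carries whatever
    # the benchmark binary itself prints before END fires.
    raw = ("<benchmark output>\n"
           "DTRACE RESULTS\n"
           "swift_retain,1550\n"
           "swift_release,1540\n")

    lines = [x for x in raw.split("\n") if len(x) > 0]
    counts = [x.split(',')[1]
              for x in lines[lines.index('DTRACE RESULTS') + 1:]]
    print(counts)  # ['1550', '1540']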
