
Commit e92c9aa

tests: Add performance benchmarking test-suite framework.
This benchmarking test suite is intended to be run on any MicroPython target. As such, all tests are parameterised with N and M: N is the approximate CPU frequency (in MHz) of the target and M is the approximate amount of heap memory (in kbytes) available on the target. When running the benchmark suite these parameters must be specified, and each test is then tuned to run on that target in a reasonable time (< 1 second).

The test scripts are not standalone: they require some extra code added at the end to run the test with the appropriate parameters. This is done automatically by the run-perfbench.py script, in such a way that imports are minimised (so the tests can be run on targets without filesystem support).

To interface with the benchmarking framework, each test provides a bm_params dict and a bm_setup function, with the latter taking a set of parameters (chosen based on N, M) and returning a pair of functions: one to run the test and one to get the results.

When running a test, the number of microseconds taken by the test is recorded. This is then converted into a benchmark score by inverting it (so a higher number is faster) and normalising it with an appropriate factor (based roughly on the amount of work done by the test, eg the number of iterations).

Test outputs are also compared against a "truth" value, computed by running the test with CPython. This provides a basic check that the test actually ran correctly.

Each test is run multiple times and the results averaged, with the standard deviation computed; this is output as a summary of the test.

To make comparisons of performance across different runs, the run-perfbench.py script also includes a diff mode that reads in the output of two previous runs and computes the difference in performance. Results are reported as a percentage change in performance, with a combined standard deviation to give an indication of whether the noise in the benchmarking is smaller than the effect being measured.

Example invocations for PC, pyboard and esp8266 targets respectively:

$ ./run-perfbench.py 1000 1000
$ ./run-perfbench.py --pyboard 100 100
$ ./run-perfbench.py --pyboard --device /dev/ttyUSB0 50 25
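
For illustration only, a minimal benchmark script compatible with the bm_params/bm_setup interface described above might look like the sketch below; the parameter keys, loop counts and the workload are invented for this example and are not part of the commit.

# Hypothetical test script (illustrative sketch, not from this commit).
# bm_params maps an (N, M) capability key to the parameters passed to bm_setup.
bm_params = {
    (50, 25): (500,),
    (100, 100): (2500,),
    (1000, 1000): (40000,),
}

def bm_setup(params):
    (nloop,) = params
    state = {'result': 0}

    def run():
        # The workload being timed: a simple arithmetic loop.
        acc = 0
        for i in range(nloop):
            acc += i * i
        state['result'] = acc

    def result():
        # Return (normalisation factor, output to compare against the CPython truth run).
        return nloop, state['result']

    return run, result

run-perfbench.py appends benchrun.py and a final bm_run(N, M) call to such a script before sending it to the target, so the test itself never needs to import the framework.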
1 parent d86fb67 commit e92c9aa

File tree

2 files changed (+267, -0 lines)


tests/perf_bench/benchrun.py

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
def bm_run(N, M):
    try:
        from utime import ticks_us, ticks_diff
    except ImportError:
        import time
        ticks_us = lambda: int(time.perf_counter() * 1000000)
        ticks_diff = lambda a, b: a - b

    # Pick sensible parameters given N, M
    cur_nm = (0, 0)
    param = None
    for nm, p in bm_params.items():
        if 10 * nm[0] <= 12 * N and nm[1] <= M and nm > cur_nm:
            cur_nm = nm
            param = p
    if param is None:
        print(-1, -1, 'no matching params')
        return

    # Run and time benchmark
    run, result = bm_setup(param)
    t0 = ticks_us()
    run()
    t1 = ticks_us()
    norm, out = result()
    print(ticks_diff(t1, t0), norm, out)
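
As a rough sketch of the parameter selection above: a bm_params key (n, m) is eligible when 10*n <= 12*N (its nominal CPU frequency is at most 1.2x the target's N) and m <= M, and the largest eligible key wins. The table and values below are invented for illustration:

# Standalone sketch of the selection rule in bm_run, with a made-up bm_params table.
bm_params = {
    (50, 25): 'small',
    (100, 100): 'medium',
    (1000, 1000): 'large',
}

def pick_param(N, M):
    cur_nm = (0, 0)
    param = None
    for nm, p in bm_params.items():
        if 10 * nm[0] <= 12 * N and nm[1] <= M and nm > cur_nm:
            cur_nm = nm
            param = p
    return param

print(pick_param(100, 100))    # 'medium': the (1000, 1000) key needs too fast a CPU
print(pick_param(1000, 1000))  # 'large'
print(pick_param(10, 8))       # None: bm_run would print "-1 -1 no matching params"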

tests/run-perfbench.py

Lines changed: 241 additions & 0 deletions
@@ -0,0 +1,241 @@
#!/usr/bin/env python3

# This file is part of the MicroPython project, http://micropython.org/
# The MIT License (MIT)
# Copyright (c) 2019 Damien P. George

import os
import subprocess
import sys
import argparse
from glob import glob

sys.path.append('../tools')
import pyboard

# Paths for host executables
if os.name == 'nt':
    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3.exe')
    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/windows/micropython.exe')
else:
    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/unix/micropython')

PYTHON_TRUTH = CPYTHON3

BENCH_SCRIPT_DIR = 'perf_bench/'

def compute_stats(lst):
    avg = 0
    var = 0
    for x in lst:
        avg += x
        var += x * x
    avg /= len(lst)
    var = max(0, var / len(lst) - avg ** 2)
    return avg, var ** 0.5

def run_script_on_target(target, script):
    output = b''
    err = None

    if isinstance(target, pyboard.Pyboard):
        # Run via pyboard interface
        try:
            target.enter_raw_repl()
            output = target.exec_(script)
        except pyboard.PyboardError as er:
            err = er
    else:
        # Run local executable
        try:
            p = subprocess.run([target], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=script)
            output = p.stdout
        except subprocess.CalledProcessError as er:
            err = er

    return str(output.strip(), 'ascii'), err

def run_feature_test(target, test):
    with open('feature_check/' + test + '.py', 'rb') as f:
        script = f.read()
    output, err = run_script_on_target(target, script)
    if err is None:
        return output
    else:
        return 'CRASH: %r' % err

def run_benchmark_on_target(target, script):
    output, err = run_script_on_target(target, script)
    if err is None:
        time, norm, result = output.split(None, 2)
        try:
            return int(time), int(norm), result
        except ValueError:
            return -1, -1, 'CRASH: %r' % output
    else:
        return -1, -1, 'CRASH: %r' % err

def run_benchmarks(target, param_n, param_m, n_average, test_list):
    skip_native = run_feature_test(target, 'native_check') != ''

    for test_file in sorted(test_list):
        print(test_file + ': ', end='')

        # Check if test should be skipped
        skip = skip_native and test_file.find('viper_') != -1
        if skip:
            print('skip')
            continue

        # Create test script
        with open(test_file, 'rb') as f:
            test_script = f.read()
        with open(BENCH_SCRIPT_DIR + 'benchrun.py', 'rb') as f:
            test_script += f.read()
        test_script += b'bm_run(%u, %u)\n' % (param_n, param_m)

        # Write full test script if needed
        if 0:
            with open('%s.full' % test_file, 'wb') as f:
                f.write(test_script)

        # Run MicroPython a given number of times
        times = []
        scores = []
        error = None
        result_out = None
        for _ in range(n_average):
            time, norm, result = run_benchmark_on_target(target, test_script)
            if time < 0 or norm < 0:
                error = result
                break
            if result_out is None:
                result_out = result
            elif result != result_out:
                error = 'FAIL self'
                break
            times.append(time)
            scores.append(1e6 * norm / time)

        # Check result against truth if needed
        if error is None and result_out != 'None':
            _, _, result_exp = run_benchmark_on_target(PYTHON_TRUTH, test_script)
            if result_out != result_exp:
                error = 'FAIL truth'
                break

        if error is not None:
            print(error)
        else:
            t_avg, t_sd = compute_stats(times)
            s_avg, s_sd = compute_stats(scores)
            print('{:.2f} {:.4f} {:.2f} {:.4f}'.format(t_avg, 100 * t_sd / t_avg, s_avg, 100 * s_sd / s_avg))
            if 0:
                print('  times: ', times)
                print('  scores:', scores)

        sys.stdout.flush()

def parse_output(filename):
    with open(filename) as f:
        params = f.readline()
        n, m, _ = params.strip().split()
        n = int(n.split('=')[1])
        m = int(m.split('=')[1])
        data = []
        for l in f:
            if l.find(': ') != -1 and l.find(': skip') == -1 and l.find('CRASH: ') == -1:
                name, values = l.strip().split(': ')
                values = tuple(float(v) for v in values.split())
                data.append((name,) + values)
    return n, m, data

def compute_diff(file1, file2, diff_score):
    # Parse output data from previous runs
    n1, m1, d1 = parse_output(file1)
    n2, m2, d2 = parse_output(file2)

    # Print header
    if diff_score:
        print('diff of scores (higher is better)')
    else:
        print('diff of microsecond times (lower is better)')
    if n1 == n2 and m1 == m2:
        hdr = 'N={} M={}'.format(n1, m1)
    else:
        hdr = 'N={} M={} vs N={} M={}'.format(n1, m1, n2, m2)
    print('{:24} {:>10} -> {:>10} {:>10} {:>7}% (error%)'.format(hdr, file1, file2, 'diff', 'diff'))

    # Print entries
    while d1 and d2:
        if d1[0][0] == d2[0][0]:
            # Found entries with matching names
            entry1 = d1.pop(0)
            entry2 = d2.pop(0)
            name = entry1[0].rsplit('/')[-1]
            av1, sd1 = entry1[1 + 2 * diff_score], entry1[2 + 2 * diff_score]
            av2, sd2 = entry2[1 + 2 * diff_score], entry2[2 + 2 * diff_score]
            sd1 *= av1 / 100  # convert from percent sd to absolute sd
            sd2 *= av2 / 100  # convert from percent sd to absolute sd
            av_diff = av2 - av1
            sd_diff = (sd1 ** 2 + sd2 ** 2) ** 0.5
            percent = 100 * av_diff / av1
            percent_sd = 100 * sd_diff / av1
            print('{:24} {:10.2f} -> {:10.2f} : {:+10.2f} = {:+7.3f}% (+/-{:.2f}%)'.format(name, av1, av2, av_diff, percent, percent_sd))
        elif d1[0][0] < d2[0][0]:
            d1.pop(0)
        else:
            d2.pop(0)

def main():
    cmd_parser = argparse.ArgumentParser(description='Run benchmarks for MicroPython')
    cmd_parser.add_argument('-t', '--diff-time', action='store_true', help='diff time outputs from a previous run')
    cmd_parser.add_argument('-s', '--diff-score', action='store_true', help='diff score outputs from a previous run')
    cmd_parser.add_argument('-p', '--pyboard', action='store_true', help='run tests via pyboard.py')
    cmd_parser.add_argument('-d', '--device', default='/dev/ttyACM0', help='the device for pyboard.py')
    cmd_parser.add_argument('-a', '--average', default='8', help='averaging number')
    cmd_parser.add_argument('N', nargs=1, help='N parameter (approximate target CPU frequency)')
    cmd_parser.add_argument('M', nargs=1, help='M parameter (approximate target heap in kbytes)')
    cmd_parser.add_argument('files', nargs='*', help='input test files')
    args = cmd_parser.parse_args()

    if args.diff_time or args.diff_score:
        compute_diff(args.N[0], args.M[0], args.diff_score)
        sys.exit(0)

    # N, M = 50, 25 # esp8266
    # N, M = 100, 100 # pyboard, esp32
    # N, M = 1000, 1000 # PC
    N = int(args.N[0])
    M = int(args.M[0])
    n_average = int(args.average)

    if args.pyboard:
        target = pyboard.Pyboard(args.device)
        target.enter_raw_repl()
    else:
        target = MICROPYTHON

    if len(args.files) == 0:
        tests_skip = ('benchrun.py',)
        if M <= 25:
            # These scripts are too big to be compiled by the target
            tests_skip += ('bm_chaos.py', 'bm_hexiom.py', 'misc_raytrace.py')
        tests = sorted(
            BENCH_SCRIPT_DIR + test_file for test_file in os.listdir(BENCH_SCRIPT_DIR)
            if test_file.endswith('.py') and test_file not in tests_skip
        )
    else:
        tests = sorted(args.files)

    print('N={} M={} n_average={}'.format(N, M, n_average))

    run_benchmarks(target, N, M, n_average, tests)

    if isinstance(target, pyboard.Pyboard):
        target.exit_raw_repl()
        target.close()

if __name__ == "__main__":
    main()
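
To make the diff report concrete, here is a small worked example of the arithmetic used in compute_diff; the averages and percentage standard deviations are invented for illustration:

# Worked example of compute_diff's arithmetic (numbers invented for illustration).
av1, sd1_pct = 1200.0, 2.0   # run 1: average score, sd as percent of the average
av2, sd2_pct = 1300.0, 3.0   # run 2: average score, sd as percent of the average

sd1 = sd1_pct * av1 / 100                # absolute sd -> 24.0
sd2 = sd2_pct * av2 / 100                # absolute sd -> 39.0
av_diff = av2 - av1                      # +100.0
sd_diff = (sd1 ** 2 + sd2 ** 2) ** 0.5   # combined sd, about 45.8
percent = 100 * av_diff / av1            # +8.333% change
percent_sd = 100 * sd_diff / av1         # about 3.82% noise

# The +8.3% change exceeds the +/-3.8% combined noise, so the difference is
# likely a real effect rather than benchmarking noise.
print('{:+.3f}% (+/-{:.2f}%)'.format(percent, percent_sd))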

0 commit comments
