# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Trace a subunit stream in reasonable detail and high accuracy."""
import argparse
import functools
import os
import re
import sys
import pbr.version
import subunit
import testtools
from stestr import colorizer
from stestr import results
# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module
# was renamed to dbm.ndbm, this block takes that into account
try:
import anydbm as dbm
except ImportError:
import dbm
DAY_SECONDS = 60 * 60 * 24
FAILS = []
RESULTS = {}
def total_seconds(timedelta):
    # NOTE(mtreinish): timedelta gained a built-in total_seconds() method in
    # python 2.7; this helper exists to support older versions
return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 +
timedelta.microseconds) / 10 ** 6
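

# Worked example for total_seconds() (illustrative): a timedelta of 1 day,
# 30 seconds and 500000 microseconds gives
# ((1 * 86400 + 30) * 10**6 + 500000) / 10**6 == 86430.5 under true division.
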
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
"""Clean up the test name for display.
By default we strip out the tags in the test because they don't help us
in identifying the test that is run to it's result.
Make it possible to strip out the testscenarios information (not to
be confused with tempest scenarios) however that's often needed to
indentify generated negative tests.
"""
if strip_tags:
tags_start = name.find('[')
tags_end = name.find(']')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
if strip_scenarios:
tags_start = name.find('(')
tags_end = name.find(')')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
return name
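

# Example (illustrative): cleanup_test_name("test_foo[id-42,smoke]") returns
# "test_foo"; with strip_scenarios=True, "test_bar(scenario)" would become
# "test_bar".
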
def get_duration(timestamps):
start, end = timestamps
if not start or not end:
duration = ''
else:
delta = end - start
duration = '%d.%06ds' % (
delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
return duration
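

# Example (illustrative): timestamps 1.5 seconds apart format as the string
# "1.500000s"; a missing start or end timestamp yields "".
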
def find_worker(test):
"""Get the worker number.
If there are no workers because we aren't in a concurrent environment,
assume the worker number is 0.
"""
for tag in test['tags']:
if tag.startswith('worker-'):
return int(tag[7:])
return 0
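

# Example (illustrative): a test tagged "worker-3" maps to worker 3; a run
# with no worker tags (non-concurrent) maps everything to worker 0.
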
# Always print stdout/stderr attachments when they exist
def print_attachments(stream, test, all_channels=False,
                      show_binary_attachments=False):
    """Print out subunit attachments.

    Print out subunit attachments that contain content. This runs in two
    modes: one for successes, where we print out just stdout and stderr,
    and an override that dumps all the attachments.
    """
channels = ('stdout', 'stderr')
for name, detail in test['details'].items():
# NOTE(sdague): the subunit names are a little crazy, and actually
# are in the form pythonlogging:'' (with the colon and quotes)
name = name.split(':')[0]
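        # NOTE: some attachments arrive typed 'test' rather than 'text'
        # (presumably a quirk of the emitting stream); coerce them so the
        # text branch below renders them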
if detail.content_type.type == 'test':
detail.content_type.type = 'text'
if all_channels or name in channels:
title = "Captured %s:" % name
stream.write("\n{}\n{}\n".format(title, ('~' * len(title))))
# indent attachment lines 4 spaces to make them visually
# offset
if detail.content_type.type == 'text':
for line in detail.iter_text():
stream.write(" %s\n" % line)
elif show_binary_attachments: # binary
for line in detail.iter_bytes():
stream.write(" %s\n" % line)


def find_test_run_time_diff(test_id, run_time):
    times_db_path = os.path.join(os.getcwd(), '.testrepository', 'times.dbm')
if os.path.isfile(times_db_path):
try:
test_times = dbm.open(times_db_path)
except Exception:
return False
try:
avg_runtime = float(test_times.get(str(test_id), False))
except Exception:
try:
avg_runtime = float(test_times[str(test_id)])
except Exception:
avg_runtime = False
if avg_runtime and avg_runtime > 0:
run_time = float(run_time.rstrip('s'))
perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100
return perc_diff
return False
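

# Example (illustrative): with a stored average of 2.0s and a current run
# time of "3.0s", find_test_run_time_diff() returns 50.0 (percent); it
# returns False when the times database is missing or unreadable.
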
def show_outcome(stream, test, print_failures=False, failonly=False,
enable_diff=False, threshold='0', abbreviate=False,
enable_color=False, suppress_attachments=False,
all_attachments=False, show_binary_attachments=True):
global RESULTS
status = test['status']
    # TODO(sdague): ask lifeless why 'exists' results are skipped here
if status == 'exists':
return
worker = find_worker(test)
name = cleanup_test_name(test['id'])
duration = get_duration(test['timestamps'])
if worker not in RESULTS:
RESULTS[worker] = []
RESULTS[worker].append(test)
    # don't count the process-returncode pseudo-test as a failed test
if name == 'process-returncode':
return
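    # pick the first colorizer that supports this stream, falling back to
    # the no-op NullColorizer when color output is disabled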
for color in [colorizer.AnsiColorizer, colorizer.NullColorizer]:
if not enable_color:
color = colorizer.NullColorizer(stream)
break
if color.supported():
color = color(stream)
break
if status == 'fail' or status == 'uxsuccess':
FAILS.append(test)
if abbreviate:
color.write('F', 'red')
else:
stream.write('{{{}}} {} [{}] ... '.format(
worker, name, duration))
color.write('FAILED', 'red')
stream.write('\n')
if not print_failures:
print_attachments(
stream, test, all_channels=True,
show_binary_attachments=show_binary_attachments)
elif not failonly:
if status == 'success' or status == 'xfail':
if abbreviate:
color.write('.', 'green')
else:
out_string = '{{{}}} {} [{}'.format(worker, name, duration)
perc_diff = find_test_run_time_diff(test['id'], duration)
if enable_diff:
if perc_diff and abs(perc_diff) >= abs(float(threshold)):
if perc_diff > 0:
out_string = out_string + ' +%.2f%%' % perc_diff
else:
out_string = out_string + ' %.2f%%' % perc_diff
stream.write(out_string + '] ... ')
color.write('ok', 'green')
stream.write('\n')
if not suppress_attachments:
print_attachments(
stream, test, all_channels=all_attachments,
show_binary_attachments=show_binary_attachments)
elif status == 'skip':
if abbreviate:
color.write('S', 'blue')
else:
reason = test['details'].get('reason', '')
if reason:
reason = ': ' + reason.as_text()
stream.write('{{{}}} {} ... '.format(
worker, name))
color.write('SKIPPED', 'blue')
stream.write('%s' % (reason))
stream.write('\n')
else:
if abbreviate:
stream.write('%s' % test['status'][0])
else:
stream.write('{{{}}} {} [{}] ... {}\n'.format(
worker, name, duration, test['status']))
if not print_failures:
print_attachments(
stream, test, all_channels=True,
show_binary_attachments=show_binary_attachments)
stream.flush()


def print_fails(stream):
    """Print a summary failure report.

    Used when the --fails option is passed to print failure details after
    the stream has been processed; the debate over inline vs. at-end
    reporting is why both paths exist.
    """
if not FAILS:
return
stream.write("\n==============================\n")
stream.write("Failed %s tests - output below:" % len(FAILS))
stream.write("\n==============================\n")
for f in FAILS:
stream.write("\n%s\n" % f['id'])
stream.write("%s\n" % ('-' * len(f['id'])))
print_attachments(stream, f, all_channels=True)
stream.write('\n')


def count_tests(key, value):
count = 0
for k, v in RESULTS.items():
for item in v:
if key in item:
if re.search(value, item[key]):
count += 1
return count
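

# Example (illustrative): count_tests('status', '^success$') counts the
# passed tests; count_tests('status', '.*') counts every recorded result.
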
def get_stuck_in_progress():
key = 'status'
match = re.compile('^inprogress$')
in_progress = []
for k, v in RESULTS.items():
for item in v:
if key in item:
if match.search(item[key]):
in_progress.append(item['id'])
return in_progress


def run_time():
runtime = 0.0
for k, v in RESULTS.items():
for test in v:
test_dur = get_duration(test['timestamps']).strip('s')
# NOTE(toabctl): get_duration() can return an empty string
# which leads to a ValueError when casting to float
if test_dur:
runtime += float(test_dur)
return runtime


def worker_stats(worker):
tests = RESULTS[worker]
num_tests = len(tests)
stop_time = tests[-1]['timestamps'][1]
start_time = tests[0]['timestamps'][0]
if not start_time or not stop_time:
delta = 'N/A'
else:
delta = stop_time - start_time
return num_tests, str(delta)
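

# Example (illustrative): a worker whose first test started at T and whose
# last test stopped at T+90s reports its test count and "0:01:30" (the str
# of the timedelta); missing timestamps report 'N/A'.
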
def print_summary(stream, elapsed_time):
stream.write("\n======\nTotals\n======\n")
stream.write("Ran: {} tests in {:.4f} sec.\n".format(
count_tests('status', '.*'), total_seconds(elapsed_time)))
stream.write(" - Passed: %s\n" % count_tests('status', '^success$'))
stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$'))
stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$'))
stream.write(" - Unexpected Success: %s\n" % count_tests('status',
'^uxsuccess$'))
stream.write(" - Failed: %s\n" % count_tests('status', '^fail$'))
stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time())
# we could have no results, especially as we filter out the process-codes
if RESULTS:
stream.write("\n==============\nWorker Balance\n==============\n")
for w in range(max(RESULTS.keys()) + 1):
if w not in RESULTS:
stream.write(
" - WARNING: missing Worker %s!\n" % w)
else:
num, time = worker_stats(w)
out_str = " - Worker {} ({} tests) => {}".format(w, num, time)
if time.isdigit():
out_str += 's'
out_str += '\n'
stream.write(out_str)


__version__ = pbr.version.VersionInfo('stestr').version_string()


def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version='%s' % __version__)
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
'debug information in realtime')
    parser.add_argument('--fails', '-f', action='store_true',
                        dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
parser.add_argument('--failonly', action='store_true',
dest='failonly', help="Don't print success items",
default=(
os.environ.get('TRACE_FAILONLY', False)
is not False))
    parser.add_argument('--abbreviate', '-a', action='store_true',
                        dest='abbreviate', help='Print one character status '
                        'for each test')
    parser.add_argument('--perc-diff', '-d', action='store_true',
                        dest='enable_diff',
                        help="Print percent change in run time on each test")
parser.add_argument('--diff-threshold', '-t', dest='threshold',
help="Threshold to use for displaying percent change "
"from the avg run time. If one is not specified "
"the percent change will always be displayed")
    parser.add_argument('--no-summary', action='store_true',
                        help="Don't print the summary of the test run after "
                        "the stream is processed")
parser.add_argument('--color', action='store_true',
help="Print results with colors")
return parser.parse_args()


def trace(stdin, stdout, print_failures=False, failonly=False,
enable_diff=False, abbreviate=False, color=False, post_fails=False,
no_summary=False, suppress_attachments=False, all_attachments=False,
show_binary_attachments=False):
stream = subunit.ByteStreamToStreamResult(
stdin, non_subunit_name='stdout')
outcomes = testtools.StreamToDict(
functools.partial(show_outcome, stdout,
print_failures=print_failures,
failonly=failonly,
enable_diff=enable_diff,
abbreviate=abbreviate,
enable_color=color,
suppress_attachments=suppress_attachments,
all_attachments=all_attachments,
show_binary_attachments=show_binary_attachments))
summary = testtools.StreamSummary()
result = testtools.CopyStreamResult([outcomes, summary])
result = testtools.StreamResultRouter(result)
cat = subunit.test_results.CatFiles(stdout)
result.add_rule(cat, 'test_id', test_id=None)
result.startTestRun()
try:
stream.run(result)
finally:
result.stopTestRun()
    # check this before the min()/max() calls below, which raise ValueError
    # on the empty sequences produced when nothing ran
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    start_times = []
    stop_times = []
    for worker in RESULTS:
        start_times += [
            x['timestamps'][0] for x in RESULTS[worker] if
            x['timestamps'][0] is not None]
        stop_times += [
            x['timestamps'][1] for x in RESULTS[worker] if
            x['timestamps'][1] is not None]
    start_time = min(start_times)
    stop_time = max(stop_times)
    elapsed_time = stop_time - start_time
if post_fails:
print_fails(stdout)
if not no_summary:
print_summary(stdout, elapsed_time)
# NOTE(mtreinish): Ideally this should live in testtools streamSummary
# this is just in place until the behavior lands there (if it ever does)
if count_tests('status', '^success$') == 0:
print("\nNo tests were successful during the run", file=sys.stderr)
return 1
in_progress = get_stuck_in_progress()
if count_tests('status', '^inprogress$') > 0:
print("\nThe following tests exited without returning a status \n"
"and likely segfaulted or crashed Python:", file=sys.stderr)
for test in in_progress:
print("\n\t* %s" % test, file=sys.stderr)
return 1
return 0 if results.wasSuccessful(summary) else 1
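

# trace() returns 1 when no tests ran, when no test succeeded, when any test
# was left 'inprogress' (likely a crashed worker), or when the stream was
# otherwise unsuccessful; it returns 0 for a clean run.
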
def main():
args = parse_args()
    sys.exit(trace(sys.stdin, sys.stdout, args.print_failures, args.failonly,
                   args.enable_diff, args.abbreviate, args.color,
                   args.post_fails, args.no_summary))


if __name__ == '__main__':
main()
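
# Typical invocation (assumed module path; adjust to your install): pipe a
# subunit stream to stdin, e.g.
#
#     python -m stestr.subunit_trace --color < results.subunit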