/
analysis.py
538 lines (472 loc) · 26.1 KB
/
analysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
"""
MIT License
Copyright (c) 2021 Terence Parr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
import traceback
import inspect
import hashlib
from pathlib import Path
import matplotlib.pyplot as plt
import tsensor
class clarify:
    """
    Context manager that augments tensor-related exceptions (numpy, pytorch,
    tensorflow) with a causal explanation and, optionally, a visualization of
    the offending statement. See __init__ for full usage details.
    """
    # Prevent nested clarify() calls from processing exceptions.
    # See https://github.com/parrt/tensor-sensor/issues/18
    # Probably will fail with Python `threading` package due to this class var
    # but only if multiple threads call clarify().
    # Multiprocessing forks new processes so not a problem. Each vm has its own class var.
    # Bump in __enter__, drop in __exit__
    nesting = 0

    def __init__(self,
                 fontname=('Consolas', 'DejaVu Sans Mono'), fontsize=13,
                 dimfontname='Arial', dimfontsize=9, char_sep_scale=1.8, fontcolor='#444443',
                 underline_color='#C2C2C2', ignored_color='#B4B4B4', error_op_color='#A40227',
                 show:(None,'viz')='viz',
                 hush_errors=True,
                 dtype_colors=None, dtype_precisions=None, dtype_alpha_range=None):
        """
        Augment tensor-related exceptions generated from numpy, pytorch, and tensorflow.
        Also display a visual representation of the offending Python line that
        shows the shape of tensors referenced by the code. All you have to do is wrap
        the outermost level of your code and clarify() will activate upon exception.

        Visualizations pop up in a separate window unless running from a notebook,
        in which case the visualization appears as part of the cell execution output.

        There is no runtime overhead associated with clarify() unless an exception occurs.
        The offending code is executed a second time, to identify which sub expressions
        are to blame. This implies that code with side effects could conceivably cause
        a problem, but since an exception has been generated, results are suspicious
        anyway.

        Example:

        import numpy as np
        import tsensor

        b = np.array([9, 10]).reshape(2, 1)
        with tsensor.clarify():
            np.dot(b,b) # tensor code or call to a function with tensor code

        See examples.ipynb for more examples.

        :param fontname: The name of the font used to display Python code
        :param fontsize: The font size used to display Python code; default is 13.
                         Also use this to increase the size of the generated figure;
                         larger font size means larger image.
        :param dimfontname: The name of the font used to display the dimensions on the matrix and vector boxes
        :param dimfontsize: The size of the font used to display the dimensions on the matrix and vector boxes
        :param char_sep_scale: It is notoriously difficult to discover how wide and tall
                               text is when plotted in matplotlib. In fact there's probably,
                               no hope to discover this information accurately in all cases.
                               Certainly, I gave up after spending huge effort. We have a
                               situation here where the font should be constant width, so
                               we can just use a simple scaler times the font size to get
                               a reasonable approximation to the width and height of a
                               character box; the default of 1.8 seems to work reasonably
                               well for a wide range of fonts, but you might have to tweak it
                               when you change the font size.
        :param fontcolor: The color of the Python code.
        :param underline_color: The color of the lines that underscore tensor subexpressions; default is grey
        :param ignored_color: The de-highlighted color for deemphasizing code not involved in an erroneous sub expression
        :param error_op_color: The color to use for characters associated with the erroneous operator
        :param show: Show visualization upon tensor error if show='viz'; pass None to
                     only augment the exception message without drawing.
        :param hush_errors: Normally, error messages from true syntax errors but also
                            unhandled code caught by my parser are ignored. Turn this off
                            to see what the error messages are coming from my parser.
        :param dtype_colors: map from dtype w/o precision like 'int' to color
        :param dtype_precisions: list of bit precisions to colorize, such as [32,64,128]
        :param dtype_alpha_range: all tensors of the same type are drawn to the same color,
                                  and the alpha channel is used to show precision; the
                                  smaller the bit size, the lower the alpha channel. You
                                  can play with the range to get better visual dynamic range
                                  depending on how many precisions you want to display.
        """
        self.show, self.fontname, self.fontsize, self.dimfontname, self.dimfontsize, \
            self.char_sep_scale, self.fontcolor, self.underline_color, self.ignored_color, \
            self.error_op_color, self.hush_errors, \
            self.dtype_colors, self.dtype_precisions, self.dtype_alpha_range = \
            show, fontname, fontsize, dimfontname, dimfontsize, \
            char_sep_scale, fontcolor, underline_color, ignored_color, \
            error_op_color, hush_errors, \
            dtype_colors, dtype_precisions, dtype_alpha_range

    def __enter__(self):
        # Remember the caller's frame; NOTE(review): looks unused downstream — confirm.
        self.frame = sys._getframe().f_back # where do we start tracking? Hmm...not sure we use this
        # print("ENTER", clarify.nesting, self.frame, id(self.frame))
        clarify.nesting += 1
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # print("EXIT", clarify.nesting, self.frame, id(self.frame))
        # Only the outermost clarify() processes the exception (issue #18).
        clarify.nesting -= 1
        if clarify.nesting>0:
            return
        if exc_type is None:
            return  # no exception; nothing to explain
        # Walk the traceback back out of numpy/torch/tf/jax internals to the
        # user code line that triggered the library call.
        exc_frame, lib_entry_frame = tensor_lib_entry_frame(exc_traceback)
        if lib_entry_frame is not None or is_interesting_exception(exc_value):
            # print("exception:", exc_value, exc_traceback)
            # traceback.print_tb(exc_traceback, limit=5, file=sys.stdout)
            module, name, filename, line, code = info(exc_frame)
            # print('info', module, name, filename, line, code)
            # print("exc id", id(exc_value))
            if code is not None:
                # Re-parse and re-execute the offending statement to find the
                # failing subexpression, building the visualization as we go.
                self.view = tsensor.viz.pyviz(code, exc_frame,
                                              self.fontname, self.fontsize, self.dimfontname,
                                              self.dimfontsize,
                                              self.char_sep_scale, self.fontcolor,
                                              self.underline_color, self.ignored_color,
                                              self.error_op_color,
                                              hush_errors=self.hush_errors,
                                              dtype_colors=self.dtype_colors,
                                              dtype_precisions=self.dtype_precisions,
                                              dtype_alpha_range=self.dtype_alpha_range)
                if self.view is not None: # Ignore if we can't process code causing exception (I use a subparser)
                    if self.show=='viz':
                        self.view.show()
                    augment_exception(exc_value, self.view.offending_expr)
class explain:
    """
    Context manager that visualizes every tensor-related statement executed
    inside the `with` block via a sys.settrace() listener, and augments any
    tensor exception like clarify(). See __init__ for full usage details.
    """
    def __init__(self,
                 fontname=('Consolas', 'DejaVu Sans Mono'), fontsize=13,
                 dimfontname='Arial', dimfontsize=9, char_sep_scale=1.8, fontcolor='#444443',
                 underline_color='#C2C2C2', ignored_color='#B4B4B4', error_op_color='#A40227',
                 savefig=None, hush_errors=True,
                 dtype_colors=None, dtype_precisions=None, dtype_alpha_range=None):
        """
        As the Python virtual machine executes lines of code, generate a
        visualization for tensor-related expressions using from numpy, pytorch,
        and tensorflow. The shape of tensors referenced by the code are displayed.

        Visualizations pop up in a separate window unless running from a notebook,
        in which case the visualization appears as part of the cell execution output.

        There is heavy runtime overhead associated with explain() as every line
        is executed twice: once by explain() and then another time by the interpreter
        as part of normal execution.

        Expressions with side effects can easily generate incorrect results. Due to
        this and the overhead, you should limit the use of this to code you're trying
        to debug. Assignments are not evaluated by explain so code `x = ...` causes
        an assignment to x just once, during normal execution. This explainer
        knows the value of x and will display it but does not assign to it.

        Upon exception, execution will stop as usual but, like clarify(), explain()
        will augment the exception to indicate the offending sub expression. Further,
        the visualization will deemphasize code not associated with the offending
        sub expression. The sizes of relevant tensor values are still visualized.

        Example:

        import numpy as np
        import tsensor

        b = np.array([9, 10]).reshape(2, 1)
        with tsensor.explain():
            b + b # tensor code or call to a function with tensor code

        See examples.ipynb for more examples.

        :param fontname: The name of the font used to display Python code
        :param fontsize: The font size used to display Python code; default is 13.
                         Also use this to increase the size of the generated figure;
                         larger font size means larger image.
        :param dimfontname: The name of the font used to display the dimensions on the matrix and vector boxes
        :param dimfontsize: The size of the font used to display the dimensions on the matrix and vector boxes
        :param char_sep_scale: It is notoriously difficult to discover how wide and tall
                               text is when plotted in matplotlib. In fact there's probably,
                               no hope to discover this information accurately in all cases.
                               Certainly, I gave up after spending huge effort. We have a
                               situation here where the font should be constant width, so
                               we can just use a simple scaler times the font size to get
                               a reasonable approximation to the width and height of a
                               character box; the default of 1.8 seems to work reasonably
                               well for a wide range of fonts, but you might have to tweak it
                               when you change the font size.
        :param fontcolor: The color of the Python code.
        :param underline_color: The color of the lines that underscore tensor subexpressions; default is grey
        :param ignored_color: The de-highlighted color for deemphasizing code not involved in an erroneous sub expression
        :param error_op_color: The color to use for characters associated with the erroneous operator
        :param hush_errors: Normally, error messages from true syntax errors but also
                            unhandled code caught by my parser are ignored. Turn this off
                            to see what the error messages are coming from my parser.
        :param savefig: A string indicating where to save the visualization; don't save
                        a file if None.
        :param dtype_colors: map from dtype w/o precision like 'int' to color
        :param dtype_precisions: list of bit precisions to colorize, such as [32,64,128]
        :param dtype_alpha_range: all tensors of the same type are drawn to the same color,
                                  and the alpha channel is used to show precision; the
                                  smaller the bit size, the lower the alpha channel. You
                                  can play with the range to get better visual dynamic range
                                  depending on how many precisions you want to display.
        """
        self.savefig, self.fontname, self.fontsize, self.dimfontname, self.dimfontsize, \
            self.char_sep_scale, self.fontcolor, self.underline_color, self.ignored_color, \
            self.error_op_color, self.hush_errors, \
            self.dtype_colors, self.dtype_precisions, self.dtype_alpha_range = \
            savefig, fontname, fontsize, dimfontname, dimfontsize, \
            char_sep_scale, fontcolor, underline_color, ignored_color, \
            error_op_color, hush_errors, \
            dtype_colors, dtype_precisions, dtype_alpha_range

    def __enter__(self):
        # print("ON trace", sys._getframe())
        # Install a global trace listener, and also set f_trace on the caller's
        # frame so already-running code inside the `with` block is traced too.
        self.tracer = ExplainTensorTracer(self)
        sys.settrace(self.tracer.listener)
        frame = sys._getframe()
        prev = frame.f_back # get block wrapped in "with"
        prev.f_trace = self.tracer.listener
        return self.tracer

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # print("OFF trace")
        sys.settrace(None)
        # At this point we have already tried to visualize the statement
        # If there was no error, the visualization will look normal
        # but a matrix operation error will show the erroneous operator highlighted.
        # That was artificial execution of the code. Now the VM has executed
        # the statement for real and has found the same exception. Make sure to
        # augment the message with causal information.
        if exc_type is None:
            return
        exc_frame, lib_entry_frame = tensor_lib_entry_frame(exc_traceback)
        if lib_entry_frame is not None or is_interesting_exception(exc_value):
            # print("exception:", exc_value, exc_traceback)
            # traceback.print_tb(exc_traceback, limit=5, file=sys.stdout)
            module, name, filename, line, code = info(exc_frame)
            # print('info', module, name, filename, line, code)
            if code is not None:
                # We've already displayed picture so just augment message
                root, tokens = tsensor.parsing.parse(code)
                if root is not None: # Could be syntax error in statement or code I can't handle
                    offending_expr = None
                    try:
                        # Re-execute incrementally to trap the failing subexpression
                        root.eval(exc_frame)
                    except tsensor.ast.IncrEvalTrap as e:
                        offending_expr = e.offending_expr
                    augment_exception(exc_value, offending_expr)
class ExplainTensorTracer:
    """
    sys.settrace() listener used by explain(). For each 'line' event, parse the
    source line and, if it is a tensor expression we can handle, generate (and
    optionally save) a visualization. Each unique statement is visualized once.
    """
    def __init__(self, explainer):
        self.explainer = explainer      # the explain() instance holding display options
        self.exceptions = set()
        self.linecount = 0              # number of statements visualized so far
        self.views = []                 # all pyviz views generated during tracing
        # set of hashes for statements already visualized;
        # generate each combination of statement and shapes once
        self.done = set()

    def listener(self, frame, event, arg):
        """Trace callback installed via sys.settrace(); only 'line' events matter."""
        # print("listener", event, ":", frame)
        if event != 'line':
            # It seems that we are getting CALL events even for calls in foo() from:
            # with tsensor.explain(): foo()
            # Must be that we already have a listener and, though we returned None here,
            # somehow the original listener is still getting events. Strange but oh well.
            # We must ignore these.
            return None
        module = frame.f_globals['__name__']
        info = inspect.getframeinfo(frame)
        filename, line = info.filename, info.lineno
        name = info.function
        self.line_listener(module, name, filename, line, info, frame)
        # By returning None, we prevent explain()'ing from descending into
        # invoked functions. In principle, we could allow a certain amount
        # of tracing but I'm not sure that would be super useful.
        return None

    def line_listener(self, module, name, filename, line, info, frame):
        """Visualize the statement about to execute in frame, at most once."""
        code = info.code_context[0].strip()
        if code.startswith("sys.settrace(None)"):
            return  # ignore explain()'s own teardown line
        # Don't generate a statement visualization more than once.
        # Use the stable md5-based hash (not the builtin hash(), which is
        # randomized per process for strings) so dedup is deterministic.
        h = self.hash(code)
        if h in self.done:
            return
        self.done.add(h)
        p = tsensor.parsing.PyExprParser(code)
        t = p.parse()
        if t is not None:
            # Parsed successfully as a tensor expression; draw it
            self.linecount += 1
            self.viz_statement(code, frame)

    def viz_statement(self, code, frame):
        """Render one statement; save to file if explainer.savefig is set, else show."""
        view = tsensor.viz.pyviz(code, frame,
                                 self.explainer.fontname, self.explainer.fontsize,
                                 self.explainer.dimfontname,
                                 self.explainer.dimfontsize,
                                 self.explainer.char_sep_scale, self.explainer.fontcolor,
                                 self.explainer.underline_color, self.explainer.ignored_color,
                                 self.explainer.error_op_color,
                                 hush_errors=self.explainer.hush_errors,
                                 dtype_colors=self.explainer.dtype_colors,
                                 dtype_precisions=self.explainer.dtype_precisions,
                                 dtype_alpha_range=self.explainer.dtype_alpha_range)
        self.views.append(view)
        if self.explainer.savefig is not None:
            # Save as savefig-stem-<linecount>.<suffix> so each statement gets its own file
            file_path = Path(self.explainer.savefig)
            file_path = file_path.parent / f"{file_path.stem}-{self.linecount}{file_path.suffix}"
            view.savefig(file_path)
            view.filename = file_path
            plt.close()
        else:
            view.show()
        return view

    @staticmethod
    def hash(statement):
        """
        We want to avoid generating a visualization more than once.
        For now, assume that the code for a statement is the unique identifier.
        """
        return hashlib.md5(statement.encode('utf-8')).hexdigest()
def eval(statement:str, frame=None) -> (tsensor.ast.ParseTreeNode, object):
    """
    Parse statement and return an ast in the context of execution frame or, if None,
    the invoking function's frame. Set the value field of all ast nodes.
    Overall result is in root.value.

    :param statement: A string representing the line of Python code to visualize within an execution frame.
    :param frame: The execution frame in which to evaluate the statement. If None,
                  use the execution frame of the invoking function
    :return An abstract parse tree representing the statement; nodes are
            ParseTreeNode subclasses.
    """
    parser = tsensor.parsing.PyExprParser(statement)
    tree = parser.parse()
    if frame is None:
        # Default to the caller's frame; _getframe() must stay inline here so
        # the relative frame depth is the caller of eval().
        frame = sys._getframe().f_back
    tree.eval(frame)
    return tree, tree.value
def augment_exception(exc_value, subexpr):
    """
    Append the causal explanation for subexpr to exc_value's message, in place.

    :param exc_value: The exception instance to mutate.
    :param subexpr: A parse-tree node with a clarify() method returning an
                    explanation string or None.
    """
    explanation = subexpr.clarify()
    augment = ""
    if explanation is not None:
        augment = explanation
    # Reuse exception but overwrite the message
    if hasattr(exc_value, "_message"):
        exc_value._message = exc_value.message + "\n" + augment
    elif exc_value.args:
        # args[0] is not guaranteed to be a string (e.g., error-code payloads);
        # coerce before concatenating. (Assigning a list is fine: BaseException
        # converts args to a tuple.)
        exc_value.args = [str(exc_value.args[0]) + "\n" + augment]
    else:
        # Exception raised with no args at all; previously this crashed with
        # IndexError. Just install the explanation as the message.
        exc_value.args = [augment]
def is_interesting_exception(e):
    """
    Heuristic: does exception e look like it came from a tensor shape/size
    problem? True for any tensorflow exception, or when the message contains
    one of the known sentinel substrings emitted by numpy/torch.
    """
    # print(f"is_interesting_exception: type is {type(e)}")
    if e.__class__.__module__.startswith("tensorflow"):
        return True
    sentinels = {'matmul', 'THTensorMath', 'tensor', 'tensors', 'dimension',
                 'not aligned', 'size mismatch', 'shape', 'shapes', 'matrix',
                 'call to _th_addmm'}
    if e.args:
        msg = e.args[0]
    else:
        # Not all exceptions expose .message; previously this raised
        # AttributeError for ordinary no-arg exceptions.
        msg = getattr(e, "message", "")
    msg = str(msg)  # args[0] need not be a string
    return any(s in msg for s in sentinels)
def tensor_lib_entry_frame(exc_traceback):
    """
    Don't trace into internals of numpy/torch/tensorflow/jax; we want to reset frame
    to where in the user's python code it asked the tensor lib to perform an
    invalid operation.

    To detect libraries, look for code whose filename has "site-packages/{package}"
    or "dist-packages/{package}".

    :param exc_traceback: The traceback object from an exception.
    :return: (last-user-frame, first-tensor-lib-frame) if a lib frame is found,
             else (last-user-frame, None).

    Note: Sometimes operators yield exceptions and no tensor lib entry frame. E.g.,
    np.ones(1) @ np.ones(2).
    """
    packages = ['numpy', 'torch', 'tensorflow', 'jax']
    dirs = [os.path.join('site-packages', p) for p in packages]
    dirs += [os.path.join('dist-packages', p) for p in packages]
    dirs += ['<__array_function__']  # numpy seems to not have real filename
    prev = tb = exc_traceback
    while tb is not None:
        filename = tb.tb_frame.f_code.co_filename
        # First frame whose file lives inside a tensor lib marks the lib entry
        if any(d in filename for d in dirs):
            return prev.tb_frame, tb.tb_frame
        prev = tb
        tb = tb.tb_next
    return prev.tb_frame, None
def info(frame):
    """
    Return (module, name, filename, line, code) describing the given frame.
    `code` is None when no source context is available (e.g., exec'd code);
    `module` is None when the frame's globals lack a __name__.
    """
    # Bug fix: frame objects never have a __name__ attribute, so the old
    # `hasattr(frame, '__name__')` check was always False and module was
    # always None. The module name lives in frame.f_globals (cf. the
    # identical lookup in ExplainTensorTracer.listener).
    module = frame.f_globals.get('__name__', None)
    info = inspect.getframeinfo(frame)
    if info.code_context is not None:
        code = info.code_context[0].strip()
    else:
        code = None
    filename, line = info.filename, info.lineno
    name = info.function
    return module, name, filename, line, code
def smallest_matrix_subexpr(t):
    """
    Collect the deepest subtrees of parse tree t that evaluate to a non-scalar.

    During visualization we want the smallest expressions that produce tensor
    values. Because nodes carry no parent pointers, we cannot climb from the
    leaves; instead the recursive helper reports upward whether a node or any
    descendant is non-scalar, appending the "smallest" tensor nodes as it goes.

    :param t: root ParseTreeNode of the statement.
    :return: list of nodes whose values are tensors with no tensor below them.
    """
    matrices = []
    _smallest_matrix_subexpr(t, matrices)
    return matrices
def _smallest_matrix_subexpr(t, nodes) -> bool:
    """
    Recursive helper for smallest_matrix_subexpr(). Appends to `nodes` every
    node whose value is a tensor but which has no tensor-valued descendant.
    Returns True iff t or any of its descendants evaluates to a tensor.
    """
    if t is None:
        return False  # tolerate buggy callers passing a missing subtree
    # Special case: treat `x.T` (transpose member access on an atom) as a
    # single smallest unit rather than descending into it.
    is_transpose = (isinstance(t, tsensor.ast.Member)
                    and isinstance(t.obj, tsensor.ast.Atom)
                    and isinstance(t.member, tsensor.ast.Atom)
                    and str(t.member) == 'T')
    if is_transpose:
        nodes.append(t)
        return True
    if not t.kids:
        # Leaf: it is "smallest" exactly when its own value is a tensor
        if istensor(t.value):
            nodes.append(t)
        return istensor(t.value)
    # Count how many children report a tensor somewhere beneath them
    tensors_below = sum(_smallest_matrix_subexpr(kid, nodes) for kid in t.kids)
    # A tensor-valued node with no tensor-valued descendants is a keeper
    if istensor(t.value) and tensors_below == 0:
        nodes.append(t)
    # Propagate "tensor here or below" to the caller
    return istensor(t.value) or tensors_below > 0
def istensor(x):
    """A value counts as a tensor exactly when _shape() can report a shape for it."""
    shape = _shape(x)
    return shape is not None
def _dtype(v) -> str:
if hasattr(v, "dtype"):
dtype = v.dtype
elif "dtype" in v.__class__.__name__:
dtype = v
else:
return None
if dtype.__class__.__module__ == "torch":
# ugly but works
return str(dtype).replace("torch.", "")
if hasattr(dtype, "names") and dtype.names is not None and hasattr(dtype, "fields"):
# structured dtype: https://numpy.org/devdocs/user/basics.rec.html
return ",".join([_dtype(val) for val, _ in dtype.fields.values()])
return dtype.name
def _shape(v):
# do we have a shape and it answers len()? Should get stuff right.
if hasattr(v, "shape") and hasattr(v.shape, "__len__"):
if v.shape.__class__.__module__ == "torch" and v.shape.__class__.__name__ == "Size":
if len(v.shape)==0:
return None
return list(v.shape)
return v.shape
return None