evaluate.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow as tf
from edward.models import RandomVariable
from edward.util import check_data, get_session, compute_multinomial_mode, \
with_binary_averaging
try:
from edward.models import Bernoulli, Binomial, Categorical, \
Multinomial, OneHotCategorical
except Exception as e:
raise ImportError("{0}. Your TensorFlow version is not supported.".format(e))
def evaluate(metrics, data, n_samples=500, output_key=None, seed=None):
"""Evaluate fitted model using a set of metrics.
A metric, or scoring rule [@winkler1994evaluating], is a function of
observed data under the posterior predictive distribution. For
example, in supervised metrics such as classification accuracy, the
observed data (true output) is compared to the posterior
predictive's mean (predicted output). In unsupervised metrics such
as log-likelihood, the probability of observing the data is
calculated under the posterior predictive's log-density.
Args:
metrics: str, (str, params: dict) tuple, or list of these.
A single metric or a list of metrics:
`'accuracy'` or `'crossentropy'` (automatically resolved to the
binary or sparse categorical variant based on the support of the
observed data),
`'binary_accuracy'`,
`'categorical_accuracy'`,
`'sparse_categorical_accuracy'`,
`'multinomial_accuracy'`,
`'log_loss'` or `'binary_crossentropy'`,
`'categorical_crossentropy'`,
`'sparse_categorical_crossentropy'`,
`'kl_divergence'`,
`'hinge'`,
`'squared_hinge'`,
`'mse'` or `'MSE'` or `'mean_squared_error'`,
`'mae'` or `'MAE'` or `'mean_absolute_error'`,
`'mape'` or `'MAPE'` or `'mean_absolute_percentage_error'`,
`'msle'` or `'MSLE'` or `'mean_squared_logarithmic_error'`,
`'poisson'`,
`'cosine'` or `'cosine_proximity'`,
`'log_lik'` or `'log_likelihood'`.
In lieu of a metric string, this method also accepts (str, params: dict)
tuples; the first element of the tuple is the metric string and the
second is a dict of associated params. At present, this dict only
expects one key, `'average'`, which stipulates the type of averaging to
perform on those metrics that permit binary averaging. Permissible
options are `None`, `'macro'`, and `'micro'`. A user-defined callable
with signature `metric(y_true, y_pred)` is also accepted, as shown at
the end of the examples below.
data: dict.
Data to evaluate model with. It binds observed variables (of type
`RandomVariable` or `tf.Tensor`) to their realizations (of
type `tf.Tensor`). It can also bind placeholders (of type
`tf.Tensor`) used in the model to their realizations.
n_samples: int.
Number of posterior samples for making predictions, using the
posterior predictive distribution.
output_key: RandomVariable.
The key in `data` that corresponds to the model's output.
seed: a Python integer. Used to create a random seed for random
draws in the prediction step.
Returns:
list of float or float.
A list of evaluations or a single evaluation.
Raises:
NotImplementedError.
If an input metric does not match an implemented metric in Edward.
#### Examples
```python
# build posterior predictive after inference: it is
# parameterized by a posterior sample
x_post = ed.copy(x, {z: qz, beta: qbeta})
# log-likelihood performance
ed.evaluate('log_likelihood', data={x_post: x_train})
# classification accuracy
# here, `x_ph` is any features the model is defined with respect to,
# and `y_post` is the posterior predictive distribution
ed.evaluate('binary_accuracy', data={y_post: y_train, x_ph: x_train})
# mean squared error
ed.evaluate('mean_squared_error', data={y: y_data, x: x_data})
# mean squared logarithmic error with `'micro'` averaging
ed.evaluate(('mean_squared_logarithmic_error', {'average': 'micro'}),
            data={y: y_data, x: x_data})
```
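A user-defined metric can also be passed as a callable with the
signature `metric(y_true, y_pred)`; a minimal sketch, where the name
`mean_abs_diff` is purely illustrative:
```python
def mean_abs_diff(y_true, y_pred):
  return tf.reduce_mean(tf.abs(y_true - y_pred))

ed.evaluate(mean_abs_diff, data={y_post: y_train, x_ph: x_train})
```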
"""
sess = get_session()
if isinstance(metrics, str):
metrics = [metrics]
elif callable(metrics):
metrics = [metrics]
elif not isinstance(metrics, list):
raise TypeError("metrics must have type str or list, or be callable.")
check_data(data)
if not isinstance(n_samples, int):
raise TypeError("n_samples must have type int.")
if output_key is None:
# Default output_key to the only data key that isn't a placeholder.
keys = [key for key in six.iterkeys(data) if not
isinstance(key, tf.Tensor) or "Placeholder" not in key.op.type]
if len(keys) == 1:
output_key = keys[0]
else:
raise KeyError("User must specify output_key.")
elif not isinstance(output_key, RandomVariable):
raise TypeError("output_key must have type RandomVariable.")
# Create feed_dict for data placeholders that the model conditions
# on; it is necessary for all session runs.
feed_dict = {key: value for key, value in six.iteritems(data)
if isinstance(key, tf.Tensor) and "Placeholder" in key.op.type}
# Form true data.
y_true = data[output_key]
# Make predictions (if there are any supervised metrics).
if metrics != ['log_lik'] and metrics != ['log_likelihood']:
binary_discrete = (Bernoulli, Binomial)
categorical_discrete = (Categorical, Multinomial, OneHotCategorical)
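# `total_count` defaults to 1 for distributions that do not define it
# (e.g., Bernoulli, Categorical, OneHotCategorical).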
total_count = sess.run(getattr(output_key, 'total_count', tf.constant(1.)))
if isinstance(output_key, binary_discrete + categorical_discrete):
# Average over realizations of their probabilities, then predict
# via argmax over probabilities.
probs = [sess.run(output_key.probs, feed_dict) for _ in range(n_samples)]
probs = np.sum(probs, axis=0) / n_samples
if isinstance(output_key, binary_discrete):
# Make a random prediction whenever probs is exactly 0.5.
random = tf.random_uniform(shape=tf.shape(probs))
y_pred = tf.round(tf.where(tf.equal(0.5, probs), random, probs))
else:
if total_count > 1:
mode = compute_multinomial_mode(probs, total_count, seed)
if len(output_key.sample_shape):
y_pred = tf.reshape(tf.tile(mode, output_key.sample_shape),
[-1, len(probs)])
else:
y_pred = mode
else:
y_pred = tf.argmax(probs, len(probs.shape) - 1)
probs = tf.constant(probs)
else:
# Monte Carlo estimate the mean of the posterior predictive.
y_pred = [sess.run(output_key, feed_dict) for _ in range(n_samples)]
y_pred = tf.cast(tf.add_n(y_pred), y_pred[0].dtype) / \
tf.cast(n_samples, y_pred[0].dtype)
if len(y_true.shape) == 0:
y_true = tf.expand_dims(y_true, 0)
y_pred = tf.expand_dims(y_pred, 0)
# Evaluate y_true (according to y_pred if supervised) for all metrics.
evaluations = []
for metric in metrics:
if isinstance(metric, tuple):
metric, params = metric
else:
params = {}
if metric == 'accuracy' or metric == 'crossentropy':
# Automatically choose the binary or sparse categorical variant
# based on the support of `y_true`.
support = sess.run(tf.reduce_max(y_true), feed_dict)
if support <= 1:
metric = 'binary_' + metric
else:
metric = 'sparse_categorical_' + metric
if metric == 'binary_accuracy':
evaluations += [binary_accuracy(y_true, y_pred, **params)]
elif metric == 'categorical_accuracy':
evaluations += [categorical_accuracy(y_true, y_pred, **params)]
elif metric == 'sparse_categorical_accuracy':
evaluations += [sparse_categorical_accuracy(y_true, y_pred, **params)]
elif metric == 'log_loss' or metric == 'binary_crossentropy':
evaluations += [binary_crossentropy(y_true, y_pred, **params)]
elif metric == 'categorical_crossentropy':
evaluations += [categorical_crossentropy(y_true, y_pred, **params)]
elif metric == 'sparse_categorical_crossentropy':
evaluations += [sparse_categorical_crossentropy(y_true, y_pred, **params)]
elif metric == 'multinomial_accuracy':
evaluations += [multinomial_accuracy(y_true, y_pred, **params)]
elif metric == 'kl_divergence':
y_true_ = y_true / total_count
y_pred_ = probs
evaluations += [kl_divergence(y_true_, y_pred_, **params)]
elif metric == 'hinge':
evaluations += [hinge(y_true, y_pred, **params)]
elif metric == 'squared_hinge':
evaluations += [squared_hinge(y_true, y_pred, **params)]
elif (metric == 'mse' or metric == 'MSE' or
metric == 'mean_squared_error'):
evaluations += [mean_squared_error(y_true, y_pred, **params)]
elif (metric == 'mae' or metric == 'MAE' or
metric == 'mean_absolute_error'):
evaluations += [mean_absolute_error(y_true, y_pred, **params)]
elif (metric == 'mape' or metric == 'MAPE' or
metric == 'mean_absolute_percentage_error'):
evaluations += [mean_absolute_percentage_error(y_true, y_pred, **params)]
elif (metric == 'msle' or metric == 'MSLE' or
metric == 'mean_squared_logarithmic_error'):
evaluations += [mean_squared_logarithmic_error(y_true, y_pred, **params)]
elif metric == 'poisson':
evaluations += [poisson(y_true, y_pred, **params)]
elif metric == 'cosine' or metric == 'cosine_proximity':
evaluations += [cosine_proximity(y_true, y_pred, **params)]
elif metric == 'log_lik' or metric == 'log_likelihood':
# Monte Carlo estimate the log-density of the posterior predictive.
tensor = tf.reduce_mean(output_key.log_prob(y_true))
log_pred = [sess.run(tensor, feed_dict) for _ in range(n_samples)]
log_pred = tf.add_n(log_pred) / tf.cast(n_samples, tensor.dtype)
evaluations += [log_pred]
elif callable(metric):
evaluations += [metric(y_true, y_pred, **params)]
else:
raise NotImplementedError("Metric is not implemented: {}".format(metric))
if len(evaluations) == 1:
return sess.run(evaluations[0], feed_dict)
else:
return sess.run(evaluations, feed_dict)
# Classification metrics
def binary_accuracy(y_true, y_pred):
"""Binary prediction accuracy, also known as 0/1-loss.
Args:
y_true: tf.Tensor.
Tensor of 0s and 1s (most generally, any real values a and b).
y_pred: tf.Tensor.
Tensor of predictions, with same shape as `y_true`.
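#### Examples
A minimal sketch, assuming an active TensorFlow session `sess`
(e.g., `sess = ed.get_session()`):
```python
y_true = tf.constant([0.0, 1.0, 1.0, 0.0])
y_pred = tf.constant([0.0, 1.0, 0.0, 0.0])
sess.run(binary_accuracy(y_true, y_pred))  # => 0.75
```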
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
def categorical_accuracy(y_true, y_pred):
"""Multi-class prediction accuracy. One-hot representation for `y_true`.
Args:
y_true: tf.Tensor.
Tensor of 0s and 1s, where the outermost dimension of size `K`
has only one 1 per row.
y_pred: tf.Tensor.
Tensor of predictions, with shape `y_true.shape[:-1]`. Each
entry is an integer {0, 1, ..., K-1}.
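#### Examples
A minimal sketch (`sess` is an active session):
```python
y_true = tf.constant([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])  # one-hot labels
y_pred = tf.constant([1.0, 0.0, 0.0])  # predicted class indices
sess.run(categorical_accuracy(y_true, y_pred))  # => ~0.667
```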
"""
y_true = tf.cast(tf.argmax(y_true, len(y_true.shape) - 1), tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
def sparse_categorical_accuracy(y_true, y_pred):
"""Multi-class prediction accuracy. Label {0, 1, .., K-1}
representation for `y_true`.
Args:
y_true: tf.Tensor.
Tensor of integers {0, 1, ..., K-1}.
y_pred: tf.Tensor.
Tensor of predictions, with same shape as `y_true`.
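#### Examples
A minimal sketch (`sess` is an active session):
```python
y_true = tf.constant([2, 1, 0])
y_pred = tf.constant([2, 1, 1])
sess.run(sparse_categorical_accuracy(y_true, y_pred))  # => ~0.667
```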
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
# Classification metrics (with real-valued predictions)
def binary_crossentropy(y_true, y_pred):
"""Binary cross-entropy.
Args:
y_true: tf.Tensor.
Tensor of 0s and 1s.
y_pred: tf.Tensor.
Tensor of real values (logit probabilities), with same shape as
`y_true`.
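#### Examples
A minimal sketch; note that `y_pred` holds logits, not probabilities:
```python
y_true = tf.constant([0.0, 1.0])
y_pred = tf.constant([-1.5, 2.0])  # logits
sess.run(binary_crossentropy(y_true, y_pred))  # => ~0.16
```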
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=y_pred, labels=y_true))
def categorical_crossentropy(y_true, y_pred):
"""Multi-class cross entropy. One-hot representation for `y_true`.
Args:
y_true: tf.Tensor.
Tensor of 0s and 1s, where the outermost dimension of size K
has only one 1 per row.
y_pred: tf.Tensor.
Tensor of real values (logit probabilities), with same shape as
`y_true`. The outermost dimension is the number of classes.
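#### Examples
A minimal sketch, with one row of logits per one-hot label:
```python
y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # one-hot labels
y_pred = tf.constant([[2.0, 0.0], [0.5, 1.5]])  # logits
sess.run(categorical_crossentropy(y_true, y_pred))  # => ~0.22
```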
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true))
def sparse_categorical_crossentropy(y_true, y_pred):
"""Multi-class cross entropy. Label {0, 1, .., K-1} representation
for `y_true.`
Args:
y_true: tf.Tensor.
Tensor of integers {0, 1, ..., K-1}.
y_pred: tf.Tensor.
Tensor of real values (logit probabilities), with shape
`(y_true.shape, K)`. The outermost dimension is the number of classes.
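#### Examples
A minimal sketch, with one row of logits per integer label:
```python
y_true = tf.constant([0, 1])
y_pred = tf.constant([[2.0, 0.0], [0.5, 1.5]])  # logits
sess.run(sparse_categorical_crossentropy(y_true, y_pred))  # => ~0.22
```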
"""
y_true = tf.cast(y_true, tf.int64)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=y_pred, labels=y_true))
def multinomial_accuracy(y_true, y_pred):
"""Multinomial prediction accuracy. `y_true` is a tensor
of integers, where the outermost dimension gives a draw
from a Multinomial distribution.
NB: when evaluating the accuracy between two Multinomials, results
may vary across evaluations. This is because Edward's algorithm for
computing `y_pred`, i.e., the Multinomial mode, yields variable
results if `any(isinstance(p, float) for p in total_count * probs)`
(where `probs` is the vector of predicted Multinomial probabilities).
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))
def kl_divergence(y_true, y_pred):
"""Kullback-Leibler divergence between two probability distributions. A
vector of probabilities for `y_true`.
Args:
y_true: tf.Tensor.
Tensor of real values (probabilities) where the values in each row
of the outermost dimension sum to 1.
y_pred: tf.Tensor.
Same as `y_true`, and with the same shape.
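#### Examples
A minimal sketch comparing two probability vectors:
```python
y_true = tf.constant([0.5, 0.5])
y_pred = tf.constant([0.25, 0.75])
sess.run(kl_divergence(y_true, y_pred))  # => ~0.14
```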
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
zeros = tf.zeros(shape=tf.shape(y_true))
summand = tf.where(tf.equal(y_true, 0.0), zeros,
y_true * (tf.log(y_true) - tf.log(y_pred)))
return tf.reduce_sum(summand)
def hinge(y_true, y_pred):
"""Hinge loss.
Args:
y_true: tf.Tensor.
Tensor of 0s and 1s.
y_pred: tf.Tensor.
Tensor of real values, with same shape as `y_true`.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.maximum(1.0 - y_true * y_pred, 0.0))
def squared_hinge(y_true, y_pred):
"""Squared hinge loss.
Args:
y_true: tf.Tensor.
Tensor of 0s and 1s.
y_pred: tf.Tensor.
Tensor of real values, with same shape as `y_true`.
"""
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
return tf.reduce_mean(tf.square(tf.maximum(1.0 - y_true * y_pred, 0.0)))
# Regression metrics
@with_binary_averaging
def mean_squared_error(y_true, y_pred):
"""Mean squared error loss.
Args:
y_true: tf.Tensor.
y_pred: tf.Tensor.
Tensors of same shape and type.
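#### Examples
A minimal sketch; inputs are rank-2 (`[batch, outputs]`) since the
mean is taken over axis `-2`, and the `average` parameter added by
`with_binary_averaging` controls how per-output errors are combined
(an assumption based on the decorator's usage in `evaluate`):
```python
y_true = tf.constant([[0.0], [1.0], [2.0]])
y_pred = tf.constant([[0.5], [1.0], [1.5]])
sess.run(mean_squared_error(y_true, y_pred))  # per-output MSE: (0.25 + 0.0 + 0.25) / 3
```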
"""
return tf.reduce_mean(tf.square(y_pred - y_true), axis=-2)
@with_binary_averaging
def mean_absolute_error(y_true, y_pred):
"""Mean absolute error loss.
Args:
y_true: tf.Tensor.
y_pred: tf.Tensor.
Tensors of same shape and type.
"""
return tf.reduce_mean(tf.abs(y_pred - y_true), axis=-2)
@with_binary_averaging
def mean_absolute_percentage_error(y_true, y_pred):
"""Mean absolute percentage error loss.
Args:
y_true: tf.Tensor.
y_pred: tf.Tensor.
Tensors of same shape and type.
"""
diff = tf.abs((y_true - y_pred) / tf.clip_by_value(tf.abs(y_true),
1e-8, np.inf))
return 100.0 * tf.reduce_mean(diff, axis=-2)
@with_binary_averaging
def mean_squared_logarithmic_error(y_true, y_pred):
"""Mean squared logarithmic error loss.
Args:
y_true: tf.Tensor.
y_pred: tf.Tensor.
Tensors of same shape and type.
"""
first_log = tf.log(tf.clip_by_value(y_pred, 1e-8, np.inf) + 1.0)
second_log = tf.log(tf.clip_by_value(y_true, 1e-8, np.inf) + 1.0)
return tf.reduce_mean(tf.square(first_log - second_log), axis=-2)
def poisson(y_true, y_pred):
"""Negative Poisson log-likelihood of data `y_true` given predictions
`y_pred` (up to proportion).
Args:
y_true: tf.Tensor.
y_pred: tf.Tensor.
Tensors of same shape and type.
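#### Examples
A minimal sketch, where `y_pred` holds predicted rates:
```python
y_true = tf.constant([1.0, 2.0])  # observed counts
y_pred = tf.constant([1.0, 1.5])  # predicted rates
sess.run(poisson(y_true, y_pred))  # => ~1.69
```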
"""
return tf.reduce_sum(y_pred - y_true * tf.log(y_pred + 1e-8))
def cosine_proximity(y_true, y_pred):
"""Cosine similarity of two vectors.
Args:
y_true: tf.Tensor.
y_pred: tf.Tensor.
Tensors of same shape and type.
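#### Examples
A minimal sketch; both vectors are L2-normalized before the dot product:
```python
y_true = tf.constant([1.0, 0.0])
y_pred = tf.constant([1.0, 1.0])
sess.run(cosine_proximity(y_true, y_pred))  # => ~0.71
```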
"""
y_true = tf.nn.l2_normalize(y_true, len(y_true.shape) - 1)
y_pred = tf.nn.l2_normalize(y_pred, len(y_pred.shape) - 1)
return tf.reduce_sum(y_true * y_pred)