tests: add tabular data, model and tests
lucashervier authored and fel-thomas committed Jul 29, 2021
1 parent 7756010 commit d56a570
Showing 2 changed files with 141 additions and 0 deletions.
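For orientation, a minimal sketch of the scenario these tests exercise: a small Keras regression model on tabular data explained with an Xplique attribution method. This sketch is not part of the commit; the constructor arguments mirror the calls in the diff below, and the random data stands in for the generate_data helper.

import numpy as np
from tensorflow.keras import Input, Sequential
from tensorflow.keras.layers import Dense, Flatten

from xplique.attributions import Saliency

features_shape, output_shape, samples = (10,), 1, 20

# Small dense regression model, same architecture as the
# generate_regression_model helper added to tests/utils.py.
model = Sequential()
model.add(Input(shape=features_shape))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(output_shape))
model.compile(loss='mean_absolute_error', optimizer='sgd')

# Random tabular inputs and targets standing in for generate_data.
inputs = np.random.rand(samples, *features_shape).astype(np.float32)
targets = np.random.rand(samples, output_shape).astype(np.float32)

# Positional arguments as in the tests: the model, then the output layer index.
explainer = Saliency(model, -1)
explanations = explainer.explain(inputs, targets)
assert explanations.shape == [samples, *features_shape]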
130 changes: 130 additions & 0 deletions tests/attributions/test_tabular_data.py
@@ -0,0 +1,130 @@
import numpy as np
import tensorflow as tf

from xplique.attributions import (Saliency, GradientInput, IntegratedGradients, SmoothGrad, VarGrad,
                                  SquareGrad, Occlusion, Rise, GuidedBackprop, DeconvNet, Lime,
                                  KernelShap)
from ..utils import generate_regression_model, generate_data

def _default_methods(model, output_layer_index):
    return [
        Saliency(model, output_layer_index),
        GradientInput(model, output_layer_index),
        SmoothGrad(model, output_layer_index),
        VarGrad(model, output_layer_index),
        SquareGrad(model, output_layer_index),
        IntegratedGradients(model, output_layer_index),
        GuidedBackprop(model, output_layer_index),
        DeconvNet(model, output_layer_index),
        Lime(model),
        KernelShap(model),
        Occlusion(model, patch_size=1, patch_stride=1),
        # Rise(model)
    ]

def test_tabular_data():
    """Test applied to most attribution methods"""

    features_shape, output_shape, samples = ((10,), 1, 20)
    model = generate_regression_model(features_shape, output_shape)
    output_layer_index = -1

    inputs_np, targets_np = generate_data(features_shape, output_shape, samples)
    inputs_tf, targets_tf = tf.cast(inputs_np, tf.float32), tf.cast(targets_np, tf.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs_np, targets_np))
    # batched_dataset = tf.data.Dataset.from_tensor_slices((inputs_np, targets_np)).batch(4)

    methods = _default_methods(model, output_layer_index)

    for inputs, targets in [(inputs_np, targets_np),
                            (inputs_tf, targets_tf),
                            (dataset, None),
                            # (batched_dataset, None)
                            ]:
        for method in methods:
            try:
                explanations = method.explain(inputs, targets)
            except Exception as exc:
                raise AssertionError(
                    f"Explanation failed for method {method.__class__.__name__}") from exc

            # every attribution method must expose an explain method
            assert hasattr(method, 'explain')

            # explanations must be returned as a tf.Tensor
            assert isinstance(explanations, tf.Tensor)

            # explanations shape should match the features shape
            assert explanations.shape == [samples, *features_shape]

def test_multioutput_regression():
    """Test applied to most attribution methods"""

    features_shape, output_shape, samples = ((10,), 4, 20)
    model = generate_regression_model(features_shape, output_shape=output_shape)
    output_layer_index = -1

    inputs_np, targets_np = generate_data(features_shape, output_shape, samples)
    inputs_tf, targets_tf = tf.cast(inputs_np, tf.float32), tf.cast(targets_np, tf.float32)
    dataset = tf.data.Dataset.from_tensor_slices((inputs_np, targets_np))
    # batched_dataset = tf.data.Dataset.from_tensor_slices((inputs_np, targets_np)).batch(4)

    methods = _default_methods(model, output_layer_index)

    for inputs, targets in [(inputs_np, targets_np),
                            (inputs_tf, targets_tf),
                            (dataset, None),
                            # (batched_dataset, None)
                            ]:
        for method in methods:
            try:
                explanations = method.explain(inputs, targets)
            except Exception as exc:
                raise AssertionError(
                    f"Explanation failed for method {method.__class__.__name__}") from exc

            # every attribution method must expose an explain method
            assert hasattr(method, 'explain')

            # explanations must be returned as a tf.Tensor
            assert isinstance(explanations, tf.Tensor)

            # explanations shape should match the features shape
            assert explanations.shape == [samples, *features_shape]

def test_batch_size():
    """
    Ensure attribution methods work for several batch-size configurations on tabular data
    """

    input_shape, nb_targets, samples = ((10,), 5, 20)
    inputs, targets = generate_data(input_shape, nb_targets, samples)
    model = generate_regression_model(input_shape, nb_targets)
    output_layer_index = -1

    batch_sizes = [None, 1, 32]

    for bs in batch_sizes:

        methods = [
            Saliency(model, output_layer_index, bs),
            GradientInput(model, output_layer_index, bs),
            SmoothGrad(model, output_layer_index, bs),
            VarGrad(model, output_layer_index, bs),
            SquareGrad(model, output_layer_index, bs),
            IntegratedGradients(model, output_layer_index, bs),
            GuidedBackprop(model, output_layer_index, bs),
            DeconvNet(model, output_layer_index, bs),
            Lime(model, bs),
            KernelShap(model, bs),
            Occlusion(model, bs, patch_size=1, patch_stride=1),
            # Rise(model, bs),
        ]

        for method in methods:
            try:
                explanations = method.explain(inputs, targets)
            except Exception as exc:
                raise AssertionError(
                    f"Explanation failed for method {method.__class__.__name__} "
                    f"with batch size {bs}") from exc
11 changes: 11 additions & 0 deletions tests/utils.py
@@ -23,6 +23,17 @@ def generate_model(input_shape=(32, 32, 3), output_shape=10):

return model

def generate_regression_model(features_shape, output_shape=1):
    """Generate a small dense regression model for testing purposes"""
    model = Sequential()
    model.add(Input(shape=features_shape))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(output_shape))
    model.compile(loss='mean_absolute_error',
                  optimizer='sgd')

    return model
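# Illustrative usage (an assumption for clarity, not shown in this diff): the tests
# above build the model as generate_regression_model((10,), output_shape) and pass
# it directly to the Xplique attribution classes. The final Dense layer is linear,
# so the explained output is the raw regression prediction, e.g.
#   model = generate_regression_model((10,), 1)
#   preds = model(np.random.rand(20, 10))   # -> shape (20, 1)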

def almost_equal(arr1, arr2, epsilon=1e-6):
    """Ensure two arrays are almost equal up to an epsilon"""