attributions: refacto deconvnet and guidedbackprop for pylint
Antonin POCHE committed Oct 19, 2023
1 parent 5fc13fd commit 47397a2
Showing 8 changed files with 105 additions and 95 deletions.
2 changes: 1 addition & 1 deletion docs/api/attributions/methods/deconvnet.md
@@ -46,4 +46,4 @@ explanations = method.explain(images, labels)
/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2)
- [**DeconvNet**: Going Further](https://colab.research.google.com/drive/19eB3uwAtCKZgkoWtMzrF0LTJ-htF_KE7)

- {{xplique.attributions.deconvnet.DeconvNet}}
+ {{xplique.attributions.gradient_override.deconvnet.DeconvNet}}
2 changes: 1 addition & 1 deletion docs/api/attributions/methods/guided_backpropagation.md
@@ -45,4 +45,4 @@ explanations = method.explain(images, labels)
/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2)
- [**Guided Backprop**: Going Further](https://colab.research.google.com/drive/19eB3uwAtCKZgkoWtMzrF0LTJ-htF_KE7)

- {{xplique.attributions.guided_backpropagation.GuidedBackprop}}
+ {{xplique.attributions.gradient_override.guided_backpropagation.GuidedBackprop}}
3 changes: 1 addition & 2 deletions xplique/attributions/__init__.py
@@ -8,12 +8,11 @@
from .integrated_gradients import IntegratedGradients
from .occlusion import Occlusion
from .rise import Rise
- from .guided_backpropagation import GuidedBackprop
- from .deconvnet import DeconvNet
from .grad_cam_pp import GradCAMPP
from .lime import Lime
from .kernel_shap import KernelShap
from .object_detector import BoundingBoxesExplainer
from .global_sensitivity_analysis import SobolAttributionMethod, HsicAttributionMethod
+ from .gradient_override import DeconvNet, GuidedBackprop
from .gradient_statistics import SmoothGrad, VarGrad, SquareGrad
from . import global_sensitivity_analysis
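
Since the package root re-exports both classes from the new subpackage, the public import path is unchanged. A quick sketch of what this means for downstream code (nothing assumed beyond the imports visible in this diff):

# Both import paths resolve to the same class after this commit:
from xplique.attributions import DeconvNet
from xplique.attributions.gradient_override import DeconvNet as MovedDeconvNet

assert DeconvNet is MovedDeconvNet  # the refactor keeps the public API stable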
6 changes: 6 additions & 0 deletions xplique/attributions/gradient_override/__init__.py
@@ -0,0 +1,6 @@
"""
Attribution methods based on gradient override
"""

from .deconvnet import DeconvNet
from .guided_backpropagation import GuidedBackprop
40 changes: 40 additions & 0 deletions xplique/attributions/gradient_override/deconvnet.py
@@ -0,0 +1,40 @@
"""
Module related to DeconvNet method
"""

from ...commons import deconv_relu_policy
from ...types import Callable

from .gradient_override import GradientOverride

class DeconvNet(GradientOverride):
"""
Used to compute the DeconvNet method, which modifies the classic Saliency procedure on
ReLU non-linearities, allowing only the positive gradients (even from negative inputs) to
pass through.
Ref. Zeiler et al., Visualizing and Understanding Convolutional Networks (2013).
https://arxiv.org/abs/1311.2901
Parameters
----------
model
The model from which we want to obtain explanations
output_layer
Layer to target for the outputs (e.g. logits or after softmax).
If an `int` is provided, it will be interpreted as a layer index.
If a `string` is provided, it will look for the layer name.
Defaults to the last layer.
It is recommended to use the layer before Softmax.
batch_size
Number of inputs to explain at once, if None compute all at once.
operator
Function g to explain; g takes 3 parameters (f, x, y) and should return a scalar,
with f the model, x the inputs and y the targets. If None, use the standard
operator g(f, x, y) = f(x)[y].
reducer
String, name of the reducer to use. Either "min", "mean", "max" or "sum".
"""

def _get_override_policy(self) -> Callable:
return deconv_relu_policy
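
For orientation, a minimal usage sketch of the relocated class, assuming a trained Keras classifier `model` and tensors `images` and `labels` as in the documentation snippet above (the variable names are placeholders, not part of the diff):

from xplique.attributions import DeconvNet

# Build the explainer; ReLU gradients are overridden once, at construction time.
explainer = DeconvNet(model, batch_size=64)

# One attribution map per input.
explanations = explainer.explain(images, labels)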
xplique/attributions/deconvnet.py → xplique/attributions/gradient_override/gradient_override.py
@@ -1,23 +1,20 @@
"""
- Module related to DeconvNet method
+ Module related to methods overriding gradients
"""

+ from abc import ABC, abstractmethod

import tensorflow as tf
import numpy as np

- from .base import WhiteBoxExplainer, sanitize_input_output
- from ..commons import override_relu_gradient, deconv_relu_policy, Tasks
- from ..types import Union, Optional, OperatorSignature
+ from ..base import WhiteBoxExplainer, sanitize_input_output
+ from ...commons import override_relu_gradient, Tasks
+ from ...types import Union, Optional, OperatorSignature, Callable


- class DeconvNet(WhiteBoxExplainer):
+ class GradientOverride(WhiteBoxExplainer, ABC):
"""
- Used to compute the DeconvNet method, which modifies the classic Saliency procedure on
- ReLU's non linearities, allowing only the positive gradients (even from negative inputs) to
- pass through.
- Ref. Zeiler & al., Visualizing and Understanding Convolutional Networks (2013).
- https://arxiv.org/abs/1311.2901
+ Abstraction between DeconvNet and GuidedBackpropagation.
Parameters
----------
@@ -46,7 +43,12 @@ def __init__(self,
operator: Optional[Union[Tasks, str, OperatorSignature]] = None,
reducer: Optional[str] = "mean",):
super().__init__(model, output_layer, batch_size, operator, reducer)
- self.model = override_relu_gradient(self.model, deconv_relu_policy)
+ override_policy = self._get_override_policy()
+ self.model = override_relu_gradient(self.model, override_policy)

+ @abstractmethod
+ def _get_override_policy(self) -> Callable:
+     raise NotImplementedError

@sanitize_input_output
@WhiteBoxExplainer._harmonize_channel_dimension
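
The abstract `_get_override_policy` hook turns `GradientOverride` into a small template-method base: a subclass only names the ReLU gradient policy it wants. A hypothetical third subclass could look like the sketch below, assuming policies follow the same `tf.custom_gradient` pattern as `deconv_relu_policy` (`identity_relu_policy` is illustrative, not part of the library):

import tensorflow as tf

from xplique.attributions.gradient_override.gradient_override import GradientOverride


@tf.custom_gradient
def identity_relu_policy(inputs):
    """Hypothetical policy: ReLU on the forward pass, ungated gradients backward."""
    def grad(grad_output):
        return grad_output  # let every gradient pass through unchanged
    return tf.nn.relu(inputs), grad


class IdentityBackprop(GradientOverride):
    """Hypothetical subclass: reuses the whole pipeline, swaps only the policy."""

    def _get_override_policy(self):
        return identity_relu_policy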
42 changes: 42 additions & 0 deletions xplique/attributions/gradient_override/guided_backpropagation.py
@@ -0,0 +1,42 @@
"""
Module related to Guided Backpropagation method
"""

from ...commons import guided_relu_policy
from ...types import Callable

from .gradient_override import GradientOverride


class GuidedBackprop(GradientOverride):
"""
Used to compute the Guided Backpropagation method, which modifies the classic Saliency
procedure on ReLU non-linearities, allowing only the positive gradients from positive
activations to pass through.
Ref. Springenberg et al., Striving for Simplicity: The All Convolutional Net (2014).
https://arxiv.org/abs/1412.6806
Parameters
----------
model
The model from which we want to obtain explanations
output_layer
Layer to target for the outputs (e.g. logits or after softmax).
If an `int` is provided, it will be interpreted as a layer index.
If a `string` is provided, it will look for the layer name.
Defaults to the last layer.
It is recommended to use the layer before Softmax.
batch_size
Number of inputs to explain at once, if None compute all at once.
operator
Function g to explain; g takes 3 parameters (f, x, y) and should return a scalar,
with f the model, x the inputs and y the targets. If None, use the standard
operator g(f, x, y) = f(x)[y].
reducer
String, name of the reducer to use. Either "min", "mean", "max" or "sum".
"""

def _get_override_policy(self) -> Callable:
return guided_relu_policy
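
A matching sketch for Guided Backprop, this time exercising the `reducer` parameter documented above (again with placeholder `model`, `images`, and `labels`):

from xplique.attributions import GuidedBackprop

# Same call pattern as DeconvNet; channel attributions reduced with "max".
explainer = GuidedBackprop(model, batch_size=32, reducer="max")
explanations = explainer.explain(images, labels)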
79 changes: 0 additions & 79 deletions xplique/attributions/guided_backpropagation.py

This file was deleted.
