11 changes: 11 additions & 0 deletions .circleci/config.yml
@@ -38,6 +38,13 @@ commands:
name: "Lint with black"
command: black --check --diff .

isort:
description: "Check import order with isort"
steps:
- run:
name: "Check import order with isort"
command: isort --check-only

mypy_check:
description: "Static type checking with mypy"
steps:
@@ -132,6 +139,7 @@ jobs:
args: "-n -f"
- lint_flake8
- lint_black
- isort
- mypy_check
- unit_tests
- sphinx
@@ -145,6 +153,7 @@ jobs:
args: "-f"
- lint_flake8
- lint_black
- isort
- mypy_check
- unit_tests
- sphinx
@@ -176,6 +185,7 @@ jobs:
args: "-n"
- lint_flake8
- lint_black
- isort
- mypy_check
- unit_tests
- sphinx
@@ -200,6 +210,7 @@ jobs:
args: "-n -f -d"
- lint_flake8
- lint_black
- isort
- unit_tests
- sphinx
- configure_github_bot
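These config changes wire the new isort step into the CI jobs alongside the existing lint steps. As a rough local equivalent, the sketch below runs the same gate before pushing; the black and isort commands are copied verbatim from the config above, while the flake8 and mypy invocations are assumptions, not taken from this diff:

```python
#!/usr/bin/env python3
"""Sketch: run the CI lint gate locally before pushing."""
import subprocess
import sys

# black and isort commands match the CircleCI steps above;
# the flake8 and mypy invocations are assumed.
CHECKS = [
    ["black", "--check", "--diff", "."],
    ["isort", "--check-only"],
    ["flake8", "."],
    ["mypy", "."],
]

failed = False
for cmd in CHECKS:
    print("$", " ".join(cmd))
    if subprocess.run(cmd).returncode != 0:
        failed = True

sys.exit(1 if failed else 0)
```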
7 changes: 7 additions & 0 deletions .isort.cfg
@@ -0,0 +1,7 @@
[settings]
multi_line_output=3
include_trailing_comma=True
force_grid_wrap=0
use_parentheses=True
line_length=88
known_third_party=pytext,torchvision,bs4
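For reference: multi_line_output=3 selects isort's "vertical hanging indent" wrapping mode, and together with include_trailing_comma, use_parentheses, and line_length=88 it matches black's formatting, so the two tools agree on how long imports are wrapped. When a wrap is forced under these settings, an import takes this shape (stdlib names used purely for illustration):

```python
from collections.abc import (
    AsyncGenerator,
    AsyncIterable,
    AsyncIterator,
    Awaitable,
)
```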
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -35,7 +35,7 @@ pip using `pip install isort`, and run locally by calling
```bash
isort
```
from the repository root. No additional configuration should be needed.
from the repository root. Configuration for isort is located in .isort.cfg.

We feel strongly that having a consistent code style is extremely important, so
CircleCI will fail on your PR if it does not adhere to the black and flake8 formatting styles or the isort import ordering.
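Besides the CLI, isort can also be driven from Python, e.g. to assert ordering in a test or a pre-commit hook. A minimal sketch, assuming the isort >= 5 Python API (`isort.code`) and a hypothetical input snippet:

```python
import isort  # assumes isort >= 5, which exposes a Python API

# Hypothetical unsorted snippet; torchvision is listed under
# known_third_party in the repo's .isort.cfg.
messy = "import torchvision\nimport os\n"

fixed = isort.code(messy, known_third_party=["torchvision"])
print(fixed)

# The stdlib block (os) should come before the third-party block
# (torchvision), separated by a blank line.
assert fixed.index("import os") < fixed.index("import torchvision")
```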
55 changes: 28 additions & 27 deletions captum/attr/__init__.py
@@ -1,51 +1,52 @@
#!/usr/bin/env python3

from ._core.integrated_gradients import IntegratedGradients # noqa
from ._core.deep_lift import DeepLift, DeepLiftShap # noqa
from ._core.input_x_gradient import InputXGradient # noqa
from ._core.saliency import Saliency # noqa
from ._core.noise_tunnel import NoiseTunnel # noqa
from ._core.gradient_shap import GradientShap # noqa
from ._core.guided_backprop_deconvnet import GuidedBackprop, Deconvolution # noqa
from ._core.guided_grad_cam import GuidedGradCam # noqa
from ._core.feature_ablation import FeatureAblation # noqa
from ._core.feature_permutation import FeaturePermutation # noqa
from ._core.occlusion import Occlusion # noqa
from ._core.shapley_value import ShapleyValueSampling, ShapleyValues # noqa
from ._core.layer.layer_conductance import LayerConductance # noqa
from ._core.layer.layer_gradient_x_activation import LayerGradientXActivation # noqa
from ._core.layer.layer_activation import LayerActivation # noqa
from ._core.layer.internal_influence import InternalInfluence # noqa
from ._core.gradient_shap import GradientShap # noqa
from ._core.guided_backprop_deconvnet import Deconvolution # noqa
from ._core.guided_backprop_deconvnet import GuidedBackprop
from ._core.guided_grad_cam import GuidedGradCam # noqa
from ._core.input_x_gradient import InputXGradient # noqa
from ._core.integrated_gradients import IntegratedGradients # noqa
from ._core.layer.grad_cam import LayerGradCam # noqa
from ._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap # noqa
from ._core.layer.internal_influence import InternalInfluence # noqa
from ._core.layer.layer_activation import LayerActivation # noqa
from ._core.layer.layer_conductance import LayerConductance # noqa
from ._core.layer.layer_deep_lift import LayerDeepLift # noqa
from ._core.layer.layer_deep_lift import LayerDeepLiftShap
from ._core.layer.layer_gradient_shap import LayerGradientShap # noqa
from ._core.layer.layer_gradient_x_activation import LayerGradientXActivation # noqa
from ._core.layer.layer_integrated_gradients import LayerIntegratedGradients # noqa
from ._core.neuron.neuron_feature_ablation import NeuronFeatureAblation # noqa
from ._core.neuron.neuron_conductance import NeuronConductance # noqa
from ._core.neuron.neuron_deep_lift import NeuronDeepLift # noqa
from ._core.neuron.neuron_deep_lift import NeuronDeepLiftShap
from ._core.neuron.neuron_feature_ablation import NeuronFeatureAblation # noqa
from ._core.neuron.neuron_gradient import NeuronGradient # noqa
from ._core.neuron.neuron_integrated_gradients import NeuronIntegratedGradients # noqa
from ._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap # noqa
from ._core.neuron.neuron_gradient_shap import NeuronGradientShap # noqa
from ._core.neuron.neuron_guided_backprop_deconvnet import (
from ._core.neuron.neuron_guided_backprop_deconvnet import ( # noqa
NeuronDeconvolution,
NeuronGuidedBackprop,
) # noqa

)
from ._core.neuron.neuron_integrated_gradients import NeuronIntegratedGradients # noqa
from ._core.noise_tunnel import NoiseTunnel # noqa
from ._core.occlusion import Occlusion # noqa
from ._core.saliency import Saliency # noqa
from ._core.shapley_value import ShapleyValues, ShapleyValueSampling # noqa
from ._models.base import InterpretableEmbeddingBase # noqa
from ._models.base import (
InterpretableEmbeddingBase,
TokenReferenceBase,
configure_interpretable_embedding_layer,
remove_interpretable_embedding_layer,
) # noqa
)
from ._utils import visualization # noqa
from ._utils.attribution import Attribution # noqa
from ._utils.attribution import GradientAttribution # noqa
from ._utils.attribution import PerturbationAttribution # noqa
from ._utils.attribution import LayerAttribution # noqa
from ._utils.attribution import NeuronAttribution # noqa
from ._utils import visualization # noqa
from ._utils.summarizer import Summarizer, CommonSummarizer
from ._utils.stat import Mean, StdDev, MSE, Var, Min, Max, Sum, Count

from ._utils.attribution import PerturbationAttribution # noqa
from ._utils.stat import MSE, Count, Max, Mean, Min, StdDev, Sum, Var
from ._utils.summarizer import CommonSummarizer, Summarizer

__all__ = [
"Attribution",
33 changes: 16 additions & 17 deletions captum/attr/_core/deep_lift.py
@@ -1,41 +1,40 @@
#!/usr/bin/env python3
import typing
from typing import Tuple, Union, Any, List, Callable, cast

import warnings
from typing import Any, Callable, List, Tuple, Union, cast

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle

import numpy as np

from .._utils.attribution import GradientAttribution
from .._utils.common import (
_is_tuple,
_format_input,
ExpansionTypes,
_call_custom_attribution_func,
_compute_conv_delta_and_format_attrs,
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_attributions,
_format_baseline,
_format_callable_baseline,
_format_attributions,
_format_input,
_format_tensor_into_tuples,
_format_additional_forward_args,
_is_tuple,
_run_forward,
_validate_input,
_expand_target,
_expand_additional_forward_args,
_tensorize_baseline,
_call_custom_attribution_func,
_compute_conv_delta_and_format_attrs,
ExpansionTypes,
_validate_input,
)
from .._utils.attribution import GradientAttribution
from .._utils.gradient import apply_gradient_requirements, undo_gradient_requirements
from .._utils.typing import (
TensorOrTupleOfTensorsGeneric,
BaselineType,
Literal,
TargetType,
BaselineType,
TensorOrTupleOfTensorsGeneric,
)


15 changes: 7 additions & 8 deletions captum/attr/_core/feature_ablation.py
@@ -1,24 +1,23 @@
#!/usr/bin/env python3

import torch
from typing import Any, Callable, Tuple, Union, cast

import torch
from torch import Tensor, dtype

from typing import Any, Callable, Tuple, Union, cast

from .._utils.attribution import PerturbationAttribution
from .._utils.common import (
_expand_additional_forward_args,
_expand_target,
_find_output_mode_and_verify,
_format_additional_forward_args,
_format_attributions,
_format_input,
_format_input_baseline,
_is_tuple,
_run_forward,
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
)
from .._utils.attribution import PerturbationAttribution
from .._utils.typing import TensorOrTupleOfTensorsGeneric, TargetType, BaselineType
from .._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric


class FeatureAblation(PerturbationAttribution):
2 changes: 1 addition & 1 deletion captum/attr/_core/feature_permutation.py
@@ -4,8 +4,8 @@
import torch
from torch import Tensor

from .feature_ablation import FeatureAblation
from .._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from .feature_ablation import FeatureAblation


def _permute_feature(x: Tensor, feature_mask: Tensor) -> Tensor:
21 changes: 10 additions & 11 deletions captum/attr/_core/gradient_shap.py
@@ -1,26 +1,25 @@
#!/usr/bin/env python3
import torch
import typing
from typing import Any, Callable, Tuple, Union

import numpy as np
import torch

from .._utils.attribution import GradientAttribution
from .._utils.common import (
_is_tuple,
_format_input_baseline,
_format_callable_baseline,
_compute_conv_delta_and_format_attrs,
_format_callable_baseline,
_format_input_baseline,
_is_tuple,
)

from .noise_tunnel import NoiseTunnel
from typing import Any, Callable, Tuple, Union
from .._utils.typing import (
Tensor,
TensorOrTupleOfTensorsGeneric,
BaselineType,
Literal,
TargetType,
BaselineType,
Tensor,
TensorOrTupleOfTensorsGeneric,
)
import typing
from .noise_tunnel import NoiseTunnel


class GradientShap(GradientAttribution):
6 changes: 3 additions & 3 deletions captum/attr/_core/guided_backprop_deconvnet.py
@@ -1,15 +1,15 @@
#!/usr/bin/env python3
import warnings
from typing import Any, List, Tuple, Union

import torch
import torch.nn.functional as F
from typing import Any, List, Union, Tuple

from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle

from .._utils.attribution import GradientAttribution
from .._utils.common import _format_input, _format_attributions, _is_tuple
from .._utils.common import _format_attributions, _format_input, _is_tuple
from .._utils.gradient import apply_gradient_requirements, undo_gradient_requirements
from .._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric

17 changes: 8 additions & 9 deletions captum/attr/_core/guided_grad_cam.py
@@ -1,17 +1,16 @@
#!/usr/bin/env python3
import torch
import warnings
from typing import Any, List, Union

from .._utils.attribution import GradientAttribution, LayerAttribution
from .._utils.common import _format_input, _format_attributions, _is_tuple

from .layer.grad_cam import LayerGradCam
from .guided_backprop_deconvnet import GuidedBackprop

from torch.nn import Module
import torch
from torch import Tensor
from typing import Any, List, Union
from torch.nn import Module

from .._utils.attribution import GradientAttribution, LayerAttribution
from .._utils.common import _format_attributions, _format_input, _is_tuple
from .._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from .guided_backprop_deconvnet import GuidedBackprop
from .layer.grad_cam import LayerGradCam


class GuidedGradCam(GradientAttribution):
2 changes: 1 addition & 1 deletion captum/attr/_core/input_x_gradient.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
from typing import Any, Callable

from .._utils.common import _format_input, _format_attributions, _is_tuple
from .._utils.attribution import GradientAttribution
from .._utils.common import _format_attributions, _format_input, _is_tuple
from .._utils.gradient import apply_gradient_requirements, undo_gradient_requirements
from .._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric

14 changes: 7 additions & 7 deletions captum/attr/_core/integrated_gradients.py
@@ -6,23 +6,23 @@
from torch import Tensor

from .._utils.approximation_methods import approximation_parameters
from .._utils.attribution import GradientAttribution
from .._utils.batching import _batched_operator
from .._utils.common import (
_validate_input,
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_attributions,
_format_input_baseline,
_reshape_and_sum,
_expand_additional_forward_args,
_expand_target,
_is_tuple,
_reshape_and_sum,
_validate_input,
)
from .._utils.attribution import GradientAttribution
from .._utils.typing import (
TensorOrTupleOfTensorsGeneric,
BaselineType,
Literal,
TargetType,
BaselineType,
TensorOrTupleOfTensorsGeneric,
)


11 changes: 6 additions & 5 deletions captum/attr/_core/layer/grad_cam.py
@@ -1,19 +1,20 @@
#!/usr/bin/env python3
from typing import Callable, List, Tuple, Union, Any
from typing import Any, Callable, List, Tuple, Union

import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module
import torch.nn.functional as F

from ..._utils.attribution import LayerAttribution, GradientAttribution
from ..._utils.attribution import GradientAttribution, LayerAttribution
from ..._utils.common import (
_format_input,
_format_additional_forward_args,
_format_attributions,
_format_input,
)
from ..._utils.gradient import (
compute_layer_gradients_and_eval,
apply_gradient_requirements,
compute_layer_gradients_and_eval,
undo_gradient_requirements,
)
from ..._utils.typing import TargetType