From b196bbe3d0d1814163c490ada9cae1419acc4ada Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Tue, 24 May 2022 18:22:08 -0600 Subject: [PATCH 01/42] Miscellaneous Fixes * Wrap all remaining `torch.__version__` calls in `version.parse`. * Remove unused version check in `typing.py`. * Expose `MaxPool2dRelaxed` to users so that tutorials using it work. * Expose `dataset` module to users. * Fixed `show` & `save_tensor_as_image` docs. --- captum/optim/__init__.py | 2 ++ captum/optim/_param/image/images.py | 11 +++++----- captum/optim/_utils/image/common.py | 8 ++++---- captum/optim/_utils/image/dataset.py | 15 ++++++++++---- captum/optim/_utils/typing.py | 20 ++++++++----------- captum/optim/models/__init__.py | 2 ++ .../optim/models/test_googlenet_places365.py | 18 ++++++++--------- tests/optim/param/test_images.py | 2 +- tests/optim/param/test_transforms.py | 2 +- 9 files changed, 44 insertions(+), 36 deletions(-) diff --git a/captum/optim/__init__.py b/captum/optim/__init__.py index 9177d5c62c..828ac03dd2 100644 --- a/captum/optim/__init__.py +++ b/captum/optim/__init__.py @@ -7,6 +7,7 @@ from captum.optim._param.image.images import ImageTensor # noqa: F401 from captum.optim._utils import circuits, reducer # noqa: F401 from captum.optim._utils.image import atlas # noqa: F401 +from captum.optim._utils.image import dataset # noqa: F401 from captum.optim._utils.image.common import ( # noqa: F401 hue_to_rgb, make_grid_image, @@ -28,6 +29,7 @@ "reducer", "make_grid_image", "atlas", + "dataset", "hue_to_rgb", "nchannels_to_rgb", "save_tensor_as_image", diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index fa313b38af..3fade94f64 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -117,10 +117,11 @@ def show( grid image. Default is set to None for no grid image creation. Default: None padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if `nrow` is not None. + images. This parameter only has an effect if `images_per_row` is not + None. Default: 2 pad_value (float, optional): The value to use for the padding. This - parameter only has an effect if `nrow` is not None. + parameter only has an effect if `images_per_row` is not None. Default: 0.0 """ show( @@ -158,10 +159,10 @@ def export( grid image. Default is set to None for no grid image creation. Default: None padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if `nrow` is not None. - Default: 2 + images. This parameter only has an effect if `images_per_row` is not + None. pad_value (float, optional): The value to use for the padding. This - parameter only has an effect if `nrow` is not None. + parameter only has an effect if `images_per_row` is not None. Default: 0.0 """ save_tensor_as_image( diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index f1cdc5f477..39a6ada5ea 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -90,10 +90,10 @@ def show( grid image. Default is set to None for no grid image creation. Default: None padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if nrow is not None. + images. This parameter only has an effect if `images_per_row` is not None. Default: 2 pad_value (float, optional): The value to use for the padding. 
This parameter - only has an effect if nrow is not None. + only has an effect if `images_per_row` is not None. Default: 0.0 """ @@ -140,10 +140,10 @@ def save_tensor_as_image( grid image. Default is set to None for no grid image creation. Default: None padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if `nrow` is not None. + images. This parameter only has an effect if `images_per_row` is not None. Default: 2 pad_value (float, optional): The value to use for the padding. This parameter - only has an effect if `nrow` is not None. + only has an effect if `images_per_row` is not None. Default: 0.0 """ diff --git a/captum/optim/_utils/image/dataset.py b/captum/optim/_utils/image/dataset.py index c894173990..66bf18b53a 100644 --- a/captum/optim/_utils/image/dataset.py +++ b/captum/optim/_utils/image/dataset.py @@ -1,6 +1,7 @@ from typing import cast import torch +from packaging import version try: from tqdm.auto import tqdm @@ -73,6 +74,15 @@ def dataset_cov_matrix( return cov_mtx +# Handle older versions of PyTorch +# Defined outside of function in order to support JIT +_torch_norm = ( + torch.linalg.norm + if version.parse(torch.__version__) >= version.parse("1.7.0") + else torch.norm +) + + def cov_matrix_to_klt( cov_mtx: torch.Tensor, normalize: bool = False, epsilon: float = 1e-10 ) -> torch.Tensor: @@ -90,13 +100,10 @@ def cov_matrix_to_klt( *tensor*: A KLT matrix for the specified covariance matrix. """ - # Handle older versions of PyTorch - torch_norm = torch.linalg.norm if torch.__version__ >= "1.9.0" else torch.norm - U, S, V = torch.svd(cov_mtx) svd_sqrt = U @ torch.diag(torch.sqrt(S + epsilon)) if normalize: - svd_sqrt / torch.max(torch_norm(svd_sqrt, dim=0)) + svd_sqrt = svd_sqrt / torch.max(_torch_norm(svd_sqrt, dim=0)) return svd_sqrt diff --git a/captum/optim/_utils/typing.py b/captum/optim/_utils/typing.py index a0e3d6f1c0..10d37bd835 100755 --- a/captum/optim/_utils/typing.py +++ b/captum/optim/_utils/typing.py @@ -1,7 +1,8 @@ import sys from typing import Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union -from torch import Tensor, __version__ +from torch import Tensor +from torch import distributions from torch.nn import Module from torch.optim import Optimizer @@ -33,16 +34,11 @@ def cleanup(self) -> None: LossFunction = Callable[[ModuleOutputMapping], Tensor] SingleTargetLossFunction = Callable[[Tensor], Tensor] -if __version__ < "1.4.0": - NumSeqOrTensorOrProbDistType = Union[Sequence[int], Sequence[float], Tensor] -else: - from torch import distributions - - NumSeqOrTensorOrProbDistType = Union[ - Sequence[int], - Sequence[float], - Tensor, - distributions.distribution.Distribution, - ] +NumSeqOrTensorOrProbDistType = Union[ + Sequence[int], + Sequence[float], + Tensor, + distributions.distribution.Distribution, +] IntSeqOrIntType = Union[List[int], Tuple[int], Tuple[int, int], int] TupleOfTensorsOrTensorType = Union[Tuple[Tensor, ...], Tensor] diff --git a/captum/optim/models/__init__.py b/captum/optim/models/__init__.py index 0f809d5ef5..687aab0f85 100755 --- a/captum/optim/models/__init__.py +++ b/captum/optim/models/__init__.py @@ -1,4 +1,5 @@ from ._common import ( # noqa: F401 + MaxPool2dRelaxed, RedirectedReluLayer, SkipLayer, collect_activations, @@ -17,6 +18,7 @@ ) __all__ = [ + "MaxPool2dRelaxed", "RedirectedReluLayer", "SkipLayer", "collect_activations", diff --git a/tests/optim/models/test_googlenet_places365.py b/tests/optim/models/test_googlenet_places365.py index d6e9cf321d..84f9291fb9 
100644 --- a/tests/optim/models/test_googlenet_places365.py +++ b/tests/optim/models/test_googlenet_places365.py @@ -11,7 +11,7 @@ class TestInceptionV1Places365(BaseTest): def test_load_inceptionv1_places365_with_redirected_relu(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping load pretrained InceptionV1 Places365 due to insufficient" + " Torch version." @@ -22,7 +22,7 @@ def test_load_inceptionv1_places365_with_redirected_relu(self) -> None: self.assertTrue(check_layer_in_model(model, RedirectedReluLayer)) def test_load_inceptionv1_places365_no_redirected_relu(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping load pretrained InceptionV1 Places365 RedirectedRelu test" + " due to insufficient Torch version." @@ -34,7 +34,7 @@ def test_load_inceptionv1_places365_no_redirected_relu(self) -> None: self.assertTrue(check_layer_in_model(model, torch.nn.ReLU)) def test_load_inceptionv1_places365_linear(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping load pretrained InceptionV1 Places365 linear test due to" + " insufficient Torch version." @@ -47,7 +47,7 @@ def test_load_inceptionv1_places365_linear(self) -> None: self.assertTrue(check_layer_in_model(model, torch.nn.AvgPool2d)) def test_inceptionv1_places365_transform(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping InceptionV1 Places365 internal transform test due to" + " insufficient Torch version." @@ -62,7 +62,7 @@ def test_inceptionv1_places365_transform(self) -> None: assertTensorAlmostEqual(self, output, expected_output, 0) def test_inceptionv1_places365_transform_warning(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping InceptionV1 Places365 internal transform warning test due" + " to insufficient Torch version." @@ -75,7 +75,7 @@ def test_inceptionv1_places365_transform_warning(self) -> None: model._transform_input(x) def test_inceptionv1_places365_load_and_forward(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping basic pretrained InceptionV1 Places365 forward test due to" + " insufficient Torch version." @@ -86,7 +86,7 @@ def test_inceptionv1_places365_load_and_forward(self) -> None: self.assertEqual([list(o.shape) for o in outputs], [[1, 365]] * 3) def test_inceptionv1_places365_load_and_forward_diff_sizes(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping pretrained InceptionV1 Places365 forward with different" + " sized inputs test due to insufficient Torch version." @@ -102,7 +102,7 @@ def test_inceptionv1_places365_load_and_forward_diff_sizes(self) -> None: self.assertEqual([list(o.shape) for o in outputs2], [[1, 365]] * 3) def test_inceptionv1_places365_forward_no_aux(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping pretrained InceptionV1 Places365 with aux logits forward" + " test due to insufficient Torch version." 
@@ -113,7 +113,7 @@ def test_inceptionv1_places365_forward_no_aux(self) -> None: self.assertEqual(list(outputs.shape), [1, 365]) def test_inceptionv1_places365_forward_cuda(self) -> None: - if torch.__version__ <= "1.6.0": + if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping pretrained InceptionV1 Places365 forward CUDA test due to" + " insufficient Torch version." diff --git a/tests/optim/param/test_images.py b/tests/optim/param/test_images.py index 617d34a3a3..0ca59c1920 100644 --- a/tests/optim/param/test_images.py +++ b/tests/optim/param/test_images.py @@ -443,7 +443,7 @@ def test_simple_tensor_parameterization_with_grad(self) -> None: self.assertTrue(image_param.tensor.requires_grad) def test_simple_tensor_parameterization_jit_module_with_grad(self) -> None: - if torch.__version__ <= "1.8.0": + if version.parse(torch.__version__) <= version.parse("1.8.0"): raise unittest.SkipTest( "Skipping SimpleTensorParameterization JIT module test due to" + " insufficient Torch version." diff --git a/tests/optim/param/test_transforms.py b/tests/optim/param/test_transforms.py index 385006a7ac..362fce9649 100644 --- a/tests/optim/param/test_transforms.py +++ b/tests/optim/param/test_transforms.py @@ -1335,7 +1335,7 @@ def test_ignore_alpha(self) -> None: assert rgb_tensor.size(1) == 3 def test_ignore_alpha_jit_module(self) -> None: - if torch.__version__ <= "1.8.0": + if version.parse(torch.__version__) <= version.parse("1.8.0"): raise unittest.SkipTest( "Skipping IgnoreAlpha JIT module test due to insufficient" + " Torch version." From 63843b59edc96c96fa0c844b2bd200c16a559c4c Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sun, 5 Jun 2022 09:23:30 -0600 Subject: [PATCH 02/42] Add packaging library to setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 48bc6f4057..09fe441950 100755 --- a/setup.py +++ b/setup.py @@ -147,7 +147,7 @@ def get_package_files(root, subdirs): long_description=long_description, long_description_content_type="text/markdown", python_requires=">=3.6", - install_requires=["matplotlib", "numpy", "torch>=1.6"], + install_requires=["matplotlib", "numpy", "packaging", "torch>=1.6"], packages=find_packages(exclude=("tests", "tests.*")), extras_require={ "dev": DEV_REQUIRES, From 8b82a37959822d60669f5db9b700b348483a6361 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 16 Jun 2022 12:13:04 -0600 Subject: [PATCH 03/42] Add alias for ImageTensor.open() --- captum/optim/_param/image/images.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index 3fade94f64..e4c4c05210 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -69,6 +69,11 @@ def open(cls, path: str, scale: float = 255.0, mode: str = "RGB") -> "ImageTenso img_np = np.array(img.convert(mode)).astype(np.float32) return cls(img_np.transpose(2, 0, 1) / scale) + @classmethod + def load(cls, path: str, scale: float = 255.0, mode: str = "RGB") -> "ImageTensor": + """Alias of ImageTensor.open()""" + return cls.open(path=path, scale=scale, mode=mode) + def __repr__(self) -> str: prefix = "ImageTensor(" indent = len(prefix) From 9e9a6beb0cf7f5a8f4d55225d087e2240f0c1953 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 20 Jun 2022 18:49:17 -0600 Subject: [PATCH 04/42] Add Conv2dSame to __all__ --- captum/optim/models/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/captum/optim/models/__init__.py b/captum/optim/models/__init__.py index 687aab0f85..121fa09257 100755 --- a/captum/optim/models/__init__.py +++ b/captum/optim/models/__init__.py @@ -6,6 +6,7 @@ get_model_layers, replace_layers, skip_layers, + Conv2dSame, ) from ._image.inception5h_classes import INCEPTION5H_CLASSES # noqa: F401 from ._image.inception_v1 import InceptionV1, googlenet # noqa: F401 @@ -18,6 +19,7 @@ ) __all__ = [ + "Conv2dSame", "MaxPool2dRelaxed", "RedirectedReluLayer", "SkipLayer", From 027038381e18c68cd4838039448290de472f1864 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 20 Jun 2022 20:05:05 -0600 Subject: [PATCH 05/42] Fix doc formatting for Sphinx --- captum/optim/_utils/image/atlas.py | 2 ++ captum/optim/_utils/image/common.py | 7 +++++++ captum/optim/_utils/reducer.py | 6 ++++++ captum/optim/models/_common.py | 17 +++++++++++++---- 4 files changed, 28 insertions(+), 4 deletions(-) diff --git a/captum/optim/_utils/image/atlas.py b/captum/optim/_utils/image/atlas.py index 5954a3a471..dd68bccc64 100644 --- a/captum/optim/_utils/image/atlas.py +++ b/captum/optim/_utils/image/atlas.py @@ -78,6 +78,7 @@ def calc_grid_indices( ] Args: + xy_grid (torch.tensor): The xy coordinate grid activation samples, with a shape of: [n_points, 2]. grid_size (Tuple[int, int]): The grid_size of grid cells to use. The grid_size @@ -86,6 +87,7 @@ def calc_grid_indices( Default: (0.0, 1.0) y_extent (Tuple[float, float], optional): The y axis range to use. Default: (0.0, 1.0) + Returns: indices (list of list of torch.Tensors): List of lists of grid indices stored inside tensors to use. Each 1D tensor of indices has a size of: diff --git a/captum/optim/_utils/image/common.py b/captum/optim/_utils/image/common.py index 39a6ada5ea..77da453678 100644 --- a/captum/optim/_utils/image/common.py +++ b/captum/optim/_utils/image/common.py @@ -208,6 +208,7 @@ def _dot_cossim( a specified dimension. Args: + x (torch.Tensor): The tensor that you wish to compute the cosine similarity for in relation to tensor y. y (torch.Tensor): The tensor that you wish to compute the cosine similarity @@ -216,6 +217,7 @@ def _dot_cossim( dim (int, optional): The target dimension for computing cosine similarity. eps (float, optional): If cossim_pow is greater than zero, the desired epsilon value to use for cosine similarity calculations. + Returns: tensor (torch.Tensor): Dot cosine similarity between x and y, along the specified dim. @@ -241,13 +243,16 @@ def hue_to_rgb( ) -> torch.Tensor: """ Create an RGB unit vector based on a hue of the input angle. + Args: + angle (float): The hue angle to create an RGB color for. device (torch.device, optional): The device to create the angle color tensor on. Default: torch.device("cpu") warp (bool, optional): Whether or not to make colors more distinguishable. Default: True + Returns: color_vec (torch.Tensor): A color vector. """ @@ -293,6 +298,7 @@ def nchannels_to_rgb( Default: True eps (float, optional): An optional epsilon value. Default: 1e-4 + Returns: tensor (torch.Tensor): An NCHW RGB image tensor. """ @@ -326,6 +332,7 @@ def weights_to_heatmap_2d( no excitation or inhibition. Args: + weight (torch.Tensor): A 2d tensor to create the heatmap from. 
colors (list of str): A list of 5 strings containing hex triplet (six digit), three-byte hexadecimal color values to use for coloring diff --git a/captum/optim/_utils/reducer.py b/captum/optim/_utils/reducer.py index 2696d003d6..585d0157e0 100644 --- a/captum/optim/_utils/reducer.py +++ b/captum/optim/_utils/reducer.py @@ -22,6 +22,7 @@ class ChannelReducer: See here for more information: https://distill.pub/2018/building-blocks/ Args: + n_components (int, optional): The number of channels to reduce the target dimension to. reduction_alg (str or callable, optional): The desired dimensionality @@ -71,11 +72,14 @@ def fit_transform( ) -> torch.Tensor: """ Perform dimensionality reduction on an input tensor. + Args: + tensor (tensor): A tensor to perform dimensionality reduction on. swap_2nd_and_last_dims (bool, optional): If true, input channels are expected to be in the second dimension unless the input tensor has a shape of CHW. Default is set to True. + Returns: *tensor*: A tensor with one of it's dimensions reduced. """ @@ -131,8 +135,10 @@ def posneg(x: torch.Tensor, dim: int = 0) -> torch.Tensor: NMF with regular NMF. Args: + x (tensor): A tensor to make positive. dim (int, optional): The dimension to concatinate the two tensor halves at. + Returns: tensor (torch.tensor): A positive tensor for one-sided dimensionality reduction. diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index e65e281217..2e4352738b 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -112,13 +112,16 @@ def _transfer_layer_vars( """ Given a layer instance, create a new layer instance of another class with the same initialization variables as the original layer. + Args: + layer1: (nn.Module): A layer instance that you want to transfer initialization variables from. layer2: (nn.Module): The layer class to create with the variables from of layer1. kwargs: (Any, optional): Any additional variables to use when creating the new layer. + Returns: layer2 instance (nn.Module): An instance of layer2 with the initialization variables that it shares with layer1, and any specified additional @@ -273,13 +276,15 @@ class SkipLayer(torch.nn.Module): See nn.Identity for more details: https://pytorch.org/docs/stable/generated/torch.nn.Identity.html - - Args: - args (Any): Any argument. Arguments will be safely ignored. - kwargs (Any) Any keyword argument. Arguments will be safely ignored. """ def __init__(self, *args, **kwargs) -> None: + """ + Args: + + args (Any): Any argument. Arguments will be safely ignored. + kwargs (Any) Any keyword argument. Arguments will be safely ignored. + """ super().__init__() def forward( @@ -287,9 +292,11 @@ def forward( ) -> Union[torch.Tensor, Tuple[torch.Tensor]]: """ Args: + x (torch.Tensor or tuple of torch.Tensor): The input tensor or tensors. args (Any): Any argument. Arguments will be safely ignored. kwargs (Any) Any keyword argument. Arguments will be safely ignored. + Returns: x (torch.Tensor or tuple of torch.Tensor): The unmodified input tensor or tensors. @@ -306,7 +313,9 @@ def skip_layers( with layers that do nothing. This is useful for removing the nonlinear ReLU layers when creating expanded weights. + Args: + model (nn.Module): A PyTorch model instance. layers (nn.Module or list of nn.Module): The layer class type to replace in the model. 
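For context on the docstring changes in the patch above: the Sphinx build these fixes target parses Google-style sections more reliably when a blank line follows `Args:` and precedes `Returns:`, which is exactly what the added blank lines provide. A minimal sketch of the resulting convention (the function below is hypothetical, written only to illustrate the layout, not part of any patch in this series):

    import torch


    def scale_values(x: torch.Tensor, scale: float = 255.0) -> torch.Tensor:
        """
        Rescale an input tensor by a constant factor.

        Args:

            x (torch.Tensor): The tensor to rescale.
            scale (float, optional): The multiplier to apply to x.
                Default: 255.0

        Returns:
            tensor (torch.Tensor): The rescaled tensor.
        """
        return x * scale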
From 73eedd11468ce8a50d2a5e34c9ce0d9b0da93563 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 23 Jun 2022 08:59:12 -0600 Subject: [PATCH 06/42] Fix docs for Sphinx --- captum/optim/_core/optimization.py | 21 ++++++++++++++++++- captum/optim/models/_common.py | 9 ++++---- .../models/_image/inception_v1_places365.py | 4 ++++ 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index cd11db9e34..ae5a78e652 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -35,6 +35,18 @@ class InputOptimization(Objective, Parameterized): For more details, see the following: https://github.com/tensorflow/lucid https://distill.pub/2017/feature-visualization/ + + Instance variables that be used in the optimize function and StopCriteria: + + :ivar model: initial value (nn.Module): The given model instance given when + initializing InputOptimization. + :ivar input_param: initial value (ImageParameterization): The given image + parameterization instance given when initializing InputOptimization. + :ivar loss_fn: initial value (Loss): The given composable loss instance given + when initializing InputOptimization. + :ivar transform: initial value (nn.Module): The given transform instance given + when initializing InputOptimization. If it was set to None during + initialization, then an instance of torch.nn.Identity will be returned. """ def __init__( @@ -95,7 +107,9 @@ def loss(self) -> torch.Tensor: return loss_value def cleanup(self) -> None: - r"""Garbage collection, mainly removing hooks.""" + r"""Garbage collection, mainly removing hooks. + This should only be run after optimize is finished running. + """ self.hooks.remove_hooks() # Targets are managed by ModuleOutputHooks; we mainly just want a convenient setter @@ -109,6 +123,11 @@ def targets(self, value: Iterable[nn.Module]) -> None: self.hooks = ModuleOutputsHook(value) def parameters(self) -> Iterable[nn.Parameter]: + """ + Returns: + parameters (iterable of nn.Parameter): An iterable of parameters in the + image parameterization. + """ return self.input_param.parameters() def optimize( diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 2e4352738b..c9af0dc73f 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -147,8 +147,7 @@ def _transfer_layer_vars( class Conv2dSame(nn.Conv2d): """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions. - TODO: Replace with torch.nn.Conv2d when support for padding='same' - is in stable version + torch.nn.Conv2d with padding='same' can be used when the stride is equal to 1. """ def __init__( @@ -190,7 +189,7 @@ def __init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias ) - def calc_same_pad(self, i: int, k: int, s: int, d: int) -> int: + def _calc_same_pad(self, i: int, k: int, s: int, d: int) -> int: """ Calculate the required padding for a dimension. 
@@ -217,8 +216,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: """ ih, iw = x.size()[-2:] kh, kw = self.weight.size()[-2:] - pad_h = self.calc_same_pad(i=ih, k=kh, s=self.stride[0], d=self.dilation[0]) - pad_w = self.calc_same_pad(i=iw, k=kw, s=self.stride[1], d=self.dilation[1]) + pad_h = self._calc_same_pad(i=ih, k=kh, s=self.stride[0], d=self.dilation[0]) + pad_w = self._calc_same_pad(i=iw, k=kw, s=self.stride[1], d=self.dilation[1]) if pad_h > 0 or pad_w > 0: x = F.pad( diff --git a/captum/optim/models/_image/inception_v1_places365.py b/captum/optim/models/_image/inception_v1_places365.py index 5ebca2a9b5..85afc7b32d 100644 --- a/captum/optim/models/_image/inception_v1_places365.py +++ b/captum/optim/models/_image/inception_v1_places365.py @@ -24,6 +24,7 @@ def googlenet_places365( dataset. See here for more information: https://arxiv.org/abs/1610.02055 Args: + pretrained (bool, optional): If True, returns a model pre-trained on the MIT Places365 Standard dataset. Default: False @@ -47,6 +48,9 @@ def googlenet_places365( transform_input (bool, optional): If True, preprocesses the input according to the method with which it was trained on Places365. Default: True + + Returns: + **model** (InceptionV1Places365): An InceptionV1 Places365 model instance. """ if pretrained: From c45f6944995c33612995793ad612e53045941736 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 23 Jun 2022 11:37:01 -0600 Subject: [PATCH 07/42] Minor fixes --- captum/optim/_core/optimization.py | 4 +-- tests/optim/core/test_optimization.py | 46 +++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index ae5a78e652..68310087f6 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -42,8 +42,8 @@ class InputOptimization(Objective, Parameterized): initializing InputOptimization. :ivar input_param: initial value (ImageParameterization): The given image parameterization instance given when initializing InputOptimization. - :ivar loss_fn: initial value (Loss): The given composable loss instance given - when initializing InputOptimization. + :ivar loss_function: initial value (Loss): The given composable loss instance + given when initializing InputOptimization. :ivar transform: initial value (nn.Module): The given transform instance given when initializing InputOptimization. If it was set to None during initialization, then an instance of torch.nn.Identity will be returned. diff --git a/tests/optim/core/test_optimization.py b/tests/optim/core/test_optimization.py index 7f77cf4b4d..c0def7ffef 100644 --- a/tests/optim/core/test_optimization.py +++ b/tests/optim/core/test_optimization.py @@ -9,6 +9,52 @@ class TestInputOptimization(BaseTest): + def test_input_optimization_init(self) -> None: + if version.parse(torch.__version__) <= version.parse("1.6.0"): + raise unittest.SkipTest( + "Skipping InputOptimization init test due to insufficient Torch" + + " version." 
+ ) + model = BasicModel_ConvNet_Optim() + loss_fn = opt.loss.ChannelActivation(model.layer, 1) + transform = torch.nn.Identity() + image_param = opt.images.NaturalImage() + obj = opt.InputOptimization( + model, loss_function=loss_fn, input_param=image_param, transform=transform + ) + + self.assertEqual(model, obj.model) + self.assertEqual(image_param, obj.input_param) + self.assertEqual(transform, obj.transform) + self.assertEqual(loss_fn, obj.loss_function) + self.assertEqual(list(image_param.parameters()), list(obj.parameters())) + + def test_input_optimization_custom_optimize(self) -> torch.Tensor: + if version.parse(torch.__version__) <= version.parse("1.6.0"): + raise unittest.SkipTest( + "Skipping InputOptimization custom optimize test due to insufficient" + + " Torch version." + ) + model = BasicModel_ConvNet_Optim() + loss_fn = opt.loss.ChannelActivation(model.layer, 0) + obj = opt.InputOptimization(model, loss_function=loss_fn) + + stop_criteria = opt.optimization.n_steps(512) + optimizer = torch.optim.Adam(obj.parameters(), lr=0.02) + + history, step = [], 0 + try: + while stop_criteria(step, obj, history, optimizer): + optimizer.zero_grad() + loss_value = -1.0 * obj.loss().mean() + history.append(loss_value.clone().detach()) + loss_value.backward() + optimizer.step() + step += 1 + finally: + obj.cleanup() + self.assertIsInstance(torch.stack(history), torch.Tensor) + def test_input_optimization(self) -> None: if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( From 4cf8cfc8033951a358d3759d8ab16437143c09d4 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 23 Jun 2022 12:07:00 -0600 Subject: [PATCH 08/42] Fix test errors --- tests/optim/core/test_optimization.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/optim/core/test_optimization.py b/tests/optim/core/test_optimization.py index c0def7ffef..e7b174e480 100644 --- a/tests/optim/core/test_optimization.py +++ b/tests/optim/core/test_optimization.py @@ -29,7 +29,7 @@ def test_input_optimization_init(self) -> None: self.assertEqual(loss_fn, obj.loss_function) self.assertEqual(list(image_param.parameters()), list(obj.parameters())) - def test_input_optimization_custom_optimize(self) -> torch.Tensor: + def test_input_optimization_custom_optimize(self) -> None: if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( "Skipping InputOptimization custom optimize test due to insufficient" @@ -39,7 +39,7 @@ def test_input_optimization_custom_optimize(self) -> None: loss_fn = opt.loss.ChannelActivation(model.layer, 0) obj = opt.InputOptimization(model, loss_function=loss_fn) - stop_criteria = opt.optimization.n_steps(512) + stop_criteria = opt.optimization.n_steps(512, show_progress=False) optimizer = torch.optim.Adam(obj.parameters(), lr=0.02) history, step = [], 0 @@ -53,7 +53,8 @@ def test_input_optimization_custom_optimize(self) -> None: step += 1 finally: obj.cleanup() - self.assertIsInstance(torch.stack(history), torch.Tensor) + history = torch.stack(history) + self.assertIsInstance(history, torch.Tensor) def test_input_optimization(self) -> None: if version.parse(torch.__version__) <= version.parse("1.6.0"): raise unittest.SkipTest( From 90f9592c32a8d18197ee03264f019c5d98c80af7 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 23 Jun 2022 12:22:40 -0600 Subject: [PATCH 09/42] Fix mypy error --- tests/optim/core/test_optimization.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/optim/core/test_optimization.py 
b/tests/optim/core/test_optimization.py index e7b174e480..1cd3301a98 100644 --- a/tests/optim/core/test_optimization.py +++ b/tests/optim/core/test_optimization.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 import unittest +from typing import List import captum.optim as opt import torch @@ -42,7 +43,8 @@ def test_input_optimization_custom_optimize(self) -> None: stop_criteria = opt.optimization.n_steps(512, show_progress=False) optimizer = torch.optim.Adam(obj.parameters(), lr=0.02) - history, step = [], 0 + history: List[torch.Tensor] = [] + step = 0 try: while stop_criteria(step, obj, history, optimizer): optimizer.zero_grad() From ca3b5f970242de43c5d7569ae599883150c8d69d Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 27 Jun 2022 13:23:40 -0600 Subject: [PATCH 10/42] Update _common.py --- captum/optim/models/_common.py | 36 ++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index c9af0dc73f..d0a1d81208 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -87,10 +87,11 @@ def replace_layers( layer1: (Type[nn.Module]): The layer class that you want to transfer initialization variables from. layer2: (Type[nn.Module]): The layer class to create with the variables - from layer1. - transfer_vars (bool, optional): Wether or not to try and copy - initialization variables from layer1 instances to the replacement - layer2 instances. + from ``layer1``. + transfer_vars (bool, optional): Whether or not to try and copy + initialization variables from ``layer1`` instances to the replacement + ``layer2`` instances. + Default: ``False`` kwargs: (Any, optional): Any additional variables to use when creating the new layer. """ @@ -172,18 +173,19 @@ def __init__( kernel_size (int or tuple of int): The desired kernel size to use. stride (int or tuple of int, optional): The desired stride for the cross-correlation. - Default: 1 + Default: ``1`` padding (int or tuple of int, optional): This value is always set to 0. - Default: 0 + Default: ``0`` dilation (int or tuple of int, optional): The desired spacing between the kernel points. - Default: 1 + Default: ``1`` groups (int, optional): Number of blocked connections from input channels to output channels. Both in_channels and out_channels must be divisable by groups. - Default: 1 + Default: ``1`` bias (bool, optional): Whether or not to apply a learnable bias to the output. + Default: ``True`` """ super().__init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias @@ -249,7 +251,7 @@ def collect_activations( given model. model_input (torch.Tensor or tuple of torch.Tensor, optional): Optionally provide an input tensor to use when collecting the target activations. - Default: torch.zeros(1, 3, 224, 224) + Default: ``torch.zeros(1, 3, 224, 224)`` Returns: activ_dict (ModuleOutputMapping): A dictionary of collected activations where @@ -269,9 +271,9 @@ class SkipLayer(torch.nn.Module): during the forward pass. Use cases include removing nonlinear activation layers like ReLU for circuits research. - This layer works almost exactly the same way that nn.Indentiy does, except it also - ignores any additional arguments passed to the forward function. Any layer replaced - by SkipLayer must have the same input and output shapes. + This layer works almost exactly the same way that ``nn.Indentiy`` does, except it + also ignores any additional arguments passed to the forward function. 
Any layer + replaced by ``SkipLayer`` must have the same input and output shapes. See nn.Identity for more details: https://pytorch.org/docs/stable/generated/torch.nn.Identity.html @@ -365,15 +367,15 @@ def __init__( Args: kernel_size (int or tuple of int): The size of the window to perform max & - average pooling with. + average pooling with. stride (int or tuple of int, optional): The stride window size to use. - Default: None + Default: ``None`` padding (int or tuple of int): The amount of zero padding to add to both - sides in the nn.MaxPool2d & nn.AvgPool2d modules. - Default: 0 + sides in the ``nn.MaxPool2d`` & ``nn.AvgPool2d`` modules. + Default: ``0`` ceil_mode (bool, optional): Whether to use ceil or floor for creating the output shape. - Default: False + Default: ``False`` """ super().__init__() self.maxpool = torch.nn.MaxPool2d( From e87c975531532f8e5dd1bf55d10612b3814369c8 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 27 Jun 2022 15:19:09 -0600 Subject: [PATCH 11/42] Improve ImageTensor, Optimization, & submodule docs for Sphinx --- captum/optim/_core/optimization.py | 47 +++++++---- captum/optim/_param/image/images.py | 81 ++++++++++++------- captum/optim/_utils/image/dataset.py | 28 ++++--- .../models/_image/inception_v1_places365.py | 79 +++++++++--------- 4 files changed, 141 insertions(+), 94 deletions(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index 68310087f6..4072b0f987 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -36,17 +36,29 @@ class InputOptimization(Objective, Parameterized): https://github.com/tensorflow/lucid https://distill.pub/2017/feature-visualization/ - Instance variables that be used in the optimize function and StopCriteria: + Example:: + + >>> model = opt.models.googlenet(pretrained=True) + >>> loss_fn = opt.loss.LayerActivation(model.mixed4c) + >>> image = opt.images.NaturalImage(size=(224, 224)) + >>> transform = opt.transforms.TransformationRobustness() + >>> + >>> obj = opt.InputOptimization(model, loss_fn, image, transform) + >>> history = obj.optimize(opt.optimization.n_steps(512)) + >>> image().show(figsize=(10, 10)) # Display results + + Instance variables that be used in the optimize function and StopCriteria + functions: :ivar model: initial value (nn.Module): The given model instance given when - initializing InputOptimization. + initializing ``InputOptimization``. :ivar input_param: initial value (ImageParameterization): The given image - parameterization instance given when initializing InputOptimization. + parameterization instance given when initializing ``InputOptimization``. :ivar loss_function: initial value (Loss): The given composable loss instance - given when initializing InputOptimization. + given when initializing ``InputOptimization``. :ivar transform: initial value (nn.Module): The given transform instance given - when initializing InputOptimization. If it was set to None during - initialization, then an instance of torch.nn.Identity will be returned. + when initializing ``InputOptimization``. If it was set to ``None`` during + initialization, then an instance of ``torch.nn.Identity`` will be returned. """ def __init__( @@ -142,17 +154,17 @@ def optimize( Args: stop_criteria (StopCriteria, optional): A function that is called - every iteration and returns a bool that determines whether - to stop the optimization. - See captum.optim.typing.StopCriteria for details. 
- optimizer (Optimizer, optional): An torch.optim.Optimizer used to - optimize the input based on the loss function. + every iteration and returns a bool that determines whether to stop the + optimization. + Default: ``n_steps(512)`` + optimizer (Optimizer, optional): An ``torch.optim.Optimizer`` used to + optimize the input based on the loss function. loss_summarize_fn (Callable, optional): The function to use for summarizing tensor outputs from loss functions. - Default: default_loss_summarize + Default: ``default_loss_summarize`` lr: (float, optional): If no optimizer is given, then lr is used as the learning rate for the Adam optimizer. - Default: 0.025 + Default: ``0.025`` Returns: history (torch.Tensor): A stack of loss values per iteration. The size @@ -182,13 +194,18 @@ def optimize( def n_steps(n: int, show_progress: bool = True) -> StopCriteria: """StopCriteria generator that uses number of steps as a stop criteria. + Example:: + + >>> stop_criteria = opt.optimization.n_steps(512, True) + Args: + n (int): Number of steps to run optimization. show_progress (bool, optional): Whether or not to show progress bar. - Default: True + Default: ``True`` Returns: - *StopCriteria* callable + *StopCriteria* (callable): A stop criteria function. """ if show_progress: diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index e4c4c05210..64400f24ff 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -21,6 +21,27 @@ class ImageTensor(torch.Tensor): + r""" + A subclass of torch.Tensor that provides functions for easy loading, saving, and + displaying image tensors. + + Example using file path or URL:: + + >>> image_tensor = opt.images.ImageTensor.load() + >>> image_tensor.export(filename="image_tensor.jpg") # Save image(s) + >>> image_tensor.show(figsize=(8, 8)) # Displays image(s) via Matplotlib + + Example using ``torch.Tensor``:: + + >>> image_tensor = torch.randn(1, 3, 224, 224) + >>> image_tensor = opt.images.ImageTensor(image_tensor) + + Example using ``np.ndarray``:: + + >>> image_tensor = np.random.rand(1, 3, 224, 224) + >>> image_tensor = opt.images.ImageTensor(image_tensor) + """ + @staticmethod def __new__( cls: Type["ImageTensor"], @@ -32,10 +53,10 @@ def __new__( Args: x (list or np.ndarray or torch.Tensor): A list, NumPy array, or PyTorch - tensor to create an `ImageTensor` from. + tensor to create an ``ImageTensor`` from. Returns: - x (ImageTensor): An `ImageTensor` instance. + x (ImageTensor): An ``ImageTensor`` instance. """ if isinstance(x, torch.Tensor) and x.is_cuda: x.show = MethodType(cls.show, x) @@ -45,17 +66,18 @@ def __new__( return super().__new__(cls, x, *args, **kwargs) @classmethod - def open(cls, path: str, scale: float = 255.0, mode: str = "RGB") -> "ImageTensor": + def load(cls, path: str, scale: float = 255.0, mode: str = "RGB") -> "ImageTensor": """ - Load an image file from a URL or local filepath directly into an `ImageTensor`. + Load an image file from a URL or local filepath directly into an + ``ImageTensor``. Args: path (str): A URL or filepath to an image. scale (float, optional): The image scale to use. - Default: 255.0 + Default: ``255.0`` mode (str, optional): The image loading mode / colorspace to use. - Default: "RGB" + Default: ``"RGB"`` Returns: x (ImageTensor): An `ImageTensor` instance. 
@@ -70,9 +92,9 @@ def open(cls, path: str, scale: float = 255.0, mode: str = "RGB") -> "ImageTenso return cls(img_np.transpose(2, 0, 1) / scale) @classmethod - def load(cls, path: str, scale: float = 255.0, mode: str = "RGB") -> "ImageTensor": - """Alias of ImageTensor.open()""" - return cls.open(path=path, scale=scale, mode=mode) + def open(cls, path: str, scale: float = 255.0, mode: str = "RGB") -> "ImageTensor": + r"""Alias for :func:`load`.""" + return cls.load(path=path, scale=scale, mode=mode) def __repr__(self) -> str: prefix = "ImageTensor(" @@ -109,25 +131,25 @@ def show( pad_value: float = 0.0, ) -> None: """ - Display an `ImageTensor`. + Display an ``ImageTensor`` instance. Args: figsize (Tuple[int, int], optional): height & width to use - for displaying the `ImageTensor` figure. - scale (float, optional): Value to multiply the `ImageTensor` by so that + for displaying the ``ImageTensor`` figure. + scale (float, optional): Value to multiply the ``ImageTensor`` by so that it's value range is [0-255] for display. - Default: 255.0 + Default: ``255.0`` images_per_row (int, optional): The number of images per row to use for the - grid image. Default is set to None for no grid image creation. - Default: None + grid image. Default is set to ``None`` for no grid image creation. + Default: ``None`` padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if `images_per_row` is not - None. - Default: 2 + images. This parameter only has an effect if ``images_per_row`` is not + ``None``. + Default: ``2`` pad_value (float, optional): The value to use for the padding. This - parameter only has an effect if `images_per_row` is not None. - Default: 0.0 + parameter only has an effect if ``images_per_row`` is not None. + Default: ``0.0`` """ show( self, @@ -152,23 +174,24 @@ def export( Args: - filename (str): The filename to use when saving the `ImageTensor` as an + filename (str): The filename to use when saving the ``ImageTensor`` as an image file. - scale (float, optional): Value to multiply the `ImageTensor` by so that + scale (float, optional): Value to multiply the ``ImageTensor`` by so that it's value range is [0-255] for saving. - Default: 255.0 + Default: ``255.0`` mode (str, optional): A PIL / Pillow supported colorspace. Default is set to None for automatic RGB / RGBA detection and usage. - Default: None + Default: ``None`` images_per_row (int, optional): The number of images per row to use for the grid image. Default is set to None for no grid image creation. - Default: None + Default: ``None`` padding (int, optional): The amount of padding between images in the grid - images. This parameter only has an effect if `images_per_row` is not - None. + images. This parameter only has an effect if ``images_per_row`` is not + ``None``. + Default: ``2`` pad_value (float, optional): The value to use for the padding. This - parameter only has an effect if `images_per_row` is not None. - Default: 0.0 + parameter only has an effect if ``images_per_row`` is not ``None``. 
+ Default: ``0.0`` """ save_tensor_as_image( self, diff --git a/captum/optim/_utils/image/dataset.py b/captum/optim/_utils/image/dataset.py index 66bf18b53a..66eee6dc38 100644 --- a/captum/optim/_utils/image/dataset.py +++ b/captum/optim/_utils/image/dataset.py @@ -19,11 +19,11 @@ def image_cov(x: torch.Tensor) -> torch.Tensor: Args: - x (torch.Tensor): One or more NCHW image tensors stacked across the batch + x (torch.Tensor): One or more NCHW image tensors stacked across the batch dimension. Returns: - *tensor* (torch.Tensor): The average color channel covariance matrix for the + *tensor* (torch.Tensor): The average color channel covariance matrix for the for the input tensor, with a shape of: [n_channels, n_channels]. """ @@ -47,10 +47,10 @@ def dataset_cov_matrix( loader (torch.utils.data.DataLoader): The reference to a PyTorch dataloader instance. show_progress (bool, optional): Whether or not to display a tqdm progress bar. - Default: False + Default: ``False`` device (torch.device, optional): The PyTorch device to use for for calculating the cov matrix. - Default: torch.device("cpu") + Default: ``torch.device("cpu")`` Returns: *tensor*: A covariance matrix for the specified dataset. @@ -91,10 +91,12 @@ def cov_matrix_to_klt( Args: - cov_mtx (tensor): A 3 by 3 covariance matrix generated from a dataset. - normalize (bool): Whether or not to normalize the resulting KLT matrix. - Default: False - epsilon (float): + cov_mtx (tensor): A 3 by 3 covariance matrix generated from a dataset. + normalize (bool): Whether or not to normalize the resulting KLT matrix. + Default: ``False`` + epsilon (float, optional): A small epsilon value to use for numerical + stability. + Default: ``1e-10`` Returns: *tensor*: A KLT matrix for the specified covariance matrix. @@ -121,15 +123,15 @@ def dataset_klt_matrix( Args: - loader (torch.utils.data.DataLoader): The reference to a PyTorch + loader (torch.utils.data.DataLoader): The reference to a PyTorch dataloader instance. - normalize (bool): Whether or not to normalize the resulting KLT matrix. - Default: False + normalize (bool): Whether or not to normalize the resulting KLT matrix. + Default: ``False`` show_progress (bool, optional): Whether or not to display a tqdm progress bar. - Default: False + Default: ``False`` device (torch.device, optional): The PyTorch device to use for for calculating the cov matrix. - Default: torch.device("cpu") + Default: ``torch.device("cpu")`` Returns: *tensor*: A KLT matrix for the specified dataset. diff --git a/captum/optim/models/_image/inception_v1_places365.py b/captum/optim/models/_image/inception_v1_places365.py index 85afc7b32d..acd5f8fe7f 100644 --- a/captum/optim/models/_image/inception_v1_places365.py +++ b/captum/optim/models/_image/inception_v1_places365.py @@ -23,31 +23,36 @@ def googlenet_places365( The pretrained GoogleNet model was trained using the MIT Places365 Standard dataset. See here for more information: https://arxiv.org/abs/1610.02055 + Example:: + + >>> model = opt.models.googlenet_places365(pretrained=True) + >>> output = model(torch.zeros(1, 3, 224, 224)) + Args: - pretrained (bool, optional): If True, returns a model pre-trained on the MIT - Places365 Standard dataset. - Default: False - progress (bool, optional): If True, displays a progress bar of the download to - stderr - Default: True - model_path (str, optional): Optional path for InceptionV1 model file. 
- Default: None - replace_relus_with_redirectedrelu (bool, optional): If True, return pretrained - model with Redirected ReLU in place of ReLU layers. - Default: *True* when pretrained is True otherwise *False* - use_linear_modules_only (bool, optional): If True, return pretrained + pretrained (bool, optional): If ``True``, returns a model pre-trained on the + MIT Places365 Standard dataset. + Default: ``False`` + progress (bool, optional): If ``True``, displays a progress bar of the + download to stderr. + Default: ``True`` + model_path (str, optional): Optional path for the InceptionV1 model file. + Default: ``None`` + replace_relus_with_redirectedrelu (bool, optional): If ``True``, return + pretrained model with Redirected ReLU in place of ReLU layers. + Default: *``True``* when pretrained is True otherwise *``False``* + use_linear_modules_only (bool, optional): If ``True``, return pretrained model with all nonlinear layers replaced with linear equivalents. - Default: False - aux_logits (bool, optional): If True, adds two auxiliary branches that can + Default: ``False`` + aux_logits (bool, optional): If ``True``, adds two auxiliary branches that can improve training. - Default: True + Default: ``True`` out_features (int, optional): Number of output features in the model used for - training. Default: 365 when pretrained is True. - Default: 365 + training. + Default: ``365`` transform_input (bool, optional): If True, preprocesses the input according to the method with which it was trained on Places365. - Default: True + Default: ``True`` Returns: **model** (InceptionV1Places365): An InceptionV1 Places365 model instance. @@ -99,19 +104,19 @@ def __init__( out_features (int, optional): Number of output features in the model used for training. - Default: 365 - aux_logits (bool, optional): If True, adds two auxiliary branches that can - improve training. - Default: True - transform_input (bool, optional): If True, preprocesses the input according - to the method with which it was trained on Places365. - Default: True - replace_relus_with_redirectedrelu (bool, optional): If True, return + Default: ``365`` + aux_logits (bool, optional): If ``True``, adds two auxiliary branches that + can improve training. + Default: ``True`` + transform_input (bool, optional): If ``True``, preprocesses the input + according to the method with which it was trained on Places365. + Default: ``True`` + replace_relus_with_redirectedrelu (bool, optional): If ``True``, return pretrained model with Redirected ReLU in place of ReLU layers. - Default: False - use_linear_modules_only (bool, optional): If True, return pretrained model - with all nonlinear layers replaced with linear equivalents. - Default: False + Default: ``False`` + use_linear_modules_only (bool, optional): If ``True``, return pretrained + model with all nonlinear layers replaced with linear equivalents. + Default: ``False`` """ super().__init__() self.aux_logits = aux_logits @@ -295,10 +300,10 @@ def __init__( pool_proj (int, optional): activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. - Default: nn.ReLU + Default: ``nn.ReLU`` p_layer (type of nn.Module, optional): The nn.Module class type to use for pooling layers. - Default: nn.MaxPool2d + Default: ``nn.MaxPool2d`` """ super().__init__() self.conv_1x1 = nn.Conv2d( @@ -392,13 +397,13 @@ def __init__( in_channels (int, optional): The number of input channels to use for the auxiliary branch. 
- Default: 508 + Default: ``508`` out_features (int, optional): The number of output features to use for the auxiliary branch. - Default: 1008 - activ (type of nn.Module, optional): The nn.Module class type to use for - activation layers. - Default: nn.ReLU + Default: ``1008`` + activ (type of nn.Module, optional): The ``nn.Module`` class type to use + for activation layers. + Default: ``nn.ReLU`` """ super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d((4, 4)) From 2a592f0512b49ffb8f67e1ceb9d7be14cabc5e6a Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 27 Jun 2022 19:52:10 -0600 Subject: [PATCH 12/42] Adjust spacing in doc variables --- captum/optim/_utils/image/dataset.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/captum/optim/_utils/image/dataset.py b/captum/optim/_utils/image/dataset.py index 66eee6dc38..8b2c5669b5 100644 --- a/captum/optim/_utils/image/dataset.py +++ b/captum/optim/_utils/image/dataset.py @@ -53,7 +53,7 @@ def dataset_cov_matrix( Default: ``torch.device("cpu")`` Returns: - *tensor*: A covariance matrix for the specified dataset. + *tensor*: A covariance matrix for the specified dataset. """ if show_progress: @@ -99,7 +99,7 @@ def cov_matrix_to_klt( Default: ``1e-10`` Returns: - *tensor*: A KLT matrix for the specified covariance matrix. + *tensor*: A KLT matrix for the specified covariance matrix. """ U, S, V = torch.svd(cov_mtx) @@ -134,7 +134,7 @@ def dataset_klt_matrix( Default: ``torch.device("cpu")`` Returns: - *tensor*: A KLT matrix for the specified dataset. + *tensor*: A KLT matrix for the specified dataset. """ cov_mtx = dataset_cov_matrix(loader, show_progress=show_progress, device=device) From 8ceecafdff3e2bf1feefdef08639f397b851fd22 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Tue, 28 Jun 2022 19:40:05 -0600 Subject: [PATCH 13/42] Improve dataset docs --- captum/optim/_utils/image/dataset.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/captum/optim/_utils/image/dataset.py b/captum/optim/_utils/image/dataset.py index 8b2c5669b5..6a36dd2644 100644 --- a/captum/optim/_utils/image/dataset.py +++ b/captum/optim/_utils/image/dataset.py @@ -44,7 +44,7 @@ def dataset_cov_matrix( Args: - loader (torch.utils.data.DataLoader): The reference to a PyTorch + loader (torch.utils.data.DataLoader): The reference to a PyTorch dataloader instance. show_progress (bool, optional): Whether or not to display a tqdm progress bar. Default: ``False`` @@ -53,7 +53,7 @@ def dataset_cov_matrix( Default: ``torch.device("cpu")`` Returns: - *tensor*: A covariance matrix for the specified dataset. + *tensor* (torch.Tensor): A covariance matrix for the specified dataset. """ if show_progress: @@ -99,7 +99,8 @@ def cov_matrix_to_klt( Default: ``1e-10`` Returns: - *tensor*: A KLT matrix for the specified covariance matrix. + *tensor* (torch.Tensor): A KLT matrix for the specified covariance + matrix. """ U, S, V = torch.svd(cov_mtx) @@ -134,7 +135,7 @@ def dataset_klt_matrix( Default: ``torch.device("cpu")`` Returns: - *tensor*: A KLT matrix for the specified dataset. + *tensor* (torch.Tensor): A KLT matrix for the specified dataset. 
""" cov_mtx = dataset_cov_matrix(loader, show_progress=show_progress, device=device) From 0491cca122bf64d0275464c1362aeab8c4bea756 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 29 Jun 2022 08:41:52 -0600 Subject: [PATCH 14/42] Improve Sphinx docs --- captum/optim/models/_common.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index d0a1d81208..4b460058f8 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -82,7 +82,14 @@ def replace_layers( Replace all target layers with new layers inside the specified model, possibly with the same initialization variables. + Example:: + + >>> model = opt.models.googlenet(pretrained=True) + >>> # Replace MaxPool2d layers with their AvgPool2d equivalents + >>> opt.models.replace_layers(model, nn.MaxPool2d, nn.AvgPool2d, True) + Args: + model: (nn.Module): A PyTorch model instance. layer1: (Type[nn.Module]): The layer class that you want to transfer initialization variables from. From 86f24bac11f4110aa17408701487d792e9afdca1 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 29 Jun 2022 16:29:24 -0600 Subject: [PATCH 15/42] Improve ImageTensor docs --- captum/optim/_param/image/images.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index 64400f24ff..7853761573 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -131,7 +131,8 @@ def show( pad_value: float = 0.0, ) -> None: """ - Display an ``ImageTensor`` instance. + Display image(s) in the ``ImageTensor`` instance using + :func:`captum.optim.show`. Args: @@ -170,7 +171,8 @@ def export( pad_value: float = 0.0, ) -> None: """ - Save an `ImageTensor` as an image file. + Save image(s) in the `ImageTensor` instance as an image file, using + :func:`captum.optim.save_tensor_as_image`. 
Args: From 975550e992524eb1ccea749e6ff44d9ad4a7a2c1 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Fri, 1 Jul 2022 13:14:42 -0600 Subject: [PATCH 16/42] Add 'Feature Visualization' keyword --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 09fe441950..cd930850f8 100755 --- a/setup.py +++ b/setup.py @@ -133,6 +133,7 @@ def get_package_files(root, subdirs): "Model Understanding", "Feature Importance", "Neuron Importance", + "Feature Visualization", "PyTorch", ], classifiers=[ From 7530ae5c24a4e67ffb88448916419a02efbba849 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Fri, 1 Jul 2022 18:50:35 -0600 Subject: [PATCH 17/42] Improve ImageTensor & Dataset docs (#552) * Update images.py * Update dataset.py * Update images.py --- captum/optim/_param/image/images.py | 5 +++-- captum/optim/_utils/image/dataset.py | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index 7853761573..568b8edecd 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -29,7 +29,7 @@ class ImageTensor(torch.Tensor): >>> image_tensor = opt.images.ImageTensor.load() >>> image_tensor.export(filename="image_tensor.jpg") # Save image(s) - >>> image_tensor.show(figsize=(8, 8)) # Displays image(s) via Matplotlib + >>> image_tensor.show() # Displays image(s) via Matplotlib Example using ``torch.Tensor``:: @@ -124,7 +124,7 @@ def __torch_function__( def show( self, - figsize: Optional[Tuple[int, int]] = None, + figsize: Optional[Tuple[int, int]] = (8, 8), scale: float = 255.0, images_per_row: Optional[int] = None, padding: int = 2, @@ -138,6 +138,7 @@ def show( figsize (Tuple[int, int], optional): height & width to use for displaying the ``ImageTensor`` figure. + Default: ``(8, 8)`` scale (float, optional): Value to multiply the ``ImageTensor`` by so that it's value range is [0-255] for display. Default: ``255.0`` diff --git a/captum/optim/_utils/image/dataset.py b/captum/optim/_utils/image/dataset.py index 6a36dd2644..9d9108f44d 100644 --- a/captum/optim/_utils/image/dataset.py +++ b/captum/optim/_utils/image/dataset.py @@ -23,7 +23,7 @@ def image_cov(x: torch.Tensor) -> torch.Tensor: dimension. Returns: - *tensor* (torch.Tensor): The average color channel covariance matrix for the + tensor (torch.Tensor): The average color channel covariance matrix for the for the input tensor, with a shape of: [n_channels, n_channels]. """ @@ -53,7 +53,7 @@ def dataset_cov_matrix( Default: ``torch.device("cpu")`` Returns: - *tensor* (torch.Tensor): A covariance matrix for the specified dataset. + tensor (torch.Tensor): A covariance matrix for the specified dataset. """ if show_progress: @@ -91,7 +91,7 @@ def cov_matrix_to_klt( Args: - cov_mtx (tensor): A 3 by 3 covariance matrix generated from a dataset. + cov_mtx (torch.Tensor): A 3 by 3 covariance matrix generated from a dataset. normalize (bool): Whether or not to normalize the resulting KLT matrix. Default: ``False`` epsilon (float, optional): A small epsilon value to use for numerical @@ -99,7 +99,7 @@ def cov_matrix_to_klt( Default: ``1e-10`` Returns: - *tensor* (torch.Tensor): A KLT matrix for the specified covariance + tensor (torch.Tensor): A KLT matrix for the specified covariance matrix. """ @@ -135,7 +135,7 @@ def dataset_klt_matrix( Default: ``torch.device("cpu")`` Returns: - *tensor* (torch.Tensor): A KLT matrix for the specified dataset. + tensor (torch.Tensor): A KLT matrix for the specified dataset. 
""" cov_mtx = dataset_cov_matrix(loader, show_progress=show_progress, device=device) From 4a62c0b542a75ebcd3f3a0f70e306a2ee57383a3 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 4 Jul 2022 18:25:34 -0600 Subject: [PATCH 18/42] Improve docs --- captum/optim/models/_image/inception_v1_places365.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/models/_image/inception_v1_places365.py b/captum/optim/models/_image/inception_v1_places365.py index acd5f8fe7f..c5df0b85b0 100644 --- a/captum/optim/models/_image/inception_v1_places365.py +++ b/captum/optim/models/_image/inception_v1_places365.py @@ -55,7 +55,7 @@ def googlenet_places365( Default: ``True`` Returns: - **model** (InceptionV1Places365): An InceptionV1 Places365 model instance. + model (InceptionV1Places365): An InceptionV1 Places365 model instance. """ if pretrained: From e63cee8f073d9d125a74fe45d0731807f7211183 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Mon, 4 Jul 2022 18:40:34 -0600 Subject: [PATCH 19/42] Improve docs --- captum/optim/_param/image/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index 568b8edecd..5bb8555a17 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -124,7 +124,7 @@ def __torch_function__( def show( self, - figsize: Optional[Tuple[int, int]] = (8, 8), + figsize: Optional[Tuple[int, int]] = None, scale: float = 255.0, images_per_row: Optional[int] = None, padding: int = 2, @@ -138,7 +138,7 @@ def show( figsize (Tuple[int, int], optional): height & width to use for displaying the ``ImageTensor`` figure. - Default: ``(8, 8)`` + Default: ``None`` scale (float, optional): Value to multiply the ``ImageTensor`` by so that it's value range is [0-255] for display. Default: ``255.0`` From 06db64f71cb6473ed9fca91fe041026b1dfc76f8 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Tue, 5 Jul 2022 09:07:34 -0600 Subject: [PATCH 20/42] Improve dataset docs --- captum/optim/_utils/image/dataset.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/captum/optim/_utils/image/dataset.py b/captum/optim/_utils/image/dataset.py index 9d9108f44d..5319e4b9a6 100644 --- a/captum/optim/_utils/image/dataset.py +++ b/captum/optim/_utils/image/dataset.py @@ -42,6 +42,15 @@ def dataset_cov_matrix( """ Calculate the covariance matrix for an image dataset. + Example:: + + >>> # Load image dataset + >>> dataset = torchvision.datasets.ImageFolder("") + >>> dataset_loader = torch.utils.data.DataLoader(dataset) + >>> # Calculate dataset COV matrix + >>> cov_mtx = opt.dataset.dataset_cov(dataset_loader, True) + >>> print(cov_mtx) + Args: loader (torch.utils.data.DataLoader): The reference to a PyTorch @@ -117,10 +126,19 @@ def dataset_klt_matrix( device: torch.device = torch.device("cpu"), ) -> torch.Tensor: """ - Calculate the color correlation matrix, also known as - a Karhunen-Loève transform (KLT) matrix, for a dataset. - The color correlation matrix can then used in color decorrelation - transforms for models trained on the dataset. + Calculate the color correlation matrix, also known as a Karhunen-Loève transform + (KLT) matrix, for a dataset. The color correlation matrix can then used in color + decorrelation & recorrelation transforms like + :class:`captum.optim.transforms.ToRGB` for models trained on the dataset. 
+ + Example:: + + >>> # Load image dataset + >>> dataset = torchvision.datasets.ImageFolder("") + >>> dataset_loader = torch.utils.data.DataLoader(dataset) + >>> # Calculate dataset KLT matrix + >>> klt_mtx = opt.dataset.dataset_klt_matrix(dataset_loader, True, True) + >>> print(klt_mtx) Args: From 10d2379c105c125ddaafbfe635646219f4457f61 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 6 Jul 2022 14:42:59 -0600 Subject: [PATCH 21/42] Add missing Places365 InceptionModule docs --- .../models/_image/inception_v1_places365.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/captum/optim/models/_image/inception_v1_places365.py b/captum/optim/models/_image/inception_v1_places365.py index c5df0b85b0..b92bbb6e6e 100644 --- a/captum/optim/models/_image/inception_v1_places365.py +++ b/captum/optim/models/_image/inception_v1_places365.py @@ -290,14 +290,20 @@ def __init__( """ Args: - in_channels (int, optional): The number of input channels to use for the - inception module. - c1x1 (int, optional): - c3x3reduce (int, optional): - c3x3 (int, optional): - c5x5reduce (int, optional): - c5x5 (int, optional): - pool_proj (int, optional): + in_channels (int): The number of input channels to use for the first + layers of the inception module branches. + c1x1 (int): The number of output channels to use for the first layer in + the c1x1 branch. + c3x3reduce (int): The number of output channels to use for the first layer + in the c3x3 branch. + c3x3 (int): The number of output channels to use for the second layer in + the c3x3 branch. + c5x5reduce (int): The number of output channels to use for the first layer + in the c5x5 branch. + c5x5 (int): The number of output channels to use for the second layer in + the c5x5 branch. + pool_proj (int): The number of output channels to use for the second layer + in the pool branch. activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. Default: ``nn.ReLU`` From b376466519d2f70b7de04e2087a1d875781d8816 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sat, 9 Jul 2022 13:47:59 -0600 Subject: [PATCH 22/42] Improve Optimization docs --- captum/optim/_core/optimization.py | 33 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index 4072b0f987..424d643282 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -71,13 +71,13 @@ def __init__( r""" Args: - model (nn.Module, optional): The reference to PyTorch model instance. - input_param (nn.Module, optional): A module that generates an input, - consumed by the model. - transform (nn.Module, optional): A module that transforms or preprocesses - the input before being passed to the model. - loss_function (callable): The loss function to minimize during optimization - optimization. + model (nn.Module, optional): The reference to PyTorch model instance. + input_param (nn.Module, optional): A module that generates an input, + consumed by the model. + transform (nn.Module, optional): A module that transforms or preprocesses + the input before being passed to the model. + loss_function (callable): The loss function to minimize during + optimization. """ self.model = model or nn.Identity() # Grab targets from loss_function @@ -100,9 +100,9 @@ def loss(self) -> torch.Tensor: r"""Compute loss value for current iteration. 
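+
+        Example (a hypothetical sketch; assumes ``obj`` is an existing
+        ``InputOptimization`` instance)::
+
+            >>> loss_value = obj.loss()
+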
        Returns:
-            *tensor* representing **loss**:
-            - **loss** (*tensor*):
-                Size of the tensor corresponds to the targets passed.
+            tensor representing **loss**:
+            - **loss** (torch.Tensor): Size of the tensor corresponds to the targets
+                passed.
        """

        input_t = self.input_param()
@@ -153,12 +153,13 @@ def optimize(

        Args:

-            stop_criteria (StopCriteria, optional):  A function that is called
+            stop_criteria (StopCriteria, optional): A function that is called
                every iteration and returns a bool that determines whether
                to stop the optimization.
                Default: ``n_steps(512)``
-            optimizer (Optimizer, optional):  An ``torch.optim.Optimizer`` used to
-                optimize the input based on the loss function.
+            optimizer (torch.optim.Optimizer, optional): A ``torch.optim.Optimizer``
+                instance to use for optimizing the input based on the loss function.
+                Default: ``torch.optim.Adam``
            loss_summarize_fn (Callable, optional): The function to use for summarizing
                tensor outputs from loss functions.
                Default: ``default_loss_summarize``
@@ -200,12 +201,12 @@ def n_steps(n: int, show_progress: bool = True) -> StopCriteria:

    Args:

-        n (int):  Number of steps to run optimization.
-        show_progress (bool, optional):  Whether or not to show progress bar.
+        n (int): Number of steps to run optimization.
+        show_progress (bool, optional): Whether or not to show progress bar.
            Default: ``True``

    Returns:
-        *StopCriteria* (callable): A stop criteria function.
+        StopCriteria (callable): A stop criteria function.
    """

    if show_progress:

From 1821a2da25ce6ebb9f4d16a4d99174db6a3f6e5b Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Sun, 10 Jul 2022 09:56:37 -0600
Subject: [PATCH 23/42] http -> https

---
 captum/optim/models/_image/inception_v1_places365.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/captum/optim/models/_image/inception_v1_places365.py b/captum/optim/models/_image/inception_v1_places365.py
index b92bbb6e6e..81bb7b98c1 100644
--- a/captum/optim/models/_image/inception_v1_places365.py
+++ b/captum/optim/models/_image/inception_v1_places365.py
@@ -18,7 +18,7 @@ def googlenet_places365(
    **kwargs: Any,
 ) -> "InceptionV1Places365":
    r"""GoogLeNet (also known as Inception v1 & Inception 5h) model architecture from
-    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
+    `"Going Deeper with Convolutions" <https://arxiv.org/abs/1409.4842>`_.
    The pretrained GoogLeNet model was trained using the MIT Places365 Standard
    dataset. See here for more information: https://arxiv.org/abs/1610.02055

From adaf3674883910b10a3ae53c29cbf6f9dd147261 Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Sun, 10 Jul 2022 13:04:49 -0600
Subject: [PATCH 24/42] Improve InputOptimization docs

---
 captum/optim/_core/optimization.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py
index 424d643282..7d26946e87 100644
--- a/captum/optim/_core/optimization.py
+++ b/captum/optim/_core/optimization.py
@@ -33,8 +33,9 @@ class InputOptimization(Objective, Parameterized):
    This is similar to gradient-based methods for adversarial examples, such
    as FGSM. The code for this was based on the implementation by the authors of Lucid.
For more details, see the following:
-    https://github.com/tensorflow/lucid
-    https://distill.pub/2017/feature-visualization/
+
+    * https://github.com/tensorflow/lucid
+    * https://distill.pub/2017/feature-visualization/

    Example::

From a66e7f5179a575f928523f8b854c7b54cb2e3874 Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Sat, 16 Jul 2022 09:15:30 -0600
Subject: [PATCH 25/42] Fix InputOptimization docs

---
 captum/optim/_core/optimization.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py
index 7d26946e87..0bdfba8b6e 100644
--- a/captum/optim/_core/optimization.py
+++ b/captum/optim/_core/optimization.py
@@ -53,13 +53,14 @@ class InputOptimization(Objective, Parameterized):

    :ivar model: initial value (nn.Module): The given model instance given when
        initializing ``InputOptimization``.
-    :ivar input_param: initial value (ImageParameterization): The given image
+    :ivar input_param: initial value (InputParameterization): The given input
        parameterization instance given when initializing ``InputOptimization``.
    :ivar loss_function: initial value (Loss): The given composable loss instance
        given when initializing ``InputOptimization``.
    :ivar transform: initial value (nn.Module): The given transform instance given
        when initializing ``InputOptimization``. If it was set to ``None`` during
-        initialization, then an instance of ``torch.nn.Identity`` will be returned.
+        initialization, then an instance of :class:`torch.nn.Identity` will be
+        returned.
    """

    def __init__(
@@ -73,12 +74,12 @@ def __init__(
        Args:

            model (nn.Module, optional): The reference to PyTorch model instance.
+            loss_function (callable): The loss function to minimize during
+                optimization.
            input_param (nn.Module, optional): A module that generates an input,
                consumed by the model.
            transform (nn.Module, optional): A module that transforms or preprocesses
                the input before being passed to the model.
-            loss_function (callable): The loss function to minimize during
-                optimization.
        """
        self.model = model or nn.Identity()
        # Grab targets from loss_function
@@ -139,7 +140,7 @@ def parameters(self) -> Iterable[nn.Parameter]:
        """
        Returns:
            parameters (iterable of nn.Parameter): An iterable of parameters in the
-                image parameterization.
+                input parameterization.
        """
        return self.input_param.parameters()

From 936bc84f2e3fae8f330a77b1d159b0e57900518a Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Sat, 16 Jul 2022 14:12:02 -0600
Subject: [PATCH 26/42] Update _common.py

---
 captum/optim/models/_common.py | 52 ++++++++++++++++++++--------------
 1 file changed, 30 insertions(+), 22 deletions(-)

diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py
index 4b460058f8..6ced882ce8 100644
--- a/captum/optim/models/_common.py
+++ b/captum/optim/models/_common.py
@@ -16,6 +16,9 @@ def get_model_layers(model: nn.Module) -> List[str]:
    Args:

        model (nn.Module): A PyTorch model or module instance to collect layers from.
+
+    Returns:
+        model_layers (List[str]): A list of hookable layers in the model.
    """

    layers = []
@@ -68,6 +71,14 @@ class RedirectedReluLayer(nn.Module):

    @torch.jit.ignore
    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        """
+        Args:
+
+            input (torch.Tensor): A tensor to pass through RedirectedReLU.
+
+        Returns:
+            x (torch.Tensor): The output of RedirectedReLU.
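+
+        Example (an illustrative sketch; assumes ``torch`` is imported)::
+
+            >>> layer = RedirectedReluLayer()
+            >>> output = layer(torch.randn(1, 3, 4, 4))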
+        """
        return RedirectedReLU.apply(input)

@@ -218,7 +229,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:

-            x (torch.tensor): The input tensor to apply 2D convolution to.
+            x (torch.Tensor): The input tensor to apply 2D convolution to.

        Returns:
            x (torch.Tensor): The input tensor after the 2D convolution was applied.
@@ -254,7 +265,7 @@ def collect_activations(
    Args:

        model (nn.Module): A PyTorch model instance.
-        targets (nn.Module or list of nn.Module): One or more layer targets for the
+        targets (nn.Module or List[nn.Module]): One or more layer targets for the
            given model.
        model_input (torch.Tensor or tuple of torch.Tensor, optional): Optionally
            provide an input tensor to use when collecting the target activations.
@@ -278,9 +289,9 @@ class SkipLayer(torch.nn.Module):
    during the forward pass. Use cases include removing nonlinear activation layers
    like ReLU for circuits research.

-    This layer works almost exactly the same way that ``nn.Identity`` does, except it
-    also ignores any additional arguments passed to the forward function. Any layer
-    replaced by ``SkipLayer`` must have the same input and output shapes.
+    This layer works almost exactly the same way that nn.Identity does, except it also
+    ignores any additional arguments passed to the forward function. Any layer replaced
+    by SkipLayer must have the same input and output shapes.

    See nn.Identity for more details:
    https://pytorch.org/docs/stable/generated/torch.nn.Identity.html
@@ -290,24 +301,23 @@ def __init__(self, *args, **kwargs) -> None:
        """
        Args:

-            args (Any): Any argument. Arguments will be safely ignored.
-            kwargs (Any): Any keyword argument. Arguments will be safely ignored.
+            args (Any, optional): Any argument. Arguments will be safely ignored.
+            kwargs (Any, optional): Any keyword argument. Arguments will be safely
+                ignored.
        """
        super().__init__()

-    def forward(
-        self, x: Union[torch.Tensor, Tuple[torch.Tensor]], *args, **kwargs
-    ) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
+    def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Args:

-            x (torch.Tensor or tuple of torch.Tensor): The input tensor or tensors.
-            args (Any): Any argument. Arguments will be safely ignored.
-            kwargs (Any): Any keyword argument. Arguments will be safely ignored.
+            x (torch.Tensor): The input tensor.
+            args (Any, optional): Any argument. Arguments will be safely ignored.
+            kwargs (Any, optional): Any keyword argument. Arguments will be safely
+                ignored.

        Returns:
-            x (torch.Tensor or tuple of torch.Tensor): The unmodified input tensor or
-                tensors.
+            x (torch.Tensor): The unmodified input tensor.
        """
        return x

@@ -316,17 +326,15 @@ def skip_layers(
    model: nn.Module, layers: Union[List[Type[nn.Module]], Type[nn.Module]]
 ) -> None:
    """
-    This function is a wrapper function for
-    replace_layers and replaces the target layer
-    with layers that do nothing.
-    This is useful for removing the nonlinear ReLU
-    layers when creating expanded weights.
+    This function is a wrapper function for :func:`.replace_layers` and replaces the
+    target layer with layers that do nothing. This is useful for removing the nonlinear
+    ReLU layers when creating expanded weights.

    Args:

        model (nn.Module): A PyTorch model instance.
-        layers (nn.Module or list of nn.Module): The layer
-            class type to replace in the model.
+        layers (nn.Module or List[nn.Module]): The layer class type to replace in the
+            model.
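+
+    Example (an illustrative sketch; assumes ``opt`` refers to ``captum.optim``)::
+
+        >>> model = opt.models.googlenet(pretrained=True)
+        >>> # Replace the redirected ReLU layers with pass-through layers
+        >>> opt.models.skip_layers(model, opt.models.RedirectedReluLayer)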
""" if not hasattr(layers, "__iter__"): layers = cast(Type[nn.Module], layers) From 96e2f8d016d619637c07b9c3c4093391d77890de Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sun, 17 Jul 2022 12:17:25 -0600 Subject: [PATCH 27/42] Add aliases to InputOptimization and ImageTensor docs --- captum/optim/_core/optimization.py | 2 ++ captum/optim/_param/image/images.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index 0bdfba8b6e..5636b63dbf 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -37,6 +37,8 @@ class InputOptimization(Objective, Parameterized): * https://github.com/tensorflow/lucid * https://distill.pub/2017/feature-visualization/ + Alias: ``captum.optim.InputOptimization`` + Example:: >>> model = opt.models.googlenet(pretrained=True) diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index 5bb8555a17..ee50396572 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -25,6 +25,8 @@ class ImageTensor(torch.Tensor): A subclass of torch.Tensor that provides functions for easy loading, saving, and displaying image tensors. + Alias: ``captum.optim.ImageTensor`` + Example using file path or URL:: >>> image_tensor = opt.images.ImageTensor.load() From f31b8ca2dbbb28e47b4d23532674f76f726b7cf4 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Sun, 17 Jul 2022 14:27:46 -0600 Subject: [PATCH 28/42] Improve MaxPool2dRelaxed docs --- captum/optim/models/_common.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 6ced882ce8..3032bc4cf6 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -289,9 +289,9 @@ class SkipLayer(torch.nn.Module): during the forward pass. Use cases include removing nonlinear activation layers like ReLU for circuits research. - This layer works almost exactly the same way that nn.Indentiy does, except it also - ignores any additional arguments passed to the forward function. Any layer replaced - by SkipLayer must have the same input and output shapes. + This layer works almost exactly the same way that :class:`torch.nn.Identity` does, + except it also ignores any additional arguments passed to the forward function. + Any layer replaced by SkipLayer must have the same input and output shapes. See nn.Identity for more details: https://pytorch.org/docs/stable/generated/torch.nn.Identity.html @@ -355,9 +355,10 @@ class MaxPool2dRelaxed(torch.nn.Module): attributions of spatial posititions can be estimated using the rate at which increasing the neuron affects the output classes. - This layer peforms a MaxPool2d operation on the input, while using an equivalent - AvgPool2d layer to compute the gradient. This means that the forward pass returns - nn.MaxPool2d(input) while the backward pass uses nn.AvgPool2d(input). + This layer peforms a :class:`torch.nn.MaxPool2d` operation on the input, while + using an equivalent :class:`torch.nn.AvgPool2d` layer to compute the gradient. + This means that the forward pass returns ``nn.MaxPool2d(input)`` while the + backward pass uses ``nn.AvgPool2d(input)``. Carter, et al., "Activation Atlas", Distill, 2019. 
https://distill.pub/2019/activation-atlas/

From 199509ef6d091abb31b854c50c21606ca851064e Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Mon, 18 Jul 2022 15:07:50 -0600
Subject: [PATCH 29/42] Improve docstring type formatting

---
 captum/optim/_core/optimization.py            | 44 +++++++++----------
 captum/optim/_param/image/images.py           |  8 ++--
 captum/optim/models/_common.py                | 24 +++++-----
 .../models/_image/inception_v1_places365.py   | 10 ++---
 4 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py
index 5636b63dbf..8b41e757fa 100644
--- a/captum/optim/_core/optimization.py
+++ b/captum/optim/_core/optimization.py
@@ -1,5 +1,3 @@
-"""captum.optim.optimization."""
-
 import warnings
 from typing import Callable, Iterable, Optional

@@ -49,20 +47,6 @@ class InputOptimization(Objective, Parameterized):
        >>> obj = opt.InputOptimization(model, loss_fn, image, transform)
        >>> history = obj.optimize(opt.optimization.n_steps(512))
        >>> image().show(figsize=(10, 10))  # Display results
-
-    Instance variables that be used in the optimize function and StopCriteria
-    functions:
-
-    :ivar model: initial value (nn.Module): The given model instance given when
-        initializing ``InputOptimization``.
-    :ivar input_param: initial value (InputParameterization): The given input
-        parameterization instance given when initializing ``InputOptimization``.
-    :ivar loss_function: initial value (Loss): The given composable loss instance
-        given when initializing ``InputOptimization``.
-    :ivar transform: initial value (nn.Module): The given transform instance given
-        when initializing ``InputOptimization``. If it was set to ``None`` during
-        initialization, then an instance of :class:`torch.nn.Identity` will be
-        returned.
    """

    def __init__(
@@ -76,12 +60,28 @@ def __init__(
        Args:

            model (nn.Module, optional): The reference to PyTorch model instance.
-            loss_function (callable): The loss function to minimize during
-                optimization.
+            loss_function (callable): The :mod:`.loss` objective instance to minimize
+                during optimization.
            input_param (nn.Module, optional): A module that generates an input,
                consumed by the model.
            transform (nn.Module, optional): A module that transforms or preprocesses
                the input before being passed to the model.
+
+        Instance variables that can be used in the :func:`.optimize` function and
+        StopCriteria functions:
+
+        Attributes:
+
+            model (torch.nn.Module): The given model instance given when initializing
+                ``InputOptimization``. If ``model`` was set to ``None`` during
+                initialization, then an instance of :class:`torch.nn.Identity` will be
+                returned.
+            input_param (InputParameterization): The given input parameterization
+                instance given when initializing ``InputOptimization``.
+            loss_function (Loss): The composable :mod:`.loss` instance given when
+                initializing ``InputOptimization``.
+            transform (torch.nn.Module): The given transform instance given when
+                initializing ``InputOptimization``.
        """
        self.model = model or nn.Identity()
        # Grab targets from loss_function
@@ -141,8 +141,8 @@ def targets(self, value: Iterable[nn.Module]) -> None:
    def parameters(self) -> Iterable[nn.Parameter]:
        """
        Returns:
-            parameters (iterable of nn.Parameter): An iterable of parameters in the
-                input parameterization.
+            parameters (iterable of torch.nn.Parameter): An iterable of parameters in
+                the input parameterization.
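+
+        Example (a hypothetical sketch; assumes ``obj`` is an ``InputOptimization``
+        instance)::
+
+            >>> optimizer = torch.optim.Adam(obj.parameters(), lr=0.025)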
""" return self.input_param.parameters() @@ -164,10 +164,10 @@ def optimize( optimizer (torch.optim.Optimizer, optional): A ``torch.optim.Optimizer`` instance to use for optimizing the input based on the loss function. Default: ``torch.optim.Adam`` - loss_summarize_fn (Callable, optional): The function to use for summarizing + loss_summarize_fn (callable, optional): The function to use for summarizing tensor outputs from loss functions. Default: ``default_loss_summarize`` - lr: (float, optional): If no optimizer is given, then lr is used as the + lr (float, optional): If no optimizer is given, then lr is used as the learning rate for the Adam optimizer. Default: ``0.025`` diff --git a/captum/optim/_param/image/images.py b/captum/optim/_param/image/images.py index ee50396572..16e5f625e0 100644 --- a/captum/optim/_param/image/images.py +++ b/captum/optim/_param/image/images.py @@ -22,8 +22,8 @@ class ImageTensor(torch.Tensor): r""" - A subclass of torch.Tensor that provides functions for easy loading, saving, and - displaying image tensors. + A subclass of :class:`torch.Tensor` that provides functions for easy loading, + saving, and displaying image tensors. Alias: ``captum.optim.ImageTensor`` @@ -138,8 +138,8 @@ def show( Args: - figsize (Tuple[int, int], optional): height & width to use - for displaying the ``ImageTensor`` figure. + figsize (tuple of int, optional): The height & width to use for displaying + the ``ImageTensor`` figure, in the format of: (height, width). Default: ``None`` scale (float, optional): Value to multiply the ``ImageTensor`` by so that it's value range is [0-255] for display. diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 3032bc4cf6..8b6695b33b 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -18,7 +18,7 @@ def get_model_layers(model: nn.Module) -> List[str]: model (nn.Module): A PyTorch model or module instance to collect layers from. Returns: - model_layers (List[str]): A list of hookable layers in the model. + model_layers (list of str): A list of hookable layers in the model. """ layers = [] @@ -101,16 +101,16 @@ def replace_layers( Args: - model: (nn.Module): A PyTorch model instance. - layer1: (Type[nn.Module]): The layer class that you want to transfer + model (nn.Module): A PyTorch model instance. + layer1 (Type[nn.Module]): The layer class that you want to transfer initialization variables from. - layer2: (Type[nn.Module]): The layer class to create with the variables + layer2 (Type[nn.Module]): The layer class to create with the variables from ``layer1``. transfer_vars (bool, optional): Whether or not to try and copy initialization variables from ``layer1`` instances to the replacement ``layer2`` instances. Default: ``False`` - kwargs: (Any, optional): Any additional variables to use when creating + kwargs (Any, optional): Any additional variables to use when creating the new layer. """ @@ -134,11 +134,11 @@ def _transfer_layer_vars( Args: - layer1: (nn.Module): A layer instance that you want to transfer + layer1 (nn.Module): A layer instance that you want to transfer initialization variables from. - layer2: (nn.Module): The layer class to create with the variables + layer2 (nn.Module): The layer class to create with the variables from of layer1. - kwargs: (Any, optional): Any additional variables to use when creating + kwargs (Any, optional): Any additional variables to use when creating the new layer. 
Returns: @@ -265,7 +265,7 @@ def collect_activations( Args: model (nn.Module): A PyTorch model instance. - targets (nn.Module or List[nn.Module]): One or more layer targets for the + targets (nn.Module or list of nn.Module): One or more layer targets for the given model. model_input (torch.Tensor or tuple of torch.Tensor, optional): Optionally provide an input tensor to use when collecting the target activations. @@ -333,7 +333,7 @@ def skip_layers( Args: model (nn.Module): A PyTorch model instance. - layers (nn.Module or List[nn.Module]): The layer class type to replace in the + layers (nn.Module or list of nn.Module): The layer class type to replace in the model. """ if not hasattr(layers, "__iter__"): @@ -382,8 +382,8 @@ def __init__( """ Args: - kernel_size (int or tuple of int): The size of the window to perform max & - average pooling with. + kernel_size (int or tuple of int): The size of the window to perform max + and average pooling with. stride (int or tuple of int, optional): The stride window size to use. Default: ``None`` padding (int or tuple of int): The amount of zero padding to add to both diff --git a/captum/optim/models/_image/inception_v1_places365.py b/captum/optim/models/_image/inception_v1_places365.py index 81bb7b98c1..62a6834e16 100644 --- a/captum/optim/models/_image/inception_v1_places365.py +++ b/captum/optim/models/_image/inception_v1_places365.py @@ -39,7 +39,7 @@ def googlenet_places365( model_path (str, optional): Optional path for the InceptionV1 model file. Default: ``None`` replace_relus_with_redirectedrelu (bool, optional): If ``True``, return - pretrained model with Redirected ReLU in place of ReLU layers. + pretrained model with :class:`.RedirectedReLU` in place of ReLU layers. Default: *``True``* when pretrained is True otherwise *``False``* use_linear_modules_only (bool, optional): If ``True``, return pretrained model with all nonlinear layers replaced with linear equivalents. @@ -112,7 +112,7 @@ def __init__( according to the method with which it was trained on Places365. Default: ``True`` replace_relus_with_redirectedrelu (bool, optional): If ``True``, return - pretrained model with Redirected ReLU in place of ReLU layers. + pretrained model with :class:`.RedirectedReLU` in place of ReLU layers. Default: ``False`` use_linear_modules_only (bool, optional): If ``True``, return pretrained model with all nonlinear layers replaced with linear equivalents. @@ -306,10 +306,10 @@ def __init__( in the pool branch. activ (type of nn.Module, optional): The nn.Module class type to use for activation layers. - Default: ``nn.ReLU`` + Default: :class:`torch.nn.ReLU` p_layer (type of nn.Module, optional): The nn.Module class type to use for pooling layers. - Default: ``nn.MaxPool2d`` + Default: :class:`torch.nn.MaxPool2d` """ super().__init__() self.conv_1x1 = nn.Conv2d( @@ -409,7 +409,7 @@ def __init__( Default: ``1008`` activ (type of nn.Module, optional): The ``nn.Module`` class type to use for activation layers. 
-                Default: ``nn.ReLU``
+                Default: :class:`torch.nn.ReLU`
        """
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((4, 4))

From f2f1d5d3eccacc2201c8c50448f7570ea9f72773 Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Mon, 18 Jul 2022 15:28:14 -0600
Subject: [PATCH 30/42] Fix bug in skip_layers

---
 captum/optim/models/_common.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py
index 8b6695b33b..5f3cb7677a 100644
--- a/captum/optim/models/_common.py
+++ b/captum/optim/models/_common.py
@@ -275,7 +275,7 @@ def collect_activations(
        activ_dict (ModuleOutputMapping): A dictionary of collected activations where
            the keys are the target layers.
    """
-    if not isinstance(targets, list):
+    if not isinstance(targets, (list, tuple)):
        targets = [targets]
    targets = list(dict.fromkeys(targets))
    catch_activ = ActivationFetcher(model, targets)
@@ -336,7 +336,7 @@ def skip_layers(
        layers (nn.Module or list of nn.Module): The layer class type to replace in the
            model.
    """
-    if not hasattr(layers, "__iter__"):
+    if not isinstance(layers, (tuple, list)):
        layers = cast(Type[nn.Module], layers)
        replace_layers(model, layers, SkipLayer)
    else:

From a61461bb424a012f95329ffaebc3f4cf3791d6a9 Mon Sep 17 00:00:00 2001
From: ProGamerGov
Date: Wed, 20 Jul 2022 13:34:59 -0600
Subject: [PATCH 31/42] Improve optimization docs

---
 captum/optim/_core/optimization.py | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py
index 8b41e757fa..508f235534 100644
--- a/captum/optim/_core/optimization.py
+++ b/captum/optim/_core/optimization.py
@@ -59,16 +59,19 @@ def __init__(
        r"""
        Args:

-            model (nn.Module, optional): The reference to PyTorch model instance.
-            loss_function (callable): The :mod:`.loss` objective instance to minimize
-                during optimization.
-            input_param (nn.Module, optional): A module that generates an input,
-                consumed by the model.
+            model (nn.Module, optional): The reference to PyTorch model instance. Set
+                to ``None`` for no model instance.
+            loss_function (callable): The :mod:`Loss <.loss>` objective instance to
+                minimize during optimization.
+            input_param (InputParameterization, optional): A module that generates an
+                input, consumed by the model. Example: An ``ImageParameterization``
+                instance.
            transform (nn.Module, optional): A module that transforms or preprocesses
-                the input before being passed to the model.
+                the input before being passed to the model. Set to
+                :class:`torch.nn.Identity` for no transforms.

-        Instance variables that can be used in the :func:`.optimize` function and
-        StopCriteria functions:
+        Instance variables that can be used in the :func:`InputOptimization.optimize`
+        function, custom optimization functions, and StopCriteria functions:

        Attributes:

@@ -81,8 +84,8 @@ def __init__(
                returned.
            input_param (InputParameterization): The given input parameterization
                instance given when initializing ``InputOptimization``.
-            loss_function (Loss): The composable :mod:`.loss` instance given when
-                initializing ``InputOptimization``.
+            loss_function (Loss): The composable :mod:`Loss <.loss>` instance given
+                when initializing ``InputOptimization``.
            transform (torch.nn.Module): The given transform instance given when
                initializing ``InputOptimization``.
""" From 0ecff5d546ca27dd5f10e9db3f54ecfc6a3c5938 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 20 Jul 2022 14:01:34 -0600 Subject: [PATCH 32/42] Improve InputOptimization.optimize's docstring --- captum/optim/_core/optimization.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index 508f235534..541c1d8078 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -156,20 +156,20 @@ def optimize( loss_summarize_fn: Optional[Callable] = None, lr: float = 0.025, ) -> torch.Tensor: - r"""Optimize input based on loss function and objectives. + r"""Optimize input based on loss function and objectives. Args: stop_criteria (StopCriteria, optional): A function that is called every iteration and returns a bool that determines whether to stop the optimization. - Default: ``n_steps(512)`` + Default: :func:`n_steps(512) <.n_steps>` optimizer (torch.optim.Optimizer, optional): A ``torch.optim.Optimizer`` instance to use for optimizing the input based on the loss function. - Default: ``torch.optim.Adam`` + Default: :class:`torch.optim.Adam` loss_summarize_fn (callable, optional): The function to use for summarizing tensor outputs from loss functions. - Default: ``default_loss_summarize`` + Default: :func:`.default_loss_summarize` lr (float, optional): If no optimizer is given, then lr is used as the learning rate for the Adam optimizer. Default: ``0.025`` From aeb058d291ea089aecb5b047f0266590c0e88419 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 21 Jul 2022 11:12:15 -0600 Subject: [PATCH 33/42] Improve InputOptimization docs --- captum/optim/_core/optimization.py | 4 ++-- captum/optim/models/_common.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index 541c1d8078..0aac927116 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -29,8 +29,8 @@ class InputOptimization(Objective, Parameterized): """ Core function that optimizes an input to maximize a target (aka objective). This is similar to gradient-based methods for adversarial examples, such - as FGSM. The code for this was based on the implementation by the authors of Lucid. - For more details, see the following: + as :class:`FGSM `. The code for this was based on the + implementation by the authors of Lucid. For more details, see the following: * https://github.com/tensorflow/lucid * https://distill.pub/2017/feature-visualization/ diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 5f3cb7677a..8fcc2a978a 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -272,8 +272,8 @@ def collect_activations( Default: ``torch.zeros(1, 3, 224, 224)`` Returns: - activ_dict (ModuleOutputMapping): A dictionary of collected activations where - the keys are the target layers. + activ_dict (dict[nn.Module, torch.Tensor]): A dictionary of collected + activations where the keys are the target layers. 
""" if not isinstance(targets, (list, tuple)): targets = [targets] From 1faadcda863f6f3a532ba8bb4d5ca2ab2dbbe36e Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 21 Jul 2022 11:21:28 -0600 Subject: [PATCH 34/42] Fix doc spacing --- captum/optim/_core/optimization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index 0aac927116..84b3b10a0f 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -156,7 +156,7 @@ def optimize( loss_summarize_fn: Optional[Callable] = None, lr: float = 0.025, ) -> torch.Tensor: - r"""Optimize input based on loss function and objectives. + r"""Optimize input based on loss function and objectives. Args: From 2cfa21be999a54e4b47984ea5dfd74c0f7b000cf Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 21 Jul 2022 19:35:16 -0600 Subject: [PATCH 35/42] Add Optim to run_mypy.sh --- scripts/run_mypy.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/run_mypy.sh b/scripts/run_mypy.sh index d2f7c8d076..2497be44bc 100755 --- a/scripts/run_mypy.sh +++ b/scripts/run_mypy.sh @@ -5,6 +5,7 @@ set -e # hints. mypy -p captum.attr --ignore-missing-imports --allow-redefinition +mypy -p captum.optim --ignore-missing-imports --allow-redefinition mypy -p captum.insights --ignore-missing-imports --allow-redefinition mypy -p captum.metrics --ignore-missing-imports --allow-redefinition mypy -p captum.robust --ignore-missing-imports --allow-redefinition From 1c50b87007178466f2fa81c428e1e2a40fe0a860 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 27 Jul 2022 08:56:38 -0600 Subject: [PATCH 36/42] Fix grammar --- captum/optim/_utils/image/dataset.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/captum/optim/_utils/image/dataset.py b/captum/optim/_utils/image/dataset.py index 5319e4b9a6..7f03129ac7 100644 --- a/captum/optim/_utils/image/dataset.py +++ b/captum/optim/_utils/image/dataset.py @@ -57,8 +57,8 @@ def dataset_cov_matrix( dataloader instance. show_progress (bool, optional): Whether or not to display a tqdm progress bar. Default: ``False`` - device (torch.device, optional): The PyTorch device to use for for calculating - the cov matrix. + device (torch.device, optional): The PyTorch device to use for calculating the + cov matrix. Default: ``torch.device("cpu")`` Returns: @@ -148,8 +148,8 @@ def dataset_klt_matrix( Default: ``False`` show_progress (bool, optional): Whether or not to display a tqdm progress bar. Default: ``False`` - device (torch.device, optional): The PyTorch device to use for for calculating - the cov matrix. + device (torch.device, optional): The PyTorch device to use for calculating the + cov matrix. Default: ``torch.device("cpu")`` Returns: From 27b702ed7bade5c9b6b9d09bd5864d10c0701acf Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 27 Jul 2022 09:30:20 -0600 Subject: [PATCH 37/42] Fix spelling --- captum/optim/models/_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 8fcc2a978a..2ba15cfa0d 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -198,7 +198,7 @@ def __init__( kernel points. Default: ``1`` groups (int, optional): Number of blocked connections from input channels - to output channels. Both in_channels and out_channels must be divisable + to output channels. Both in_channels and out_channels must be divisible by groups. 
Default: ``1`` bias (bool, optional): Whether or not to apply a learnable bias to the From 7924b87d3f1ee45d27fc27c32697317a28a57fe1 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 27 Jul 2022 09:30:51 -0600 Subject: [PATCH 38/42] Remove Optim from run_mypy.sh for now --- scripts/run_mypy.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/run_mypy.sh b/scripts/run_mypy.sh index 2497be44bc..d2f7c8d076 100755 --- a/scripts/run_mypy.sh +++ b/scripts/run_mypy.sh @@ -5,7 +5,6 @@ set -e # hints. mypy -p captum.attr --ignore-missing-imports --allow-redefinition -mypy -p captum.optim --ignore-missing-imports --allow-redefinition mypy -p captum.insights --ignore-missing-imports --allow-redefinition mypy -p captum.metrics --ignore-missing-imports --allow-redefinition mypy -p captum.robust --ignore-missing-imports --allow-redefinition From 07c759363f03cf3ced3e41c702b97ab17d8efcb8 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 27 Jul 2022 18:38:12 -0600 Subject: [PATCH 39/42] Fix Mypy type hints --- captum/optim/models/_common.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 2ba15cfa0d..30c574d60d 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -49,12 +49,12 @@ class RedirectedReLU(torch.autograd.Function): """ @staticmethod - def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: + def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: # type: ignore self.save_for_backward(input_tensor) return input_tensor.clamp(min=0) @staticmethod - def backward(self, grad_output: torch.Tensor) -> torch.Tensor: + def backward(self, grad_output: torch.Tensor) -> torch.Tensor: # type: ignore (input_tensor,) = self.saved_tensors relu_grad = grad_output.clone() relu_grad[input_tensor < 0] = 0 @@ -374,20 +374,25 @@ class MaxPool2dRelaxed(torch.nn.Module): def __init__( self, - kernel_size: Union[int, Tuple[int, ...]], - stride: Optional[Union[int, Tuple[int, ...]]] = None, - padding: Union[int, Tuple[int, ...]] = 0, + kernel_size: Union[int, Tuple[int, int]], + stride: Optional[Union[int, Tuple[int, int]]] = None, + padding: Union[int, Tuple[int, int]] = 0, ceil_mode: bool = False, ) -> None: """ Args: kernel_size (int or tuple of int): The size of the window to perform max - and average pooling with. + and average pooling with. Either a single int to use for both the + height & width or a tuple of 2 integers in format of: (height, width). stride (int or tuple of int, optional): The stride window size to use. + Either a single int to use for both the height & width or a tuple of 2 + integers in format of: (height, width). Default: ``None`` padding (int or tuple of int): The amount of zero padding to add to both - sides in the ``nn.MaxPool2d`` & ``nn.AvgPool2d`` modules. + sides in the ``nn.MaxPool2d`` & ``nn.AvgPool2d`` modules. Either a + single int to use for both the height & width or a tuple of 2 integers + in format of: (height, width). Default: ``0`` ceil_mode (bool, optional): Whether to use ceil or floor for creating the output shape. 
From 16dd3cf4871dcba8c3b92efa42366e80fd41cba2 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 27 Jul 2022 18:39:24 -0600 Subject: [PATCH 40/42] Fix formatting --- captum/optim/models/_common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 30c574d60d..8a2c6f7391 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -384,15 +384,15 @@ def __init__( kernel_size (int or tuple of int): The size of the window to perform max and average pooling with. Either a single int to use for both the - height & width or a tuple of 2 integers in format of: (height, width). + height & width or a tuple of 2 integers in format of: (height, width). stride (int or tuple of int, optional): The stride window size to use. Either a single int to use for both the height & width or a tuple of 2 - integers in format of: (height, width). + integers in format of: (height, width). Default: ``None`` padding (int or tuple of int): The amount of zero padding to add to both sides in the ``nn.MaxPool2d`` & ``nn.AvgPool2d`` modules. Either a - single int to use for both the height & width or a tuple of 2 integers - in format of: (height, width). + single int to use for both the height & width or a tuple of 2 integers + in format of: (height, width). Default: ``0`` ceil_mode (bool, optional): Whether to use ceil or floor for creating the output shape. From 7e2dbf9e23608fee29896357fb9095a70619a812 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Thu, 28 Jul 2022 12:49:01 -0600 Subject: [PATCH 41/42] callable -> Callable --- captum/optim/_core/optimization.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/captum/optim/_core/optimization.py b/captum/optim/_core/optimization.py index 84b3b10a0f..6ce3fb3e13 100644 --- a/captum/optim/_core/optimization.py +++ b/captum/optim/_core/optimization.py @@ -61,7 +61,7 @@ def __init__( model (nn.Module, optional): The reference to PyTorch model instance. Set to ``None`` for no model instance. - loss_function (callable): The :mod:`Loss <.loss>` objective instance to + loss_function (Callable): The :mod:`Loss <.loss>` objective instance to minimize during optimization. input_param (InputParameterization, optional): A module that generates an input, consumed by the model. Example: An @@ -167,7 +167,7 @@ def optimize( optimizer (torch.optim.Optimizer, optional): A ``torch.optim.Optimizer`` instance to use for optimizing the input based on the loss function. Default: :class:`torch.optim.Adam` - loss_summarize_fn (callable, optional): The function to use for summarizing + loss_summarize_fn (Callable, optional): The function to use for summarizing tensor outputs from loss functions. Default: :func:`.default_loss_summarize` lr (float, optional): If no optimizer is given, then lr is used as the @@ -213,7 +213,7 @@ def n_steps(n: int, show_progress: bool = True) -> StopCriteria: Default: ``True`` Returns: - StopCriteria (callable): A stop criteria function. + StopCriteria (Callable): A stop criteria function. 
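+
+    Example (an illustrative sketch; assumes ``obj`` is an ``InputOptimization``
+    instance and ``opt`` refers to ``captum.optim``)::
+
+        >>> stop_criteria = opt.optimization.n_steps(512, show_progress=True)
+        >>> history = obj.optimize(stop_criteria)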
""" if show_progress: From ca84f7b6d8d2f1315c63d99f1a071df397ddcb80 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Fri, 29 Jul 2022 14:11:50 -0600 Subject: [PATCH 42/42] Docstring Improvements --- captum/optim/models/_common.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/captum/optim/models/_common.py b/captum/optim/models/_common.py index 8a2c6f7391..9fa9cda942 100644 --- a/captum/optim/models/_common.py +++ b/captum/optim/models/_common.py @@ -49,12 +49,12 @@ class RedirectedReLU(torch.autograd.Function): """ @staticmethod - def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: # type: ignore + def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: self.save_for_backward(input_tensor) return input_tensor.clamp(min=0) @staticmethod - def backward(self, grad_output: torch.Tensor) -> torch.Tensor: # type: ignore + def backward(self, grad_output: torch.Tensor) -> torch.Tensor: (input_tensor,) = self.saved_tensors relu_grad = grad_output.clone() relu_grad[input_tensor < 0] = 0 @@ -262,6 +262,13 @@ def collect_activations( """ Collect target activations for a model. + Example:: + + >>> model = opt.models.googlenet(pretrained=True) + >>> target = model.mixed4c # Target layer + >>> activ_dict = opt.models.collect_activations(model, target) + >>> activations = activ_dict[target] # Get activations from dict + Args: model (nn.Module): A PyTorch model instance.