diff --git a/torchvision/datasets/vision.py b/torchvision/datasets/vision.py
index 591c0bb0c0c..b65a2d4f319 100644
--- a/torchvision/datasets/vision.py
+++ b/torchvision/datasets/vision.py
@@ -4,6 +4,8 @@
 import torch
 import torch.utils.data as data
 
+from ..utils import _log_api_usage_once
+
 
 class VisionDataset(data.Dataset):
     """
@@ -33,7 +35,7 @@ def __init__(
         transform: Optional[Callable] = None,
         target_transform: Optional[Callable] = None,
     ) -> None:
-        torch._C._log_api_usage_once(f"torchvision.datasets.{self.__class__.__name__}")
+        _log_api_usage_once(self)
         if isinstance(root, torch._six.string_classes):
             root = os.path.expanduser(root)
         self.root = root
diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py
index 32a8711f64c..fd735f4a7a4 100644
--- a/torchvision/models/alexnet.py
+++ b/torchvision/models/alexnet.py
@@ -4,6 +4,7 @@
 import torch.nn as nn
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = ["AlexNet", "alexnet"]
@@ -17,6 +18,7 @@ class AlexNet(nn.Module):
 
     def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
         super(AlexNet, self).__init__()
+        _log_api_usage_once(self)
         self.features = nn.Sequential(
             nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
             nn.ReLU(inplace=True),
diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py
index a34b50e41e9..d9ff52e55ae 100644
--- a/torchvision/models/densenet.py
+++ b/torchvision/models/densenet.py
@@ -9,6 +9,7 @@
 from torch import Tensor
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = ["DenseNet", "densenet121", "densenet169", "densenet201", "densenet161"]
@@ -162,6 +163,7 @@ def __init__(
     ) -> None:
 
         super(DenseNet, self).__init__()
+        _log_api_usage_once(self)
 
         # First convolution
         self.features = nn.Sequential(
diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py
index d8356d83748..46352464c35 100644
--- a/torchvision/models/efficientnet.py
+++ b/torchvision/models/efficientnet.py
@@ -9,6 +9,7 @@
 
 from .._internally_replaced_utils import load_state_dict_from_url
 from ..ops.misc import ConvNormActivation, SqueezeExcitation
+from ..utils import _log_api_usage_once
 from ._utils import _make_divisible
 
 
@@ -169,6 +170,7 @@
             norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
         """
         super().__init__()
+        _log_api_usage_once(self)
 
         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py
index fb4dcfae956..cfc3cf12309 100644
--- a/torchvision/models/googlenet.py
+++ b/torchvision/models/googlenet.py
@@ -8,6 +8,7 @@
 from torch import Tensor
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = ["GoogLeNet", "googlenet", "GoogLeNetOutputs", "_GoogLeNetOutputs"]
@@ -75,6 +76,7 @@ def __init__(
         dropout_aux: float = 0.7,
     ) -> None:
         super(GoogLeNet, self).__init__()
+        _log_api_usage_once(self)
         if blocks is None:
             blocks = [BasicConv2d, Inception, InceptionAux]
         if init_weights is None:
diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py
index 562d5e10a9a..a8b7cfc2b2f 100644
--- a/torchvision/models/inception.py
+++ b/torchvision/models/inception.py
@@ -7,6 +7,7 @@
 from torch import nn, Tensor
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = ["Inception3", "inception_v3", "InceptionOutputs", "_InceptionOutputs"]
@@ -73,6 +74,7 @@ def __init__(
         dropout: float = 0.5,
     ) -> None:
         super(Inception3, self).__init__()
+        _log_api_usage_once(self)
         if inception_blocks is None:
             inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
         if init_weights is None:
diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py
index 3f48f82c41e..7be52b3f9be 100644
--- a/torchvision/models/mnasnet.py
+++ b/torchvision/models/mnasnet.py
@@ -6,6 +6,7 @@
 from torch import Tensor
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = ["MNASNet", "mnasnet0_5", "mnasnet0_75", "mnasnet1_0", "mnasnet1_3"]
@@ -97,6 +98,7 @@ class MNASNet(torch.nn.Module):
 
     def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
         super(MNASNet, self).__init__()
+        _log_api_usage_once(self)
         assert alpha > 0.0
         self.alpha = alpha
         self.num_classes = num_classes
diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py
index 031faa9d572..9ccbc229975 100644
--- a/torchvision/models/mobilenetv2.py
+++ b/torchvision/models/mobilenetv2.py
@@ -7,6 +7,7 @@
 
 from .._internally_replaced_utils import load_state_dict_from_url
 from ..ops.misc import ConvNormActivation
+from ..utils import _log_api_usage_once
 from ._utils import _make_divisible
 
 
@@ -110,6 +111,7 @@ def __init__(
 
         """
         super(MobileNetV2, self).__init__()
+        _log_api_usage_once(self)
 
         if block is None:
             block = InvertedResidual
diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py
index fbcd894353f..49641948a24 100644
--- a/torchvision/models/mobilenetv3.py
+++ b/torchvision/models/mobilenetv3.py
@@ -7,6 +7,7 @@
 
 from .._internally_replaced_utils import load_state_dict_from_url
 from ..ops.misc import ConvNormActivation, SqueezeExcitation as SElayer
+from ..utils import _log_api_usage_once
 from ._utils import _make_divisible
 
 
@@ -150,6 +151,7 @@ def __init__(
             dropout (float): The droupout probability
         """
         super().__init__()
+        _log_api_usage_once(self)
 
         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py
index 8c6628e25ac..85f53751dd0 100644
--- a/torchvision/models/regnet.py
+++ b/torchvision/models/regnet.py
@@ -13,6 +13,7 @@
 
 from .._internally_replaced_utils import load_state_dict_from_url
 from ..ops.misc import ConvNormActivation, SqueezeExcitation
+from ..utils import _log_api_usage_once
 from ._utils import _make_divisible
 
 
@@ -309,6 +310,7 @@ def __init__(
         activation: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super().__init__()
+        _log_api_usage_once(self)
 
         if stem_type is None:
             stem_type = SimpleStemIN
diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py
index 7584ebb98ea..bfb23ab2b02 100644
--- a/torchvision/models/resnet.py
+++ b/torchvision/models/resnet.py
@@ -5,6 +5,7 @@
 from torch import Tensor
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = [
@@ -173,6 +174,7 @@ def __init__(
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super(ResNet, self).__init__()
+        _log_api_usage_once(self)
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         self._norm_layer = norm_layer
diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py
index a9bb58fc9d1..dcfeb687dca 100644
--- a/torchvision/models/shufflenetv2.py
+++ b/torchvision/models/shufflenetv2.py
@@ -5,6 +5,7 @@
 from torch import Tensor
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = ["ShuffleNetV2", "shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "shufflenet_v2_x1_5", "shufflenet_v2_x2_0"]
@@ -99,6 +100,7 @@ def __init__(
         inverted_residual: Callable[..., nn.Module] = InvertedResidual,
     ) -> None:
         super(ShuffleNetV2, self).__init__()
+        _log_api_usage_once(self)
         if len(stages_repeats) != 3:
             raise ValueError("expected stages_repeats as list of 3 positive ints")
diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py
index c4a3265211f..ca564c52f54 100644
--- a/torchvision/models/squeezenet.py
+++ b/torchvision/models/squeezenet.py
@@ -5,6 +5,7 @@
 import torch.nn.init as init
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = ["SqueezeNet", "squeezenet1_0", "squeezenet1_1"]
@@ -35,6 +36,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 class SqueezeNet(nn.Module):
     def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None:
         super(SqueezeNet, self).__init__()
+        _log_api_usage_once(self)
         self.num_classes = num_classes
         if version == "1_0":
             self.features = nn.Sequential(
diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index 93b626c7d43..ea815ade502 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -4,6 +4,7 @@
 import torch.nn as nn
 
 from .._internally_replaced_utils import load_state_dict_from_url
+from ..utils import _log_api_usage_once
 
 
 __all__ = [
@@ -36,6 +37,7 @@ def __init__(
         self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5
     ) -> None:
         super(VGG, self).__init__()
+        _log_api_usage_once(self)
         self.features = features
         self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
         self.classifier = nn.Sequential(
diff --git a/torchvision/utils.py b/torchvision/utils.py
index a71e0f234b4..54ef29230d0 100644
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -303,3 +303,7 @@ def draw_segmentation_masks(
 def _generate_color_palette(num_masks: int):
     palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
     return [tuple((i * palette) % 255) for i in range(num_masks)]
+
+
+def _log_api_usage_once(obj: object) -> None:
+    torch._C._log_api_usage_once(f"{obj.__module__}.{obj.__class__.__name__}")
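
Note on the new helper: `_log_api_usage_once` derives the logged key from the instance it receives, so the constructors above no longer hard-code a string the way `VisionDataset.__init__` previously did. Below is a minimal sketch of that behaviour in isolation; the `FakeModel` class is an illustrative stand-in for any torchvision model, not part of this patch.

```python
import torch


def _log_api_usage_once(obj: object) -> None:
    # Same body as the helper added to torchvision/utils.py above: the key is
    # "<module>.<ClassName>" of the instance, e.g. "torchvision.models.resnet.ResNet"
    # when ResNet.__init__ runs.
    torch._C._log_api_usage_once(f"{obj.__module__}.{obj.__class__.__name__}")


class FakeModel(torch.nn.Module):  # illustrative stand-in, not a torchvision class
    def __init__(self) -> None:
        super().__init__()
        _log_api_usage_once(self)  # logs "__main__.FakeModel" when defined in a script


FakeModel()
FakeModel()  # the "once" logger is expected to record each key a single time per process
```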