diff --git a/torchvision/datasets/vision.py b/torchvision/datasets/vision.py
index 22fc85322e4..cd4399c6c79 100644
--- a/torchvision/datasets/vision.py
+++ b/torchvision/datasets/vision.py
@@ -35,7 +35,7 @@ def __init__(
         transform: Optional[Callable] = None,
         target_transform: Optional[Callable] = None,
     ) -> None:
-        _log_api_usage_once(self)
+        _log_api_usage_once("datasets", self.__class__.__name__)
         if isinstance(root, torch._six.string_classes):
             root = os.path.expanduser(root)
         self.root = root
diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py
index bb812febdc4..ae36054386f 100644
--- a/torchvision/models/alexnet.py
+++ b/torchvision/models/alexnet.py
@@ -18,7 +18,7 @@ class AlexNet(nn.Module):
     def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.features = nn.Sequential(
             nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
             nn.ReLU(inplace=True),
diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py
index 14e318360af..289c027f94d 100644
--- a/torchvision/models/densenet.py
+++ b/torchvision/models/densenet.py
@@ -163,7 +163,7 @@ def __init__(
     ) -> None:

         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         # First convolution
         self.features = nn.Sequential(
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index 37ef1820d71..b92f8637d6d 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -27,7 +27,7 @@ class GeneralizedRCNN(nn.Module):
     def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.transform = transform
         self.backbone = backbone
         self.rpn = rpn
diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py
index e5ced9870ba..d123f5bd6ff 100644
--- a/torchvision/models/detection/retinanet.py
+++ b/torchvision/models/detection/retinanet.py
@@ -337,7 +337,7 @@ def __init__(
         topk_candidates=1000,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if not hasattr(backbone, "out_channels"):
             raise ValueError(
diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py
index 5778a07075d..5df8138ecef 100644
--- a/torchvision/models/detection/ssd.py
+++ b/torchvision/models/detection/ssd.py
@@ -182,7 +182,7 @@ def __init__(
         positive_fraction: float = 0.25,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         self.backbone = backbone
diff --git a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py
index d35dfbf78d3..f434a865763 100644
--- a/torchvision/models/detection/ssdlite.py
+++ b/torchvision/models/detection/ssdlite.py
@@ -120,7 +120,7 @@ def __init__(
         min_depth: int = 16,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         assert not backbone[c4_pos].use_res_connect
         self.features = nn.Sequential(
diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py
index 6837018c09e..249e2400f63 100644
--- a/torchvision/models/efficientnet.py
+++ b/torchvision/models/efficientnet.py
@@ -170,7 +170,7 @@ def __init__(
             norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py
index 5e6375c1191..7ead46a4ef6 100644
--- a/torchvision/models/googlenet.py
+++ b/torchvision/models/googlenet.py
@@ -39,7 +39,7 @@ def __init__(
         dropout_aux: float = 0.7,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         if blocks is None:
             blocks = [BasicConv2d, Inception, InceptionAux]
         if init_weights is None:
diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py
index 322c2370bdd..714e5e9e406 100644
--- a/torchvision/models/inception.py
+++ b/torchvision/models/inception.py
@@ -37,7 +37,7 @@ def __init__(
         dropout: float = 0.5,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         if inception_blocks is None:
             inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
         if init_weights is None:
diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py
index 5eb27904f90..bff4e061a72 100644
--- a/torchvision/models/mnasnet.py
+++ b/torchvision/models/mnasnet.py
@@ -98,7 +98,7 @@ class MNASNet(torch.nn.Module):
     def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         assert alpha > 0.0
         self.alpha = alpha
         self.num_classes = num_classes
diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py
index 1a470953df5..1e21f299654 100644
--- a/torchvision/models/mobilenetv2.py
+++ b/torchvision/models/mobilenetv2.py
@@ -111,7 +111,7 @@ def __init__(
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if block is None:
             block = InvertedResidual
diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py
index 97239bea8ad..306fa8d2b49 100644
--- a/torchvision/models/mobilenetv3.py
+++ b/torchvision/models/mobilenetv3.py
@@ -151,7 +151,7 @@ def __init__(
             dropout (float): The droupout probability
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py
index 5e3388d66b2..18bf7dbafc3 100644
--- a/torchvision/models/optical_flow/raft.py
+++ b/torchvision/models/optical_flow/raft.py
@@ -440,7 +440,7 @@ def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block
                 If ``None`` (default), the flow is upsampled using interpolation.
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         self.feature_encoder = feature_encoder
         self.context_encoder = context_encoder
diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py
index 85f53751dd0..417054fe547 100644
--- a/torchvision/models/regnet.py
+++ b/torchvision/models/regnet.py
@@ -310,7 +310,7 @@ def __init__(
         activation: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if stem_type is None:
             stem_type = SimpleStemIN
diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py
index b0bb8d13ade..d7dd3480ff6 100644
--- a/torchvision/models/resnet.py
+++ b/torchvision/models/resnet.py
@@ -174,7 +174,7 @@ def __init__(
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         self._norm_layer = norm_layer
diff --git a/torchvision/models/segmentation/_utils.py b/torchvision/models/segmentation/_utils.py
index 0bbea5d3e81..209a7a57c26 100644
--- a/torchvision/models/segmentation/_utils.py
+++ b/torchvision/models/segmentation/_utils.py
@@ -13,7 +13,7 @@ class _SimpleSegmentationModel(nn.Module):
     def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.backbone = backbone
         self.classifier = classifier
         self.aux_classifier = aux_classifier
diff --git a/torchvision/models/segmentation/lraspp.py b/torchvision/models/segmentation/lraspp.py
index f6c2583cac1..77243822434 100644
--- a/torchvision/models/segmentation/lraspp.py
+++ b/torchvision/models/segmentation/lraspp.py
@@ -38,7 +38,7 @@ def __init__(
         self, backbone: nn.Module, low_channels: int, high_channels: int, num_classes: int, inter_channels: int = 128
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.backbone = backbone
         self.classifier = LRASPPHead(low_channels, high_channels, num_classes, inter_channels)
diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py
index f3758c54aaf..9bec542d6f5 100644
--- a/torchvision/models/shufflenetv2.py
+++ b/torchvision/models/shufflenetv2.py
@@ -100,7 +100,7 @@ def __init__(
         inverted_residual: Callable[..., nn.Module] = InvertedResidual,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)

         if len(stages_repeats) != 3:
             raise ValueError("expected stages_repeats as list of 3 positive ints")
diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py
index 2c1a30f225d..6c14c779c0c 100644
--- a/torchvision/models/squeezenet.py
+++ b/torchvision/models/squeezenet.py
@@ -36,7 +36,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 class SqueezeNet(nn.Module):
     def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.num_classes = num_classes
         if version == "1_0":
             self.features = nn.Sequential(
diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index e31fc542ca6..c96bc5e1d8d 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -37,7 +37,7 @@ def __init__(
         self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.features = features
         self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
         self.classifier = nn.Sequential(
diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py
index f6899cfafeb..475608886c6 100644
--- a/torchvision/models/video/resnet.py
+++ b/torchvision/models/video/resnet.py
@@ -209,7 +209,7 @@ def __init__(
             zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
         """
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         self.inplanes = 64

         self.stem = stem()
diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py
index 5ec46669be2..537c23480d8 100644
--- a/torchvision/ops/boxes.py
+++ b/torchvision/ops/boxes.py
@@ -34,7 +34,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
         Tensor: int64 tensor with the indices of the elements that have been kept
         by NMS, sorted in decreasing order of scores
     """
-    _log_api_usage_once("torchvision.ops.nms")
+    _log_api_usage_once("ops", "nms")
     _assert_has_ops()
     return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
@@ -63,7 +63,7 @@ def batched_nms(
         Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted
         in decreasing order of scores
     """
-    _log_api_usage_once("torchvision.ops.batched_nms")
+    _log_api_usage_once("ops", "batched_nms")
     # Benchmarks that drove the following thresholds are at
     # https://github.com/pytorch/vision/issues/1311#issuecomment-781329339
     if boxes.numel() > (4000 if boxes.device.type == "cpu" else 20000) and not torchvision._is_tracing():
@@ -122,7 +122,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
         Tensor[K]: indices of the boxes that have both sides
         larger than min_size
     """
-    _log_api_usage_once("torchvision.ops.remove_small_boxes")
+    _log_api_usage_once("ops", "remove_small_boxes")
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
     keep = torch.where(keep)[0]
@@ -141,7 +141,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
     Returns:
         Tensor[N, 4]: clipped boxes
     """
-    _log_api_usage_once("torchvision.ops.clip_boxes_to_image")
+    _log_api_usage_once("ops", "clip_boxes_to_image")
     dim = boxes.dim()
     boxes_x = boxes[..., 0::2]
     boxes_y = boxes[..., 1::2]
@@ -182,7 +182,7 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
         Tensor[N, 4]: Boxes into converted format.
     """

-    _log_api_usage_once("torchvision.ops.box_convert")
+    _log_api_usage_once("ops", "box_convert")
     allowed_fmts = ("xyxy", "xywh", "cxcywh")
     if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts:
         raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt")
@@ -232,7 +232,7 @@ def box_area(boxes: Tensor) -> Tensor:
     Returns:
         Tensor[N]: the area for each box
     """
-    _log_api_usage_once("torchvision.ops.box_area")
+    _log_api_usage_once("ops", "box_area")
     boxes = _upcast(boxes)
     return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
@@ -268,7 +268,7 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
     Returns:
         Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
     """
-    _log_api_usage_once("torchvision.ops.box_iou")
+    _log_api_usage_once("ops", "box_iou")
     inter, union = _box_inter_union(boxes1, boxes2)
     iou = inter / union
     return iou
@@ -291,7 +291,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
         for every element in boxes1 and boxes2
     """

-    _log_api_usage_once("torchvision.ops.generalized_box_iou")
+    _log_api_usage_once("ops", "generalized_box_iou")
     # degenerate boxes gives inf / nan results
     # so do an early check
     assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
@@ -323,7 +323,7 @@ def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:
     Returns:
         Tensor[N, 4]: bounding boxes
     """
-    _log_api_usage_once("torchvision.ops.masks_to_boxes")
+    _log_api_usage_once("ops", "masks_to_boxes")
     if masks.numel() == 0:
         return torch.zeros((0, 4), device=masks.device, dtype=torch.float)
diff --git a/torchvision/ops/deform_conv.py b/torchvision/ops/deform_conv.py
index 59b1cec049e..b756274df71 100644
--- a/torchvision/ops/deform_conv.py
+++ b/torchvision/ops/deform_conv.py
@@ -61,7 +61,7 @@ def deform_conv2d(
         >>> torch.Size([4, 5, 8, 8])
     """

-    _log_api_usage_once("torchvision.ops.deform_conv2d")
+    _log_api_usage_once("ops", "deform_conv2d")
     _assert_has_ops()
     out_channels = weight.shape[0]
diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py
index 93caa47d04b..e73d1ee853b 100644
--- a/torchvision/ops/feature_pyramid_network.py
+++ b/torchvision/ops/feature_pyramid_network.py
@@ -77,7 +77,7 @@ def __init__(
         extra_blocks: Optional[ExtraFPNBlock] = None,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.inner_blocks = nn.ModuleList()
         self.layer_blocks = nn.ModuleList()
         for in_channels in in_channels_list:
diff --git a/torchvision/ops/focal_loss.py b/torchvision/ops/focal_loss.py
index 1a149ed4120..d19e7465720 100644
--- a/torchvision/ops/focal_loss.py
+++ b/torchvision/ops/focal_loss.py
@@ -32,7 +32,7 @@ def sigmoid_focal_loss(
     Returns:
         Loss tensor with the reduction option applied.
     """
-    _log_api_usage_once("torchvision.ops.sigmoid_focal_loss")
+    _log_api_usage_once("ops", "sigmoid_focal_loss")
     p = torch.sigmoid(inputs)
     ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
     p_t = p * targets + (1 - p) * (1 - targets)
diff --git a/torchvision/ops/misc.py b/torchvision/ops/misc.py
index 392517cb772..0189d1b140f 100644
--- a/torchvision/ops/misc.py
+++ b/torchvision/ops/misc.py
@@ -61,7 +61,7 @@ def __init__(
             warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning)
             num_features = n
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.eps = eps
         self.register_buffer("weight", torch.ones(num_features))
         self.register_buffer("bias", torch.zeros(num_features))
@@ -155,7 +155,7 @@ def __init__(
         if activation_layer is not None:
             layers.append(activation_layer(inplace=inplace))
         super().__init__(*layers)
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.out_channels = out_channels


@@ -179,7 +179,7 @@ def __init__(
         scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
         self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
         self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py
index 05cf5e4032e..cb3abc70f80 100644
--- a/torchvision/ops/poolers.py
+++ b/torchvision/ops/poolers.py
@@ -276,7 +276,7 @@ def __init__(
         canonical_level: int = 4,
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("ops", self.__class__.__name__)
         if isinstance(output_size, int):
             output_size = (output_size, output_size)
         self.featmap_names = featmap_names
diff --git a/torchvision/ops/ps_roi_align.py b/torchvision/ops/ps_roi_align.py
index 4ed4ead89ff..f24a41866e9 100644
--- a/torchvision/ops/ps_roi_align.py
+++ b/torchvision/ops/ps_roi_align.py
@@ -43,7 +43,7 @@ def ps_roi_align(
     Returns:
         Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs
     """
-    _log_api_usage_once("torchvision.ops.ps_roi_align")
+    _log_api_usage_once("ops", "ps_roi_align")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes
diff --git a/torchvision/ops/ps_roi_pool.py b/torchvision/ops/ps_roi_pool.py
index 6bab125f04f..9079aaacc6e 100644
--- a/torchvision/ops/ps_roi_pool.py
+++ b/torchvision/ops/ps_roi_pool.py
@@ -37,7 +37,7 @@ def ps_roi_pool(
     Returns:
         Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.ps_roi_pool")
+    _log_api_usage_once("ops", "ps_roi_pool")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes
diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py
index 3f80383855b..509c0777d2d 100644
--- a/torchvision/ops/roi_align.py
+++ b/torchvision/ops/roi_align.py
@@ -50,7 +50,7 @@ def roi_align(
     Returns:
         Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.roi_align")
+    _log_api_usage_once("ops", "roi_align")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes
diff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py
index deef590c953..a8f9c4d2092 100644
--- a/torchvision/ops/roi_pool.py
+++ b/torchvision/ops/roi_pool.py
@@ -39,7 +39,7 @@ def roi_pool(
     Returns:
         Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.roi_pool")
+    _log_api_usage_once("ops", "roi_pool")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes
diff --git a/torchvision/ops/stochastic_depth.py b/torchvision/ops/stochastic_depth.py
index b2a0aec233d..40143d06141 100644
--- a/torchvision/ops/stochastic_depth.py
+++ b/torchvision/ops/stochastic_depth.py
@@ -23,7 +23,7 @@ def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True)
     Returns:
         Tensor[N, ...]: The randomly zeroed tensor.
     """
-    _log_api_usage_once("torchvision.ops.stochastic_depth")
+    _log_api_usage_once("ops", "stochastic_depth")
     if p < 0.0 or p > 1.0:
         raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
     if mode not in ["batch", "row"]:
diff --git a/torchvision/prototype/models/vision_transformer.py b/torchvision/prototype/models/vision_transformer.py
index ae8eee45539..f03a87480b2 100644
--- a/torchvision/prototype/models/vision_transformer.py
+++ b/torchvision/prototype/models/vision_transformer.py
@@ -140,7 +140,7 @@ def __init__(
         norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
     ):
         super().__init__()
-        _log_api_usage_once(self)
+        _log_api_usage_once("models", self.__class__.__name__)
         torch._assert(image_size % patch_size == 0, "Input shape indivisible by patch size!")
         self.image_size = image_size
         self.patch_size = patch_size
diff --git a/torchvision/utils.py b/torchvision/utils.py
index b11f4ebeecf..f45b1271cb6 100644
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -1,7 +1,7 @@
 import math
 import pathlib
 import warnings
-from typing import Union, Optional, List, Tuple, BinaryIO, no_type_check
+from typing import Union, Optional, List, Tuple, BinaryIO

 import numpy as np
 import torch
@@ -375,13 +375,7 @@ def _generate_color_palette(num_masks: int):
     return [tuple((i * palette) % 255) for i in range(num_masks)]


-@no_type_check
-def _log_api_usage_once(obj: str) -> None:  # type: ignore
+def _log_api_usage_once(module: str, name: str) -> None:
     if torch.jit.is_scripting() or torch.jit.is_tracing():
         return
-    # NOTE: obj can be an object as well, but mocking it here to be
-    # only a string to appease torchscript
-    if isinstance(obj, str):
-        torch._C._log_api_usage_once(obj)
-    else:
-        torch._C._log_api_usage_once(f"{obj.__module__}.{obj.__class__.__name__}")
+    torch._C._log_api_usage_once(f"torchvision.{module}.{name}")