2 changes: 1 addition & 1 deletion torchvision/datasets/vision.py
@@ -35,7 +35,7 @@ def __init__(
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once("datasets", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
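The same one-line substitution repeats in every file in this diff: call sites that previously passed a ("datasets" / "models" / "ops", name) pair to _log_api_usage_once now pass a single identifier, either the class __qualname__ for nn.Module subclasses or a fully-qualified dotted path for the functional ops. The minimal sketch below only illustrates that call-site shape; the body of _log_api_usage_once shown here is an assumption for illustration and is not part of this diff, which touches only the call sites.

import torch

def _log_api_usage_once(identifier: str) -> None:
    # Hypothetical stand-in for torchvision's private helper: forwards one string
    # identifier to PyTorch's API-usage logger instead of joining a (category, name) pair.
    torch._C._log_api_usage_once(identifier)

class ExampleModel(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # Before this PR: _log_api_usage_once("models", self.__class__.__name__)
        _log_api_usage_once(self.__class__.__qualname__)

# Functional ops follow the same pattern with a fully-qualified dotted path, e.g.
# _log_api_usage_once("torchvision.ops.boxes.nms") instead of _log_api_usage_once("ops", "nms")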
2 changes: 1 addition & 1 deletion torchvision/models/alexnet.py
@@ -18,7 +18,7 @@
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
2 changes: 1 addition & 1 deletion torchvision/models/densenet.py
@@ -163,7 +163,7 @@ def __init__(
) -> None:

super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

# First convolution
self.features = nn.Sequential(
2 changes: 1 addition & 1 deletion torchvision/models/detection/generalized_rcnn.py
@@ -27,7 +27,7 @@ class GeneralizedRCNN(nn.Module):

def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.transform = transform
self.backbone = backbone
self.rpn = rpn
2 changes: 1 addition & 1 deletion torchvision/models/detection/retinanet.py
@@ -337,7 +337,7 @@ def __init__(
topk_candidates=1000,
):
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

if not hasattr(backbone, "out_channels"):
raise ValueError(
2 changes: 1 addition & 1 deletion torchvision/models/detection/ssd.py
@@ -182,7 +182,7 @@ def __init__(
positive_fraction: float = 0.25,
):
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

self.backbone = backbone

2 changes: 1 addition & 1 deletion torchvision/models/detection/ssdlite.py
@@ -120,7 +120,7 @@ def __init__(
min_depth: int = 16,
):
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

assert not backbone[c4_pos].use_res_connect
self.features = nn.Sequential(
2 changes: 1 addition & 1 deletion torchvision/models/efficientnet.py
@@ -170,7 +170,7 @@ def __init__(
norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
"""
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

if not inverted_residual_setting:
raise ValueError("The inverted_residual_setting should not be empty")
2 changes: 1 addition & 1 deletion torchvision/models/googlenet.py
@@ -39,7 +39,7 @@ def __init__(
dropout_aux: float = 0.7,
) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
if blocks is None:
blocks = [BasicConv2d, Inception, InceptionAux]
if init_weights is None:
2 changes: 1 addition & 1 deletion torchvision/models/inception.py
@@ -37,7 +37,7 @@ def __init__(
dropout: float = 0.5,
) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
if inception_blocks is None:
inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
if init_weights is None:
2 changes: 1 addition & 1 deletion torchvision/models/mnasnet.py
@@ -98,7 +98,7 @@ class MNASNet(torch.nn.Module):

def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
assert alpha > 0.0
self.alpha = alpha
self.num_classes = num_classes
2 changes: 1 addition & 1 deletion torchvision/models/mobilenetv2.py
@@ -111,7 +111,7 @@ def __init__(

"""
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

if block is None:
block = InvertedResidual
2 changes: 1 addition & 1 deletion torchvision/models/mobilenetv3.py
@@ -151,7 +151,7 @@ def __init__(
dropout (float): The droupout probability
"""
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

if not inverted_residual_setting:
raise ValueError("The inverted_residual_setting should not be empty")
2 changes: 1 addition & 1 deletion torchvision/models/optical_flow/raft.py
@@ -440,7 +440,7 @@ def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block
If ``None`` (default), the flow is upsampled using interpolation.
"""
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

self.feature_encoder = feature_encoder
self.context_encoder = context_encoder
2 changes: 1 addition & 1 deletion torchvision/models/regnet.py
@@ -310,7 +310,7 @@ def __init__(
activation: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

if stem_type is None:
stem_type = SimpleStemIN
2 changes: 1 addition & 1 deletion torchvision/models/resnet.py
@@ -174,7 +174,7 @@ def __init__(
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
2 changes: 1 addition & 1 deletion torchvision/models/segmentation/_utils.py
@@ -13,7 +13,7 @@ class _SimpleSegmentationModel(nn.Module):

def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.backbone = backbone
self.classifier = classifier
self.aux_classifier = aux_classifier
2 changes: 1 addition & 1 deletion torchvision/models/segmentation/lraspp.py
@@ -38,7 +38,7 @@ def __init__(
self, backbone: nn.Module, low_channels: int, high_channels: int, num_classes: int, inter_channels: int = 128
) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.backbone = backbone
self.classifier = LRASPPHead(low_channels, high_channels, num_classes, inter_channels)

2 changes: 1 addition & 1 deletion torchvision/models/shufflenetv2.py
@@ -100,7 +100,7 @@ def __init__(
inverted_residual: Callable[..., nn.Module] = InvertedResidual,
) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)

if len(stages_repeats) != 3:
raise ValueError("expected stages_repeats as list of 3 positive ints")
2 changes: 1 addition & 1 deletion torchvision/models/squeezenet.py
@@ -36,7 +36,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
class SqueezeNet(nn.Module):
def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.num_classes = num_classes
if version == "1_0":
self.features = nn.Sequential(
2 changes: 1 addition & 1 deletion torchvision/models/vgg.py
@@ -37,7 +37,7 @@ def __init__(
self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5
) -> None:
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
2 changes: 1 addition & 1 deletion torchvision/models/video/resnet.py
@@ -209,7 +209,7 @@ def __init__(
zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
"""
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.inplanes = 64

self.stem = stem()
18 changes: 9 additions & 9 deletions torchvision/ops/boxes.py
@@ -34,7 +34,7 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
Tensor: int64 tensor with the indices of the elements that have been kept
by NMS, sorted in decreasing order of scores
"""
_log_api_usage_once("ops", "nms")
_log_api_usage_once("torchvision.ops.boxes.nms")
_assert_has_ops()
return torch.ops.torchvision.nms(boxes, scores, iou_threshold)

@@ -63,7 +63,7 @@ def batched_nms(
Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted
in decreasing order of scores
"""
_log_api_usage_once("ops", "batched_nms")
_log_api_usage_once("torchvision.ops.boxes.batched_nms")
# Benchmarks that drove the following thresholds are at
# https://github.com/pytorch/vision/issues/1311#issuecomment-781329339
if boxes.numel() > (4000 if boxes.device.type == "cpu" else 20000) and not torchvision._is_tracing():
@@ -122,7 +122,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
Tensor[K]: indices of the boxes that have both sides
larger than min_size
"""
_log_api_usage_once("ops", "remove_small_boxes")
_log_api_usage_once("torchvision.ops.boxes.remove_small_boxes")
ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
keep = (ws >= min_size) & (hs >= min_size)
keep = torch.where(keep)[0]
@@ -141,7 +141,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
Returns:
Tensor[N, 4]: clipped boxes
"""
_log_api_usage_once("ops", "clip_boxes_to_image")
_log_api_usage_once("torchvision.ops.boxes.clip_boxes_to_image")
dim = boxes.dim()
boxes_x = boxes[..., 0::2]
boxes_y = boxes[..., 1::2]
@@ -182,7 +182,7 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
Tensor[N, 4]: Boxes into converted format.
"""

_log_api_usage_once("ops", "box_convert")
_log_api_usage_once("torchvision.ops.boxes.box_convert")
allowed_fmts = ("xyxy", "xywh", "cxcywh")
if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts:
raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt")
@@ -232,7 +232,7 @@ def box_area(boxes: Tensor) -> Tensor:
Returns:
Tensor[N]: the area for each box
"""
_log_api_usage_once("ops", "box_area")
_log_api_usage_once("torchvision.ops.boxes.box_area")
boxes = _upcast(boxes)
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

@@ -268,7 +268,7 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
"""
_log_api_usage_once("ops", "box_iou")
_log_api_usage_once("torchvision.ops.boxes.box_iou")
inter, union = _box_inter_union(boxes1, boxes2)
iou = inter / union
return iou
@@ -291,7 +291,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
for every element in boxes1 and boxes2
"""

_log_api_usage_once("ops", "generalized_box_iou")
_log_api_usage_once("torchvision.ops.boxes.generalized_box_iou")
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
@@ -323,7 +323,7 @@ def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:
Returns:
Tensor[N, 4]: bounding boxes
"""
_log_api_usage_once("ops", "masks_to_boxes")
_log_api_usage_once("torchvision.ops.boxes.masks_to_boxes")
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device, dtype=torch.float)

2 changes: 1 addition & 1 deletion torchvision/ops/deform_conv.py
@@ -61,7 +61,7 @@ def deform_conv2d(
>>> torch.Size([4, 5, 8, 8])
"""

_log_api_usage_once("ops", "deform_conv2d")
_log_api_usage_once("torchvision.ops.deform_conv.deform_conv2d")
_assert_has_ops()
out_channels = weight.shape[0]

2 changes: 1 addition & 1 deletion torchvision/ops/feature_pyramid_network.py
@@ -77,7 +77,7 @@ def __init__(
extra_blocks: Optional[ExtraFPNBlock] = None,
):
super().__init__()
_log_api_usage_once("ops", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.inner_blocks = nn.ModuleList()
self.layer_blocks = nn.ModuleList()
for in_channels in in_channels_list:
2 changes: 1 addition & 1 deletion torchvision/ops/focal_loss.py
@@ -32,7 +32,7 @@ def sigmoid_focal_loss(
Returns:
Loss tensor with the reduction option applied.
"""
_log_api_usage_once("ops", "sigmoid_focal_loss")
_log_api_usage_once("torchvision.ops.focal_loss.sigmoid_focal_loss")
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
6 changes: 3 additions & 3 deletions torchvision/ops/misc.py
@@ -61,7 +61,7 @@ def __init__(
warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning)
num_features = n
super().__init__()
_log_api_usage_once("ops", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
@@ -155,7 +155,7 @@ def __init__(
if activation_layer is not None:
layers.append(activation_layer(inplace=inplace))
super().__init__(*layers)
_log_api_usage_once("ops", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.out_channels = out_channels


@@ -179,7 +179,7 @@ def __init__(
scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
) -> None:
super().__init__()
_log_api_usage_once("ops", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
2 changes: 1 addition & 1 deletion torchvision/ops/poolers.py
@@ -276,7 +276,7 @@ def __init__(
canonical_level: int = 4,
):
super().__init__()
_log_api_usage_once("ops", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
if isinstance(output_size, int):
output_size = (output_size, output_size)
self.featmap_names = featmap_names
2 changes: 1 addition & 1 deletion torchvision/ops/ps_roi_align.py
@@ -43,7 +43,7 @@ def ps_roi_align(
Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs
"""
_log_api_usage_once("ops", "ps_roi_align")
_log_api_usage_once("torchvision.ops.ps_roi_align.ps_roi_align")
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
2 changes: 1 addition & 1 deletion torchvision/ops/ps_roi_pool.py
@@ -37,7 +37,7 @@ def ps_roi_pool(
Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
"""
_log_api_usage_once("ops", "ps_roi_pool")
_log_api_usage_once("torchvision.ops.ps_roi_pool.ps_roi_pool")
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
2 changes: 1 addition & 1 deletion torchvision/ops/roi_align.py
@@ -50,7 +50,7 @@ def roi_align(
Returns:
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
"""
_log_api_usage_once("ops", "roi_align")
_log_api_usage_once("torchvision.ops.roi_align.roi_align")
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
2 changes: 1 addition & 1 deletion torchvision/ops/roi_pool.py
@@ -39,7 +39,7 @@ def roi_pool(
Returns:
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
"""
_log_api_usage_once("ops", "roi_pool")
_log_api_usage_once("torchvision.ops.roi_pool.roi_pool")
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
2 changes: 1 addition & 1 deletion torchvision/ops/stochastic_depth.py
@@ -23,7 +23,7 @@ def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True)
Returns:
Tensor[N, ...]: The randomly zeroed tensor.
"""
_log_api_usage_once("ops", "stochastic_depth")
_log_api_usage_once("torchvision.ops.stochastic_depth.stochastic_depth")
if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
if mode not in ["batch", "row"]:
2 changes: 1 addition & 1 deletion torchvision/prototype/models/vision_transformer.py
@@ -140,7 +140,7 @@ def __init__(
norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
):
super().__init__()
_log_api_usage_once("models", self.__class__.__name__)
_log_api_usage_once(self.__class__.__qualname__)
torch._assert(image_size % patch_size == 0, "Input shape indivisible by patch size!")
self.image_size = image_size
self.patch_size = patch_size