From 3376b2661f48a6b408c618f49b3d08e0b4f9d815 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Thu, 12 May 2022 15:21:24 +0100
Subject: [PATCH 1/6] Remove old "minimum input size" from docstrings.

---
 torchvision/models/alexnet.py                | 2 --
 torchvision/models/densenet.py               | 4 ----
 torchvision/models/googlenet.py              | 2 --
 torchvision/models/inception.py              | 1 -
 torchvision/models/quantization/googlenet.py | 2 --
 torchvision/models/squeezenet.py             | 3 ---
 6 files changed, 14 deletions(-)

diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py
index 6c9ae4932d7..dff0bbad1a4 100644
--- a/torchvision/models/alexnet.py
+++ b/torchvision/models/alexnet.py
@@ -74,8 +74,6 @@ class AlexNet_Weights(WeightsEnum):
 def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
     """AlexNet model architecture from
     `One weird trick for parallelizing convolutional neural networks `__.
-    The required minimum input size of the model is 63x63.
-
     .. note::
         AlexNet was originally introduced in the `ImageNet Classification with
         Deep Convolutional Neural Networks
diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py
index bf46b113691..47447991f0e 100644
--- a/torchvision/models/densenet.py
+++ b/torchvision/models/densenet.py
@@ -332,7 +332,6 @@ class DenseNet201_Weights(WeightsEnum):
 def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-121 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet121_Weights`, optional): The
@@ -358,7 +357,6 @@ def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool
 def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-161 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet161_Weights`, optional): The
@@ -384,7 +382,6 @@ def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool
 def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-169 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet169_Weights`, optional): The
@@ -410,7 +407,6 @@ def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool
 def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-201 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet201_Weights`, optional): The
diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py
index 37c20d72cdd..755740abc11 100644
--- a/torchvision/models/googlenet.py
+++ b/torchvision/models/googlenet.py
@@ -298,8 +298,6 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = T
     """GoogLeNet (Inception v1) model architecture from
     `Going Deeper with Convolutions `_.
 
-    The required minimum input size of the model is 15x15.
-
     Args:
         weights (:class:`~torchvision.models.GoogLeNet_Weights`, optional): The
             pretrained weights for the model. See
diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py
index 5b89a1e5c9a..0abd195742c 100644
--- a/torchvision/models/inception.py
+++ b/torchvision/models/inception.py
@@ -430,7 +430,6 @@ def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bo
     """
     Inception v3 model architecture from
     `Rethinking the Inception Architecture for Computer Vision `_.
-    The required minimum input size of the model is 75x75.
 
     .. note::
         **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py
index 36ec453a38f..892b3408706 100644
--- a/torchvision/models/quantization/googlenet.py
+++ b/torchvision/models/quantization/googlenet.py
@@ -147,8 +147,6 @@ def googlenet(
     weights. Quantized models only support inference and run on CPUs.
     GPU inference is not yet supported
 
-    The required minimum input size of the model is 15x15.
-
     Args:
         weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
             pretrained weights for the model. See
diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py
index afe9c18887f..dd474cbe7f7 100644
--- a/torchvision/models/squeezenet.py
+++ b/torchvision/models/squeezenet.py
@@ -162,8 +162,6 @@ def squeezenet1_0(
     accuracy with 50x fewer parameters and <0.5MB model size
     `_ paper.
 
-    The required minimum input size of the model is 21x21.
-
     Args:
         weights (:class:`~torchvision.models.SqueezeNet1_0_Weights`, optional): The
             pretrained weights to use. See
@@ -193,7 +191,6 @@ def squeezenet1_1(
 
     SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
     than SqueezeNet 1.0, without sacrificing accuracy.
-    The required minimum input size of the model is 17x17.
 
     Args:
         weights (:class:`~torchvision.models.SqueezeNet1_1_Weights`, optional): The

From 3f4f22fe79ed29586ba351cf370c29da2f4c48ca Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Thu, 12 May 2022 16:28:14 +0100
Subject: [PATCH 2/6] Remove "currently only XYZ weights available"

---
 docs/source/models/mnasnet.rst | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/source/models/mnasnet.rst b/docs/source/models/mnasnet.rst
index f902bc2ef38..e31b4aca1b6 100644
--- a/docs/source/models/mnasnet.rst
+++ b/docs/source/models/mnasnet.rst
@@ -11,8 +11,7 @@ Search for Mobile `__ paper.
 Model builders
 --------------
 
-The following model builders can be used to instanciate an MNASNet model. Currently
-only ``mnasnet0_5`` and ``mnasnet1_0`` can be instantiated with pre-trained weights.
+The following model builders can be used to instanciate an MNASNet model.
 
 All the model builders internally rely on the ``torchvision.models.mnasnet.MNASNet``
 base class. Please refer to the `source code

From 75270bcd0f14e07f0c0ad14c946eb4a04e1889a5 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Thu, 12 May 2022 16:47:33 +0100
Subject: [PATCH 3/6] Fix description of wide_resnet101_2

---
 torchvision/models/resnet.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py
index f2910cdc281..e8ed33d5080 100644
--- a/torchvision/models/resnet.py
+++ b/torchvision/models/resnet.py
@@ -831,8 +831,8 @@ def wide_resnet101_2(
 
     The model is the same as ResNet except for the bottleneck number of channels
     which is twice larger in every block. The number of channels in outer 1x1
-    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
-    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
+    convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
+    channels, and in Wide ResNet-101-2 has 2048-1024-2048.
 
     Args:
         weights (:class:`~torchvision.models.Wide_ResNet101_2_Weights`, optional): The

From 6b88d7aaa435991ad21b057bd038295004f68513 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Thu, 12 May 2022 16:55:11 +0100
Subject: [PATCH 4/6] Make display license URLs as links.

---
 docs/source/conf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index f4b38075c8b..4127d262670 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -349,7 +349,7 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
                 lines += [custom_docs, ""]
 
             for k, v in meta_with_metrics.items():
-                if k == "recipe":
+                if k in {"recipe", "license"}:
                     v = f"`link <{v}>`__"
                 table.append((str(k), str(v)))
             table = tabulate(table, tablefmt="rst")

From 5eccb92ea1bf7e8eb031f2781284c9a2652760ef Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Thu, 12 May 2022 17:11:51 +0100
Subject: [PATCH 5/6] Clarify the order of dims of min_size.

---
 docs/source/conf.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 4127d262670..b2bea2b9d86 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -351,6 +351,8 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
             for k, v in meta_with_metrics.items():
                 if k in {"recipe", "license"}:
                     v = f"`link <{v}>`__"
+                elif k == "min_size":
+                    v = f"height={v[0]}, width={v[1]}"
                 table.append((str(k), str(v)))
             table = tabulate(table, tablefmt="rst")
             lines += [".. rst-class:: table-weights"]  # Custom CSS class, see custom_torchvision.css

From 5a39316f1b156c7dd936476469393a26a9a35058 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Thu, 12 May 2022 17:23:17 +0100
Subject: [PATCH 6/6] Remove lengthy keypoint_names from meta-table.

---
 docs/source/conf.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index b2bea2b9d86..234dd48324d 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -342,7 +342,9 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
 
             metrics = meta.pop("metrics", {})
             meta_with_metrics = dict(meta, **metrics)
-            meta_with_metrics.pop("categories", None)  # We don't want to document these, they can be too long
+            # We don't want to document these, they can be too long
+            for k in ["categories", "keypoint_names"]:
+                meta_with_metrics.pop(k, None)
 
             custom_docs = meta_with_metrics.pop("_docs", None)  # Custom per-Weights docs
             if custom_docs is not None:
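
Note (appended sketch, not part of the patches): the combined effect of the docs/source/conf.py hunks in patches 4-6 can be illustrated with a small standalone snippet. The helper name build_meta_table and the sample metadata values below are hypothetical; they only mirror the table-building logic shown in the hunks above, which in torchvision lives inside the inject_weight_metadata Sphinx hook and uses the same tabulate call.

# build_meta_table.py -- illustrative only; mirrors the logic of patches 4-6.
from tabulate import tabulate


def build_meta_table(meta):
    # Flatten the nested "metrics" dict into the top-level metadata,
    # as the hook does before building the table.
    meta = dict(meta)
    metrics = meta.pop("metrics", {})
    meta_with_metrics = dict(meta, **metrics)

    # Patch 6: drop fields that are too long to document.
    for k in ["categories", "keypoint_names"]:
        meta_with_metrics.pop(k, None)

    table = []
    for k, v in meta_with_metrics.items():
        # Patch 4: render recipe and license URLs as RST links.
        if k in {"recipe", "license"}:
            v = f"`link <{v}>`__"
        # Patch 5: make the order of the min_size dimensions explicit.
        elif k == "min_size":
            v = f"height={v[0]}, width={v[1]}"
        table.append((str(k), str(v)))
    return tabulate(table, tablefmt="rst")


if __name__ == "__main__":
    # Hypothetical metadata, loosely modelled on a classification weight entry.
    sample_meta = {
        "min_size": (15, 15),
        "recipe": "https://github.com/pytorch/vision/tree/main/references/classification",
        "license": "https://github.com/pytorch/vision/blob/main/LICENSE",
        "categories": ["tench", "goldfish"],  # dropped from the rendered table
        "metrics": {"acc@1": 69.778},
    }
    print(build_meta_table(sample_meta))

Running the snippet prints an RST grid table in which recipe and license are rendered as link entries, min_size is spelled out as height/width, and categories is omitted.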