diff --git a/docs/source/conf.py b/docs/source/conf.py
index 1be5de77e33..3b12fedfb0e 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -315,7 +315,7 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
     used within the autoclass directive.
     """
 
-    if obj.__name__.endswith("_Weights"):
+    if obj.__name__.endswith(("_Weights", "_QuantizedWeights")):
         lines[:] = [
             "The model builder above accepts the following values as the ``weights`` parameter.",
             f"``{obj.__name__}.DEFAULT`` is equivalent to ``{obj.DEFAULT}``.",
@@ -349,7 +349,8 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
 
 
 def generate_weights_table(module, table_name, metrics, include_patterns=None, exclude_patterns=None):
-    weight_enums = [getattr(module, name) for name in dir(module) if name.endswith("_Weights")]
+    weights_endswith = "_QuantizedWeights" if module.__name__.split(".")[-1] == "quantization" else "_Weights"
+    weight_enums = [getattr(module, name) for name in dir(module) if name.endswith(weights_endswith)]
     weights = [w for weight_enum in weight_enums for w in weight_enum]
 
     if include_patterns is not None:
@@ -382,6 +383,9 @@ def generate_weights_table(module, table_name, metrics, include_patterns=None, e
 
 
 generate_weights_table(module=M, table_name="classification", metrics=[("acc@1", "Acc@1"), ("acc@5", "Acc@5")])
+generate_weights_table(
+    module=M.quantization, table_name="classification_quant", metrics=[("acc@1", "Acc@1"), ("acc@5", "Acc@5")]
+)
 generate_weights_table(
     module=M.detection, table_name="detection", metrics=[("box_map", "Box MAP")], exclude_patterns=["Mask", "Keypoint"]
 )
diff --git a/docs/source/models/googlenet_quant.rst b/docs/source/models/googlenet_quant.rst
new file mode 100644
index 00000000000..acb2737b52b
--- /dev/null
+++ b/docs/source/models/googlenet_quant.rst
@@ -0,0 +1,24 @@
+Quantized GoogLeNet
+===================
+
+.. currentmodule:: torchvision.models.quantization
+
+The Quantized GoogLeNet model is based on the `Going Deeper with Convolutions <https://arxiv.org/abs/1409.4842>`__
+paper.
+
+
+Model builders
+--------------
+
+The following model builders can be used to instantiate a quantized GoogLeNet
+model, with or without pre-trained weights. All the model builders internally
+rely on the ``torchvision.models.quantization.googlenet.QuantizableGoogLeNet``
+base class. Please refer to the `source code
+<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
+for more details about this class.
+
+.. autosummary::
+    :toctree: generated/
+    :template: function.rst
+
+    googlenet
diff --git a/docs/source/models_new.rst b/docs/source/models_new.rst
index 2054a6a2fe1..31b349ac37b 100644
--- a/docs/source/models_new.rst
+++ b/docs/source/models_new.rst
@@ -65,6 +65,27 @@ Accuracies are reported on ImageNet
 
 .. include:: generated/classification_table.rst
 
+Quantized models
+----------------
+
+.. currentmodule:: torchvision.models.quantization
+
+The following quantized classification models are available, with or without
+pre-trained weights:
+
+.. toctree::
+    :maxdepth: 1
+
+    models/googlenet_quant
+
+
+Table of all available quantized classification weights
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Accuracies are reported on ImageNet
+
+.. include:: generated/classification_quant_table.rst
+
 Semantic Segmentation
 =====================
 
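The `generate_weights_table` change in `conf.py` above selects weight enums by name suffix: `_QuantizedWeights` when the module's last path component is `quantization`, `_Weights` otherwise. The snippet below is a minimal sketch for checking that selection outside of Sphinx; it re-implements the same suffix rule rather than importing the docs helper, and only assumes that `torchvision.models` and its `quantization` submodule are importable (as `conf.py` already relies on via `M.quantization`).

```python
# Sketch: mirror the suffix rule used by generate_weights_table in conf.py.
import torchvision.models as M

for module in (M, M.quantization):
    # "_QuantizedWeights" for torchvision.models.quantization, "_Weights" otherwise.
    suffix = "_QuantizedWeights" if module.__name__.split(".")[-1] == "quantization" else "_Weights"
    enums = [name for name in dir(module) if name.endswith(suffix)]
    print(f"{module.__name__}: {len(enums)} weight enums, e.g. {enums[:3]}")
```
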
diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py
index 3de5662d59b..a8a4c8bc3b9 100644
--- a/torchvision/models/googlenet.py
+++ b/torchvision/models/googlenet.py
@@ -295,8 +295,9 @@ class GoogLeNet_Weights(WeightsEnum):
 
 @handle_legacy_interface(weights=("pretrained", GoogLeNet_Weights.IMAGENET1K_V1))
 def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = True, **kwargs: Any) -> GoogLeNet:
-    r"""GoogLeNet (Inception v1) model architecture from
+    """GoogLeNet (Inception v1) model architecture from
     `Going Deeper with Convolutions <https://arxiv.org/abs/1409.4842>`_.
+
     The required minimum input size of the model is 15x15.
 
     Args:
diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py
index 00cf37fc349..4dbfa8065c3 100644
--- a/torchvision/models/quantization/googlenet.py
+++ b/torchvision/models/quantization/googlenet.py
@@ -141,18 +141,34 @@ def googlenet(
     quantize: bool = False,
     **kwargs: Any,
 ) -> QuantizableGoogLeNet:
-    r"""GoogLeNet (Inception v1) model architecture from
-    `"Going Deeper with Convolutions" <https://arxiv.org/abs/1409.4842>`_.
+    """GoogLeNet (Inception v1) model architecture from `Going Deeper with Convolutions <https://arxiv.org/abs/1409.4842>`__.
 
-    Note that quantize = True returns a quantized model with 8 bit
+    Note that ``quantize = True`` returns a quantized model with 8 bit
     weights. Quantized models only support inference and run on CPUs.
     GPU inference is not yet supported
 
+    The required minimum input size of the model is 15x15.
+
     Args:
-        weights (GoogLeNet_QuantizedWeights or GoogLeNet_Weights, optional): The pretrained
-            weights for the model
-        progress (bool): If True, displays a progress bar of the download to stderr
-        quantize (bool): If True, return a quantized version of the model
+        weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
+            pretrained weights for the model. See
+            :class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` below for
+            more details, and possible values. By default, no pre-trained
+            weights are used.
+        progress (bool, optional): If True, displays a progress bar of the
+            download to stderr. Default is True.
+        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
+        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableGoogLeNet``
+            base class. Please refer to the `source code
+            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
+            for more details about this class.
+
+    .. autoclass:: torchvision.models.quantization.GoogLeNet_QuantizedWeights
+        :members:
+
+    .. autoclass:: torchvision.models.GoogLeNet_Weights
+        :members:
+        :noindex:
     """
     weights = (GoogLeNet_QuantizedWeights if quantize else GoogLeNet_Weights).verify(weights)
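For reviewers who want to exercise the builder documented by the new `googlenet_quant.rst` page, a minimal usage sketch follows. It assumes a torchvision build that ships `GoogLeNet_QuantizedWeights`; `DEFAULT`, `transforms()` and `meta["categories"]` come from the new weights API rather than from this diff, and the random tensor is only a stand-in for a real image.

```python
# Sketch: build and run the quantized GoogLeNet documented above (CPU inference only).
import torch
from torchvision.models.quantization import GoogLeNet_QuantizedWeights, googlenet

weights = GoogLeNet_QuantizedWeights.DEFAULT  # downloads the pretrained int8 checkpoint
model = googlenet(weights=weights, quantize=True)
model.eval()

# Use the preprocessing transforms bundled with the weights (resize/crop/normalize).
preprocess = weights.transforms()
batch = preprocess(torch.rand(3, 224, 224)).unsqueeze(0)  # placeholder input

with torch.inference_mode():
    logits = model(batch)
    class_id = int(logits.squeeze(0).argmax())
    print(weights.meta["categories"][class_id])
```

Passing `quantize=False` together with a `GoogLeNet_Weights` value returns the float model through the same builder, which is what the `verify` dispatch at the end of the hunk above selects between.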