diff --git a/docs/source/models/mobilenetv2_quant.rst b/docs/source/models/mobilenetv2_quant.rst
new file mode 100644
index 00000000000..e5397378fab
--- /dev/null
+++ b/docs/source/models/mobilenetv2_quant.rst
@@ -0,0 +1,24 @@
+Quantized MobileNet V2
+======================
+
+.. currentmodule:: torchvision.models.quantization
+
+The Quantized MobileNet V2 model is based on the `MobileNetV2: Inverted Residuals and Linear
+Bottlenecks <https://arxiv.org/abs/1801.04381>`__ paper.
+
+
+Model builders
+--------------
+
+The following model builders can be used to instantiate a quantized MobileNetV2
+model, with or without pre-trained weights. All the model builders internally
+rely on the ``torchvision.models.quantization.mobilenetv2.QuantizableMobileNetV2``
+base class. Please refer to the `source code
+<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
+for more details about this class.
+
+.. autosummary::
+    :toctree: generated/
+    :template: function.rst
+
+    mobilenet_v2
diff --git a/docs/source/models_new.rst b/docs/source/models_new.rst
index db511425cb4..43374582f2a 100644
--- a/docs/source/models_new.rst
+++ b/docs/source/models_new.rst
@@ -76,6 +76,7 @@ pre-trained weights:
    :maxdepth: 1
 
    models/googlenet_quant
+   models/mobilenetv2_quant
 
 
 Table of all available quantized classification weights
diff --git a/torchvision/models/quantization/mobilenetv2.py b/torchvision/models/quantization/mobilenetv2.py
index 2d73cc77008..ce72967730c 100644
--- a/torchvision/models/quantization/mobilenetv2.py
+++ b/torchvision/models/quantization/mobilenetv2.py
@@ -101,7 +101,7 @@ def mobilenet_v2(
 ) -> QuantizableMobileNetV2:
     """
     Constructs a MobileNetV2 architecture from
-    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks"
+    `MobileNetV2: Inverted Residuals and Linear Bottlenecks
     <https://arxiv.org/abs/1801.04381>`_.
 
     Note that quantize = True returns a quantized model with 8 bit
@@ -109,10 +109,22 @@
     GPU inference is not yet supported
 
     Args:
-        weights (GoogLeNet_QuantizedWeights or GoogLeNet_Weights, optional): The pretrained
-            weights for the model
-        progress (bool): If True, displays a progress bar of the download to stderr
-        quantize(bool): If True, returns a quantized model, else returns a float model
+        weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
+            pretrained weights for the model. See
+            :class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
+            more details, and possible values. By default, no pre-trained
+            weights are used.
+        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
+        quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
+        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
+            base class. Please refer to the `source code
+            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
+            for more details about this class.
+    .. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
+        :members:
+    .. autoclass:: torchvision.models.MobileNet_V2_Weights
+        :members:
+        :noindex:
     """
     weights = (MobileNet_V2_QuantizedWeights if quantize else MobileNet_V2_Weights).verify(weights)
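
For reference, a minimal usage sketch of the builder documented above. It assumes that ``MobileNet_V2_QuantizedWeights.DEFAULT`` resolves to the pre-trained quantized weights and that the ``transforms()`` preset bundled with the weights accepts a tensor batch; quantized models only run inference on CPU.

```python
# Sketch: instantiating the quantized mobilenet_v2 builder documented above.
# Assumption: MobileNet_V2_QuantizedWeights.DEFAULT points at the pre-trained
# int8 weights; the builder itself selects the matching quantization backend.
import torch
from torchvision.models.quantization import MobileNet_V2_QuantizedWeights, mobilenet_v2

weights = MobileNet_V2_QuantizedWeights.DEFAULT
model = mobilenet_v2(weights=weights, quantize=True)  # int8 model, CPU inference only
model.eval()

# Preprocess a dummy batch with the transforms bundled with the weights.
preprocess = weights.transforms()
batch = preprocess(torch.rand(1, 3, 224, 224))
with torch.no_grad():
    logits = model(batch)
print(logits.shape)  # expected: torch.Size([1, 1000])

# With quantize=False the same builder returns a float model; per the last line
# of the diff, weights are then verified against torchvision.models.MobileNet_V2_Weights.
float_model = mobilenet_v2(weights=None, quantize=False)
```

As the final line of the diff shows, the ``quantize`` flag decides which enum (``MobileNet_V2_QuantizedWeights`` or ``MobileNet_V2_Weights``) is used to verify the ``weights`` argument, which is why the docstring documents both classes.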