diff --git a/docs/source/models.rst b/docs/source/models.rst
index 8f918514fb7..7262d332847 100644
--- a/docs/source/models.rst
+++ b/docs/source/models.rst
@@ -479,7 +479,8 @@ Model                             Acc@1          Acc@5
 ================================  =============  =============
 MobileNet V2                      71.658         90.150
 MobileNet V3 Large                73.004         90.858
-ShuffleNet V2                     68.360         87.582
+ShuffleNet V2 x1.0                68.360         87.582
+ShuffleNet V2 x0.5                57.972         79.780
 ResNet 18                         69.494         88.882
 ResNet 50                         75.920         92.814
 ResNext 101 32x8d                 78.986         94.480
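
The Acc@1 / Acc@5 columns above are top-1 and top-5 accuracy on the ImageNet validation set. A minimal sketch of the metric, illustrative only and not part of the diff (`topk_accuracy` is a made-up helper name):

```python
import torch

def topk_accuracy(output: torch.Tensor, target: torch.Tensor, topk=(1, 5)):
    """Return top-k accuracies (in percent) for logits `output` and labels `target`."""
    maxk = max(topk)
    # Indices of the maxk highest-scoring classes per sample, shape (N, maxk).
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    correct = pred.eq(target.view(-1, 1))  # (N, maxk) boolean hit matrix
    return [correct[:, :k].any(dim=1).float().mean().item() * 100 for k in topk]

# Toy usage with random logits; the table numbers come from the full validation set.
logits = torch.randn(8, 1000)
labels = torch.randint(0, 1000, (8,))
acc1, acc5 = topk_accuracy(logits, labels)
```
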
diff --git a/references/classification/README.md b/references/classification/README.md
index 302e9d57562..2db1a783417 100644
--- a/references/classification/README.md
+++ b/references/classification/README.md
@@ -168,7 +168,7 @@ For all post training quantized models, the settings are:
 ```
 python train_quantization.py --device='cpu' --post-training-quantize --backend='fbgemm' --model='$MODEL'
 ```
-Here `$MODEL` is one of `googlenet`, `inception_v3`, `resnet18`, `resnet50`, `resnext101_32x8d` and `shufflenet_v2_x1_0`.
+Here `$MODEL` is one of `googlenet`, `inception_v3`, `resnet18`, `resnet50`, `resnext101_32x8d`, `shufflenet_v2_x0_5`, and `shufflenet_v2_x1_0`.
 
 ### QAT MobileNetV2
 
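
For context, a rough sketch of what `--post-training-quantize` amounts to for one of these models (fuse, attach the fbgemm qconfig, calibrate, convert). The authoritative flow lives in `train_quantization.py`; this is illustrative only, and the calibration loader is assumed rather than shown:

```python
import torch
from torchvision.models.quantization import shufflenet_v2_x0_5

# Float pretrained weights in a quantization-ready architecture.
model = shufflenet_v2_x0_5(pretrained=True, quantize=False)
model.eval()
model.fuse_model()  # fuse Conv+BN(+ReLU) blocks before quantization
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(model, inplace=True)   # insert observers

# Calibrate on a few batches of training data (loader assumed, not shown).
# for images, _ in calibration_loader:
#     model(images)

torch.quantization.convert(model, inplace=True)   # swap in int8 modules
```
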
diff --git a/torchvision/models/quantization/shufflenetv2.py b/torchvision/models/quantization/shufflenetv2.py
index c3b79b57c02..c316bb7047f 100644
--- a/torchvision/models/quantization/shufflenetv2.py
+++ b/torchvision/models/quantization/shufflenetv2.py
@@ -1,4 +1,4 @@
-from typing import Any
+from typing import Any, Optional
 
 import torch
 import torch.nn as nn
@@ -12,15 +12,11 @@
     "QuantizableShuffleNetV2",
     "shufflenet_v2_x0_5",
     "shufflenet_v2_x1_0",
-    "shufflenet_v2_x1_5",
-    "shufflenet_v2_x2_0",
 ]
 
 quant_model_urls = {
-    "shufflenetv2_x0.5_fbgemm": None,
+    "shufflenetv2_x0.5_fbgemm": "https://download.pytorch.org/models/quantized/shufflenetv2_x0.5_fbgemm-00845098.pth",
     "shufflenetv2_x1.0_fbgemm": "https://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-db332c57.pth",
-    "shufflenetv2_x1.5_fbgemm": None,
-    "shufflenetv2_x2.0_fbgemm": None,
 }
 
 
@@ -96,6 +92,7 @@ def _shufflenetv2(
         assert pretrained in [True, False]
 
     if pretrained:
+        model_url: Optional[str] = None
         if quantize:
             model_url = quant_model_urls[arch + "_" + backend]
         else:
@@ -147,45 +144,3 @@ def shufflenet_v2_x1_0(
     return _shufflenetv2(
         "shufflenetv2_x1.0", pretrained, progress, quantize, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs
     )
-
-
-def shufflenet_v2_x1_5(
-    pretrained: bool = False,
-    progress: bool = True,
-    quantize: bool = False,
-    **kwargs: Any,
-) -> QuantizableShuffleNetV2:
-    """
-    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
-    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
-    <https://arxiv.org/abs/1807.11164>`_.
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-        progress (bool): If True, displays a progress bar of the download to stderr
-        quantize (bool): If True, return a quantized version of the model
-    """
-    return _shufflenetv2(
-        "shufflenetv2_x1.5", pretrained, progress, quantize, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs
-    )
-
-
-def shufflenet_v2_x2_0(
-    pretrained: bool = False,
-    progress: bool = True,
-    quantize: bool = False,
-    **kwargs: Any,
-) -> QuantizableShuffleNetV2:
-    """
-    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
-    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
-    <https://arxiv.org/abs/1807.11164>`_.
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-        progress (bool): If True, displays a progress bar of the download to stderr
-        quantize (bool): If True, return a quantized version of the model
-    """
-    return _shufflenetv2(
-        "shufflenetv2_x2.0", pretrained, progress, quantize, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs
-    )
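
Finally, a minimal usage sketch for the newly published `shufflenetv2_x0.5_fbgemm` weights (assumes a CPU build of PyTorch with fbgemm support; illustrative only, not part of the diff):

```python
import torch
from torchvision.models.quantization import shufflenet_v2_x0_5

# Load the int8 model with the fbgemm weights wired up in this change.
model = shufflenet_v2_x0_5(pretrained=True, quantize=True)
model.eval()

with torch.no_grad():
    logits = model(torch.rand(1, 3, 224, 224))  # (1, 1000) class scores
    top5 = logits.topk(5).indices               # 5 most likely ImageNet classes
```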