From 585aa2aeba825f67f0210e5a45dfe591edb57e96 Mon Sep 17 00:00:00 2001
From: "deepsource-autofix[bot]" <62050782+deepsource-autofix[bot]@users.noreply.github.com>
Date: Wed, 13 Oct 2021 00:25:59 +0200
Subject: [PATCH] Use `is` to compare type of objects

Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
---
 torchvision/models/quantization/googlenet.py    | 2 +-
 torchvision/models/quantization/inception.py    | 2 +-
 torchvision/models/quantization/mobilenetv2.py  | 6 +++---
 torchvision/models/quantization/mobilenetv3.py  | 6 +++---
 torchvision/models/quantization/resnet.py       | 2 +-
 torchvision/models/quantization/shufflenetv2.py | 2 +-
 torchvision/models/quantization/utils.py        | 2 +-
 7 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py
index 4b6d25e013c..6266e296c03 100644
--- a/torchvision/models/quantization/googlenet.py
+++ b/torchvision/models/quantization/googlenet.py
@@ -167,5 +167,5 @@ def fuse_model(self) -> None:
         """
 
         for m in self.modules():
-            if type(m) == QuantizableBasicConv2d:
+            if type(m) is QuantizableBasicConv2d:
                 m.fuse_model()
diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py
index acad3f6df53..ea9326276f2 100644
--- a/torchvision/models/quantization/inception.py
+++ b/torchvision/models/quantization/inception.py
@@ -247,5 +247,5 @@ def fuse_model(self) -> None:
         """
 
         for m in self.modules():
-            if type(m) == QuantizableBasicConv2d:
+            if type(m) is QuantizableBasicConv2d:
                 m.fuse_model()
diff --git a/torchvision/models/quantization/mobilenetv2.py b/torchvision/models/quantization/mobilenetv2.py
index a2c88cdd388..8626de19aab 100644
--- a/torchvision/models/quantization/mobilenetv2.py
+++ b/torchvision/models/quantization/mobilenetv2.py
@@ -30,7 +30,7 @@ def forward(self, x: Tensor) -> Tensor:
 
     def fuse_model(self) -> None:
         for idx in range(len(self.conv)):
-            if type(self.conv[idx]) == nn.Conv2d:
+            if type(self.conv[idx]) is nn.Conv2d:
                 fuse_modules(self.conv, [str(idx), str(idx + 1)], inplace=True)
 
 
@@ -54,9 +54,9 @@ def forward(self, x: Tensor) -> Tensor:
 
     def fuse_model(self) -> None:
         for m in self.modules():
-            if type(m) == ConvNormActivation:
+            if type(m) is ConvNormActivation:
                 fuse_modules(m, ["0", "1", "2"], inplace=True)
-            if type(m) == QuantizableInvertedResidual:
+            if type(m) is QuantizableInvertedResidual:
                 m.fuse_model()
 
 
diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py
index ad195d178c7..e19912e25b0 100644
--- a/torchvision/models/quantization/mobilenetv3.py
+++ b/torchvision/models/quantization/mobilenetv3.py
@@ -99,12 +99,12 @@ def forward(self, x: Tensor) -> Tensor:
 
     def fuse_model(self) -> None:
         for m in self.modules():
-            if type(m) == ConvNormActivation:
+            if type(m) is ConvNormActivation:
                 modules_to_fuse = ["0", "1"]
-                if len(m) == 3 and type(m[2]) == nn.ReLU:
+                if len(m) == 3 and type(m[2]) is nn.ReLU:
                     modules_to_fuse.append("2")
                 fuse_modules(m, modules_to_fuse, inplace=True)
-            elif type(m) == QuantizableSqueezeExcitation:
+            elif type(m) is QuantizableSqueezeExcitation:
                 m.fuse_model()
 
 
diff --git a/torchvision/models/quantization/resnet.py b/torchvision/models/quantization/resnet.py
index f7124798254..596ae56d85b 100644
--- a/torchvision/models/quantization/resnet.py
+++ b/torchvision/models/quantization/resnet.py
@@ -104,7 +104,7 @@ def fuse_model(self) -> None:
 
         fuse_modules(self, ["conv1", "bn1", "relu"], inplace=True)
         for m in self.modules():
-            if type(m) == QuantizableBottleneck or type(m) == QuantizableBasicBlock:
+            if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock:
                 m.fuse_model()
 
 
diff --git a/torchvision/models/quantization/shufflenetv2.py b/torchvision/models/quantization/shufflenetv2.py
index a4c4aede665..76920433399 100644
--- a/torchvision/models/quantization/shufflenetv2.py
+++ b/torchvision/models/quantization/shufflenetv2.py
@@ -68,7 +68,7 @@ def fuse_model(self) -> None:
             if name in ["conv1", "conv5"]:
                 torch.quantization.fuse_modules(m, [["0", "1", "2"]], inplace=True)
         for m in self.modules():
-            if type(m) == QuantizableInvertedResidual:
+            if type(m) is QuantizableInvertedResidual:
                 if len(m.branch1._modules.items()) > 0:
                     torch.quantization.fuse_modules(m.branch1, [["0", "1"], ["2", "3", "4"]], inplace=True)
                 torch.quantization.fuse_modules(
diff --git a/torchvision/models/quantization/utils.py b/torchvision/models/quantization/utils.py
index 74a8287030b..22edee47621 100644
--- a/torchvision/models/quantization/utils.py
+++ b/torchvision/models/quantization/utils.py
@@ -9,7 +9,7 @@ def _replace_relu(module: nn.Module) -> None:
         # Checking for explicit type instead of instance
         # as we only want to replace modules of the exact type
         # not inherited classes
-        if type(mod) == nn.ReLU or type(mod) == nn.ReLU6:
+        if type(mod) is nn.ReLU or type(mod) is nn.ReLU6:
             reassign[name] = nn.ReLU(inplace=False)
 
     for key, value in reassign.items():
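
Note (not part of the patch): a minimal sketch of why these exact-type checks use `is` rather than `==` or `isinstance`. Since a class object is unique, comparing with `is` checks identity directly and, unlike `==`, cannot be altered by a custom metaclass `__eq__`; `isinstance` would also match subclasses, which these fuse/replace helpers deliberately exclude. The `MyReLU` subclass below is hypothetical and only for illustration; it assumes torch is installed.

    # Illustration only, not part of the patch above.
    import torch.nn as nn


    class MyReLU(nn.ReLU):  # hypothetical subclass used for demonstration
        pass


    m = MyReLU()

    print(isinstance(m, nn.ReLU))      # True  - isinstance matches subclasses too
    print(type(m) == nn.ReLU)          # False - exact-type check via equality
    print(type(m) is nn.ReLU)          # False - exact-type check via identity,
                                       #         the form this patch switches to
    print(type(nn.ReLU()) is nn.ReLU)  # True  - only the exact class matches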