From d071ef58778a11174fbc5b4319712d9530752051 Mon Sep 17 00:00:00 2001
From: frgfm
Date: Wed, 21 Oct 2020 17:43:58 +0200
Subject: [PATCH 1/8] style: Added annotation typing for vgg

---
 torchvision/models/vgg.py | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index dba534f651d..a39fe89f0c3 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -1,6 +1,8 @@
 import torch
 import torch.nn as nn
 from .utils import load_state_dict_from_url
+from typing import Union
+from torch.jit.annotations import List


 __all__ = [
@@ -23,7 +25,12 @@

 class VGG(nn.Module):

-    def __init__(self, features, num_classes=1000, init_weights=True):
+    def __init__(
+        self,
+        features: nn.Module,
+        num_classes: int = 1000,
+        init_weights: bool = True
+    ):
         super(VGG, self).__init__()
         self.features = features
         self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
@@ -39,14 +46,14 @@ def __init__(self, features, num_classes=1000, init_weights=True):
         if init_weights:
             self._initialize_weights()

-    def forward(self, x):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = self.features(x)
         x = self.avgpool(x)
         x = torch.flatten(x, 1)
         x = self.classifier(x)
         return x

-    def _initialize_weights(self):
+    def _initialize_weights(self) -> None:
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
@@ -60,7 +67,7 @@ def _initialize_weights(self):
                 nn.init.constant_(m.bias, 0)


-def make_layers(cfg, batch_norm=False):
+def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
     layers = []
     in_channels = 3
     for v in cfg:
@@ -84,7 +91,7 @@ def make_layers(cfg, batch_norm=False):
 }


-def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
+def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs) -> VGG:
     if pretrained:
         kwargs['init_weights'] = False
     model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
@@ -95,7 +102,7 @@ def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool
     return model


-def vgg11(pretrained=False, progress=True, **kwargs):
+def vgg11(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 11-layer model (configuration "A") from
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -106,7 +113,7 @@ def vgg11(pretrained=False, progress=True, **kwargs):
     return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)


-def vgg11_bn(pretrained=False, progress=True, **kwargs):
+def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 11-layer model (configuration "A") with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -117,7 +124,7 @@ def vgg11_bn(pretrained=False, progress=True, **kwargs):
     return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)


-def vgg13(pretrained=False, progress=True, **kwargs):
+def vgg13(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 13-layer model (configuration "B")
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -128,7 +135,7 @@ def vgg13(pretrained=False, progress=True, **kwargs):
     return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)


-def vgg13_bn(pretrained=False, progress=True, **kwargs):
+def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 13-layer model (configuration "B") with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -139,7 +146,7 @@ def vgg13_bn(pretrained=False, progress=True, **kwargs):
     return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)


-def vgg16(pretrained=False, progress=True, **kwargs):
+def vgg16(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 16-layer model (configuration "D")
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -150,7 +157,7 @@ def vgg16(pretrained=False, progress=True, **kwargs):
     return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)


-def vgg16_bn(pretrained=False, progress=True, **kwargs):
+def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 16-layer model (configuration "D") with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -161,7 +168,7 @@ def vgg16_bn(pretrained=False, progress=True, **kwargs):
     return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)


-def vgg19(pretrained=False, progress=True, **kwargs):
+def vgg19(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 19-layer model (configuration "E")
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -172,7 +179,7 @@ def vgg19(pretrained=False, progress=True, **kwargs):
     return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)


-def vgg19_bn(pretrained=False, progress=True, **kwargs):
+def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     r"""VGG 19-layer model (configuration 'E') with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

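[Editor's note, not part of the patch series] A minimal sketch of the API that the annotations in patch 1 describe; `VGG`, `make_layers` and `cfgs` are the module-level objects touched above, and the 224x224 input is just the usual ImageNet resolution, not something the annotations require.

    import torch
    from torchvision.models.vgg import VGG, cfgs, make_layers

    # features: nn.Module, num_classes: int, init_weights: bool, as annotated above
    model = VGG(make_layers(cfgs['A']), num_classes=1000, init_weights=True)
    out = model(torch.randn(1, 3, 224, 224))  # forward(x: Tensor) -> Tensor
    print(out.shape)                          # torch.Size([1, 1000])
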
From ee8567473745392a51aea23faa4e7b38103a9586 Mon Sep 17 00:00:00 2001
From: frgfm
Date: Wed, 21 Oct 2020 22:42:06 +0200
Subject: [PATCH 2/8] fix: Fixed annotation typing

---
 torchvision/models/vgg.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index a39fe89f0c3..219dc233d32 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -2,7 +2,7 @@
 import torch.nn as nn
 from .utils import load_state_dict_from_url
 from typing import Union
-from torch.jit.annotations import List
+from torch.jit.annotations import List, Dict


 __all__ = [
@@ -71,19 +71,19 @@ def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequ
     layers = []
     in_channels = 3
     for v in cfg:
-        if v == 'M':
-            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
-        else:
-            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
+        if isinstance(v, int):
+            conv2d = nn.Conv2d(in_channels, int(v), kernel_size=3, padding=1)
             if batch_norm:
                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
             else:
                 layers += [conv2d, nn.ReLU(inplace=True)]
             in_channels = v
+        else:
+            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
     return nn.Sequential(*layers)


-cfgs = {
+cfgs: Dict[str, List[Union[str, int]]] = {
     'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
     'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
     'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],

From 02196ff416e2ccfe1127d1c1a8745e47afa2fd93 Mon Sep 17 00:00:00 2001
From: frgfm
Date: Thu, 22 Oct 2020 12:06:59 +0200
Subject: [PATCH 3/8] refactor: Removed un-necessary import

---
 torchvision/models/vgg.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index 219dc233d32..165a9e26221 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -1,8 +1,7 @@
 import torch
 import torch.nn as nn
 from .utils import load_state_dict_from_url
-from typing import Union
-from torch.jit.annotations import List, Dict
+from typing import Union, List, Dict


 __all__ = [
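[Editor's note, not part of the patch series] Patches 2 and 3 type `cfgs` as `Dict[str, List[Union[str, int]]]` and branch on the element type instead of comparing against 'M' first. A standalone illustration of the narrowing involved (not the torchvision code; `count_channels` and `cfgs_demo` are made up for the example):

    from typing import Dict, List, Union

    cfg: List[Union[str, int]] = [64, 'M', 128, 'M']
    cfgs_demo: Dict[str, List[Union[str, int]]] = {'tiny': cfg}

    def count_channels(cfg: List[Union[str, int]]) -> int:
        total = 0
        for v in cfg:
            if isinstance(v, int):  # a checker now treats v as int in this branch
                total += v
            # 'M' entries only mark max-pooling and carry no channel count
        return total

    print(count_channels(cfgs_demo['tiny']))  # 192
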
From bcecc51000737d46a776ac4abb2a75f74f832b34 Mon Sep 17 00:00:00 2001
From: frgfm
Date: Thu, 22 Oct 2020 18:49:49 +0200
Subject: [PATCH 4/8] fix: Added missing annotation for kwargs

---
 torchvision/models/vgg.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index 165a9e26221..e48b4efe637 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 from .utils import load_state_dict_from_url
-from typing import Union, List, Dict
+from typing import Union, List, Dict, Any


 __all__ = [
@@ -90,7 +90,7 @@ def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequ
 }


-def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs) -> VGG:
+def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
     if pretrained:
         kwargs['init_weights'] = False
     model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
@@ -101,7 +101,7 @@ def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool
     return model


-def vgg11(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 11-layer model (configuration "A") from
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -112,7 +112,7 @@ def vgg11(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)


-def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 11-layer model (configuration "A") with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -123,7 +123,7 @@ def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)


-def vgg13(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 13-layer model (configuration "B")
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -134,7 +134,7 @@ def vgg13(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)


-def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 13-layer model (configuration "B") with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -145,7 +145,7 @@ def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)


-def vgg16(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 16-layer model (configuration "D")
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -156,7 +156,7 @@ def vgg16(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)


-def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 16-layer model (configuration "D") with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -167,7 +167,7 @@ def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)


-def vgg19(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 19-layer model (configuration "E")
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

@@ -178,7 +178,7 @@ def vgg19(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
     return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)


-def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs) -> VGG:
+def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
     r"""VGG 19-layer model (configuration 'E') with batch normalization
     `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

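[Editor's note, not part of the patch series] In patch 4, `**kwargs: Any` annotates the type of each forwarded keyword value, so inside the function the parameter behaves like a Dict[str, Any]. A hypothetical helper (not torchvision code) mirroring how `_vgg` passes keywords through to the constructor:

    from typing import Any, Dict

    def build_config(num_classes: int = 1000, **kwargs: Any) -> Dict[str, Any]:
        # any extra keyword, e.g. init_weights=False, is forwarded untouched
        return {'num_classes': num_classes, **kwargs}

    print(build_config(num_classes=10, init_weights=False))
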
From 3e914c228e7b9ab834ce2fd063b5297d5a0b0233 Mon Sep 17 00:00:00 2001
From: frgfm
Date: Thu, 22 Oct 2020 21:10:59 +0200
Subject: [PATCH 5/8] fix: Fixed constructor typing

---
 torchvision/models/vgg.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index e48b4efe637..b0d3aefcf5f 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -29,7 +29,7 @@ def __init__(
         features: nn.Module,
         num_classes: int = 1000,
         init_weights: bool = True
-    ):
+    ) -> None:
         super(VGG, self).__init__()
         self.features = features
         self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
@@ -71,7 +71,7 @@ def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequ
     in_channels = 3
     for v in cfg:
         if isinstance(v, int):
-            conv2d = nn.Conv2d(in_channels, int(v), kernel_size=3, padding=1)
+            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
             if batch_norm:
                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
             else:
                 layers += [conv2d, nn.ReLU(inplace=True)]

From 52be09b2e4ad004bdc854477ff5953fd007a4f91 Mon Sep 17 00:00:00 2001
From: frgfm
Date: Fri, 23 Oct 2020 11:38:00 +0200
Subject: [PATCH 6/8] refactor: Refactored typing to minize changes

---
 torchvision/models/vgg.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index b0d3aefcf5f..a7657b77ada 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -70,15 +70,16 @@ def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequ
     layers = []
     in_channels = 3
     for v in cfg:
-        if isinstance(v, int):
+        if v == 'M':
+            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+        else:
+            v = int(v)
             conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
             if batch_norm:
                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
             else:
                 layers += [conv2d, nn.ReLU(inplace=True)]
             in_channels = v
-        else:
-            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
     return nn.Sequential(*layers)

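[Editor's note, not part of the patch series] Patches 5 and 6 settle on two conventions: constructors are annotated `-> None`, and a `Union[str, int]` value is converted with int(v) before use so the rest of the branch sees an int. A toy class illustrating both (not torchvision code):

    from typing import Union

    class Block:
        def __init__(self, width: Union[str, int] = 64) -> None:
            # int() converts at runtime ('64' and 64 both work) and leaves
            # self.width typed as int for the checker
            self.width = int(width)

    print(Block('128').width + Block().width)  # 192
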
From 26cc36c34c7d9b5de1184a7976cca6ddfdc66f71 Mon Sep 17 00:00:00 2001
From: frgfm
Date: Fri, 23 Oct 2020 11:39:57 +0200
Subject: [PATCH 7/8] refactor: Refactored typing cast

---
 torchvision/models/vgg.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index a7657b77ada..6ba6414c0ef 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 from .utils import load_state_dict_from_url
-from typing import Union, List, Dict, Any
+from typing import Union, List, Dict, Any, cast


 __all__ = [
@@ -73,7 +73,7 @@ def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequ
         if v == 'M':
             layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
         else:
-            v = int(v)
+            v = cast(int, v)
             conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
             if batch_norm:
                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]

From 351eb8d37821a61afa58ff10fb2471efa76cda5b Mon Sep 17 00:00:00 2001
From: frgfm
Date: Fri, 23 Oct 2020 13:26:46 +0200
Subject: [PATCH 8/8] fix: Fixed module list typing

---
 torchvision/models/vgg.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py
index 6ba6414c0ef..433b27e9d97 100644
--- a/torchvision/models/vgg.py
+++ b/torchvision/models/vgg.py
@@ -67,7 +67,7 @@ def _initialize_weights(self) -> None:


 def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
-    layers = []
+    layers: List[nn.Module] = []
     in_channels = 3
     for v in cfg:
         if v == 'M':
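[Editor's note, not part of the patch series] Patch 7 swaps int(v) for `typing.cast`, which only informs the type checker and performs no conversion at runtime, and patch 8 annotates the accumulator as `List[nn.Module]` so layers of different classes can be appended to one list. A standalone sketch of both ideas (not the torchvision code):

    import torch.nn as nn
    from typing import List, Union, cast

    v: Union[str, int] = 64
    c = cast(int, v)  # returns v unchanged; only the static type is affected
    assert c is v

    layers: List[nn.Module] = []
    layers += [nn.Conv2d(3, c, kernel_size=3, padding=1), nn.ReLU(inplace=True)]
    print(nn.Sequential(*layers))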