From eeb7bfe793a6aedbf6137403be56fbc1224ed62a Mon Sep 17 00:00:00 2001
From: ericup
Date: Sun, 8 May 2022 12:48:48 +0200
Subject: [PATCH] Update all U-Nets

---
 celldetection/__meta__.py    |   2 +-
 celldetection/models/unet.py | 276 ++++++++++++++++++++++++-----------
 2 files changed, 192 insertions(+), 86 deletions(-)

diff --git a/celldetection/__meta__.py b/celldetection/__meta__.py
index c9d2fc5..a939582 100644
--- a/celldetection/__meta__.py
+++ b/celldetection/__meta__.py
@@ -1,5 +1,5 @@
 __title__ = 'CellDetection'
-__version__ = '0.3.2'
+__version__ = '0.4.0'
 __author__ = 'Eric Upschulte'
 __email__ = 'e.upschulte@fz-juelich.de'
 __license__ = 'Apache License, Version 2.0'
diff --git a/celldetection/models/unet.py b/celldetection/models/unet.py
index 071bd4c..1c9f06a 100644
--- a/celldetection/models/unet.py
+++ b/celldetection/models/unet.py
@@ -50,28 +50,34 @@ def __init__(
             in_channels_list,
             out_channels: int,
             block_cls: nn.Module,
-            final_activation=None
+            block_kwargs: dict = None,
+            final_activation=None,
+            interpolate='nearest',
+            initialize=True
     ):
         super().__init__([], 0)
+        icl = in_channels_list
+        block_kwargs = {} if block_kwargs is None else block_kwargs
+        self.interpolate = interpolate
         self.out_channels = out_channels
-        for j, in_channels in enumerate(in_channels_list):
+        self.out_layer = nn.Conv2d(icl[0], out_channels, (1, 1)) if out_channels > 0 else None
+
+        for j, in_channels in enumerate(icl):
             if in_channels == 0:
                 raise ValueError("in_channels=0 is currently not supported")
-            oc = out_channels if j <= 0 else in_channels_list[j - 1]
-            inner_block_module = nn.Identity() if oc <= 0 else nn.Conv2d(in_channels, oc, (1, 1))
-            self.inner_blocks.append(inner_block_module)
-            if j <= 0 or oc <= 0:
-                layer_block_module = nn.Identity()
-            else:
-                layer_block_module = block_cls(in_channels, oc)
-            self.layer_blocks.append(layer_block_module)
-
-        # initialize parameters now to avoid modifying the initialization of top_blocks
-        for m in self.children():
-            if isinstance(m, nn.Conv2d):
-                nn.init.kaiming_uniform_(m.weight, a=1)
-                nn.init.constant_(m.bias, 0)
-        self.final_activation = lookup_nn(final_activation)
+            if j > 0:
+                inner = nn.Conv2d(in_channels, icl[j - 1], (1, 1)) if icl[j - 1] < in_channels else nn.Identity()
+                self.inner_blocks.append(inner)
+            if j < len(icl) - 1:
+                layer_block_module = block_cls(in_channels + min(icl[j:j+2]), in_channels, **block_kwargs)
+                self.layer_blocks.append(layer_block_module)
+
+        if initialize:
+            for m in self.children():
+                if isinstance(m, nn.Conv2d):
+                    nn.init.kaiming_uniform_(m.weight, a=1)
+                    nn.init.constant_(m.bias, 0)
+        self.final_activation = None if final_activation is None else lookup_nn(final_activation)
 
     def forward(self, x: Dict[str, Tensor], size: List[int]) -> Dict[str, Tensor]:
         """
@@ -96,19 +102,18 @@ def forward(self, x: Dict[str, Tensor], size: List[int]) -> Dict[str, Tensor]:
                 3: Tensor[1, 512, 16, 16]
             }
         """
-        # unpack OrderedDict into two lists for easier handling
         names = list(x.keys())
        x = list(x.values())
 
         last_inner = x[-1]
         results = [last_inner]
-        idx = -1
         for idx in range(len(x) - 2, -1, -1):
             inner_lateral = x[idx]
             feat_shape = inner_lateral.shape[-2:]
-            inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest")  # adjust size
-            inner_top_down = self.get_result_from_inner_blocks(inner_top_down, idx + 1)  # reduce channels
+            kw = {} if self.interpolate == 'nearest' else {'align_corners': False}
+            inner_top_down = F.interpolate(last_inner, size=feat_shape, mode=self.interpolate, **kw)  # adjust size
+            inner_top_down = self.get_result_from_inner_blocks(inner_top_down, idx)  # reduce channels
             last_inner = torch.cat((inner_lateral, inner_top_down), 1)  # concat
-            last_inner = self.get_result_from_layer_blocks(last_inner, idx + 1)  # apply layer
+            last_inner = self.get_result_from_layer_blocks(last_inner, idx)  # apply layer
             results.insert(0, last_inner)
 
         if self.extra_blocks is not None:
@@ -117,8 +122,10 @@ def forward(self, x: Dict[str, Tensor], size: List[int]) -> Dict[str, Tensor]:
             final = results[0]
         else:
             final = F.interpolate(last_inner, size=size, mode="bilinear", align_corners=False)
-        final = self.get_result_from_inner_blocks(final, idx)
-        final = self.final_activation(final)
+        if self.out_layer is not None:
+            final = self.out_layer(final)
+        if self.final_activation is not None:
+            final = self.final_activation(final)
         if self.out_channels:
             return final
         results.insert(0, final)
@@ -128,7 +135,8 @@ def forward(self, x: Dict[str, Tensor], size: List[int]) -> Dict[str, Tensor]:
 
 
 class BackboneAsUNet(nn.Module):
-    def __init__(self, backbone, return_layers, in_channels_list, out_channels, block, final_activation=None):
+    def __init__(self, backbone, return_layers, in_channels_list, out_channels, block, block_kwargs: dict = None,
+                 final_activation=None, interpolate='nearest', **kwargs):
         super(BackboneAsUNet, self).__init__()
         block = block or TwoConvNormRelu
         self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
@@ -136,8 +144,11 @@ def __init__(self, backbone, return_layers, in_channels_list, out_channels, bloc
             in_channels_list=in_channels_list,
             out_channels=out_channels,
             block_cls=block,
+            block_kwargs=block_kwargs,
             # extra_blocks=LastLevelMaxPool(),
-            final_activation=final_activation
+            final_activation=final_activation,
+            interpolate=interpolate,
+            **kwargs
         )
         self.out_channels = list(in_channels_list)
 
@@ -149,7 +160,8 @@ def forward(self, inputs):
 
 class UNet(BackboneAsUNet):
     def __init__(self, backbone, out_channels: int, return_layers: dict = None,
-                 block: Type[nn.Module] = TwoConvNormRelu, final_activation=None):
+                 block: Type[nn.Module] = TwoConvNormRelu, block_kwargs: dict = None, final_activation=None,
+                 interpolate='nearest', **kwargs):
         """U-Net.
 
         Examples:
 
@@ -185,12 +197,28 @@ def __init__(self, backbone, out_channels: int, return_layers: dict = None,
             in_channels_list=list(backbone.out_channels),
             out_channels=out_channels,
             block=block,
-            final_activation=final_activation if out_channels else None
+            block_kwargs=block_kwargs,
+            final_activation=final_activation if out_channels else None,
+            interpolate=interpolate,
+            **kwargs
         )
 
 
+def _ni_pretrained(pretrained):
+    if pretrained:
+        raise NotImplementedError('The `pretrained` option is not yet available for this model.')
+
+
+def _default_unet_kwargs(backbone_kwargs, pretrained=False):
+    _ni_pretrained(pretrained)
+    kw = dict()
+    kw.update({} if backbone_kwargs is None else backbone_kwargs)
+    return kw
+
+
 class U22(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, block_cls=None):
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
         """U-Net 22.
 
         U-Net with 22 convolutions on 5 feature resolutions (1, 1/2, 1/4, 1/8, 1/16) and one final output layer.
 
@@ -201,13 +229,23 @@ def __init__(self, in_channels, out_channels, final_activation=None, block_cls=N
         Args:
             in_channels: Number of input channels.
             out_channels: Number of output channels. If set to 0, the output layer is omitted.
+            final_activation: Final activation function. Only used if ``out_channels > 0``.
+            backbone_kwargs: Keyword arguments for encoder.
+            pretrained: Whether to use a pretrained encoder. If True, default weights are used.
+                Alternatively, ``pretrained`` can be a URL of a ``state_dict`` that is hosted online.
+            block_cls: Module class that defines a convolutional block. Default: ``TwoConvNormRelu``.
+            **kwargs: Additional keyword arguments for ``cd.models.UNet``.
         """
-        super().__init__(UNetEncoder(in_channels=in_channels, block_cls=block_cls), out_channels=out_channels,
-                         final_activation=final_activation, block=block_cls)
+        super().__init__(
+            UNetEncoder(in_channels=in_channels, block_cls=block_cls,
+                        **_default_unet_kwargs(backbone_kwargs, pretrained)),
+            out_channels=out_channels, final_activation=final_activation, block=block_cls, **kwargs
+        )
 
 
 class ResUNet(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, **kwargs):
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
         """Residual U-Net.
 
         U-Net with residual blocks.
 
@@ -216,14 +254,23 @@ def __init__(self, in_channels, out_channels, final_activation=None, **kwargs):
             in_channels: Number of input channels.
             out_channels: Number of output channels. If set to 0, the output layer is omitted.
             final_activation: Final activation function.
-            **kwargs: Keyword arguments for `UNetEncoder`.
+            backbone_kwargs: Keyword arguments for encoder.
+            pretrained: Whether to use a pretrained encoder. If True, default weights are used.
+                Alternatively, ``pretrained`` can be a URL of a ``state_dict`` that is hosted online.
+            block_cls: Module class that defines a convolutional block. Default: ``ResBlock``.
+            **kwargs: Additional keyword arguments for ``cd.models.UNet``.
         """
-        super().__init__(UNetEncoder(in_channels=in_channels, block_cls=ResBlock, **kwargs), out_channels=out_channels,
-                         final_activation=final_activation, block=ResBlock)
+        block_cls = block_cls or ResBlock
+        super().__init__(
+            UNetEncoder(in_channels=in_channels, block_cls=block_cls,
+                        **_default_unet_kwargs(backbone_kwargs, pretrained)),
+            out_channels=out_channels, final_activation=final_activation, block=block_cls, **kwargs
+        )
 
 
 class SlimU22(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, block_cls=None):
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
         """Slim U-Net 22.
 
         U-Net with 22 convolutions on 5 feature resolutions (1, 1/2, 1/4, 1/8, 1/16) and one final output layer.
 
@@ -233,14 +280,22 @@ def __init__(self, in_channels, out_channels, final_activation=None, block_cls=N
             in_channels: Number of input channels.
             out_channels: Number of output channels. If set to 0, the output layer is omitted.
             final_activation: Final activation function. Only used if ``out_channels > 0``.
+            backbone_kwargs: Keyword arguments for encoder.
+            pretrained: Whether to use a pretrained encoder. If True, default weights are used.
+                Alternatively, ``pretrained`` can be a URL of a ``state_dict`` that is hosted online.
             block_cls: Module class that defines a convolutional block. Default: ``TwoConvNormRelu``.
+            **kwargs: Additional keyword arguments for ``cd.models.UNet``.
""" - super().__init__(UNetEncoder(in_channels=in_channels, base_channels=32, block_cls=block_cls), - out_channels=out_channels, final_activation=final_activation, block=block_cls) + super().__init__( + UNetEncoder(in_channels=in_channels, base_channels=32, block_cls=block_cls, + **_default_unet_kwargs(backbone_kwargs, pretrained)), + out_channels=out_channels, final_activation=final_activation, block=block_cls, **kwargs + ) class WideU22(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, block_cls=None): + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): """Slim U-Net 22. U-Net with 22 convolutions on 5 feature resolutions (1, 1/2, 1/4, 1/8, 1/16) and one final output layer. @@ -250,14 +305,22 @@ def __init__(self, in_channels, out_channels, final_activation=None, block_cls=N in_channels: Number of input channels. out_channels: Number of output channels. If set to 0, the output layer is omitted. final_activation: Final activation function. Only used if ``out_channels > 0``. + backbone_kwargs: Keyword arguments for encoder. + pretrained: Whether to use a pretrained encoder. If True default weights are used. + Alternatively, ``pretrained`` can be a URL of a ``state_dict`` that is hosted online. block_cls: Module class that defines a convolutional block. Default: ``TwoConvNormRelu``. + **kwargs: Additional keyword arguments for ``cd.models.UNet``. """ - super().__init__(UNetEncoder(in_channels=in_channels, base_channels=128, block_cls=block_cls), - out_channels=out_channels, final_activation=final_activation, block=block_cls) + super().__init__( + UNetEncoder(in_channels=in_channels, base_channels=128, block_cls=block_cls, + **_default_unet_kwargs(backbone_kwargs, pretrained)), + out_channels=out_channels, final_activation=final_activation, block=block_cls, **kwargs + ) class U17(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, block_cls=None): + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): """U-Net 17. U-Net with 17 convolutions on 4 feature resolutions (1, 1/2, 1/4, 1/8) and one final output layer. @@ -266,14 +329,22 @@ def __init__(self, in_channels, out_channels, final_activation=None, block_cls=N in_channels: Number of input channels. out_channels: Number of output channels. If set to 0, the output layer is omitted. final_activation: Final activation function. Only used if ``out_channels > 0``. + backbone_kwargs: Keyword arguments for encoder. + pretrained: Whether to use a pretrained encoder. If True default weights are used. + Alternatively, ``pretrained`` can be a URL of a ``state_dict`` that is hosted online. block_cls: Module class that defines a convolutional block. Default: ``TwoConvNormRelu``. + **kwargs: Additional keyword arguments for ``cd.models.UNet``. 
""" - super().__init__(UNetEncoder(in_channels=in_channels, depth=4, block_cls=block_cls), out_channels=out_channels, - final_activation=final_activation, block=block_cls) + super().__init__( + UNetEncoder(in_channels=in_channels, depth=4, block_cls=block_cls, + **_default_unet_kwargs(backbone_kwargs, pretrained)), + out_channels=out_channels, final_activation=final_activation, block=block_cls, **kwargs + ) class U12(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, block_cls=None): + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): """U-Net 12. U-Net with 12 convolutions on 3 feature resolutions (1, 1/2, 1/4) and one final output layer. @@ -282,14 +353,28 @@ def __init__(self, in_channels, out_channels, final_activation=None, block_cls=N in_channels: Number of input channels. out_channels: Number of output channels. If set to 0, the output layer is omitted. final_activation: Final activation function. Only used if ``out_channels > 0``. + backbone_kwargs: Keyword arguments for encoder. + pretrained: Whether to use a pretrained encoder. If True default weights are used. + Alternatively, ``pretrained`` can be a URL of a ``state_dict`` that is hosted online. block_cls: Module class that defines a convolutional block. Default: ``TwoConvNormRelu``. + **kwargs: Additional keyword arguments for ``cd.models.UNet``. """ - super().__init__(UNetEncoder(in_channels=in_channels, depth=3, block_cls=block_cls), out_channels=out_channels, - final_activation=final_activation, block=block_cls) + super().__init__( + UNetEncoder(in_channels=in_channels, depth=3, block_cls=block_cls, + **_default_unet_kwargs(backbone_kwargs, pretrained)), + out_channels=out_channels, final_activation=final_activation, block=block_cls, **kwargs + ) + + +def _default_res_kwargs(backbone_kwargs, pretrained=False): + kw = dict(fused_initial=False, pretrained=pretrained) + kw.update({} if backbone_kwargs is None else backbone_kwargs) + return kw class ResNet18UNet(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs): + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): """ResNet 18 U-Net. A U-Net with ResNet 18 encoder. @@ -298,96 +383,117 @@ def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs= in_channels: Number of input channels. out_channels: Number of output channels. If set to 0, the output layer is omitted. final_activation: Final activation function. Only used if ``out_channels > 0``. - res_kwargs: Keyword arguments for encoder. - **kwargs: + backbone_kwargs: Keyword arguments for encoder. + pretrained: Whether to use a pretrained encoder. If True default weights are used. + Alternatively, ``pretrained`` can be a URL of a ``state_dict`` that is hosted online. + block_cls: Module class that defines a convolutional block. Default: ``TwoConvNormRelu``. + **kwargs: Additional keyword arguments for ``cd.models.UNet``. 
""" - super().__init__(ResNet18(in_channels, **(res_kwargs or {})), out_channels, final_activation=final_activation, - **kwargs) + super().__init__(ResNet18(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)), + out_channels, final_activation=final_activation, block=block_cls, **kwargs) class ResNet34UNet(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs): - super().__init__(ResNet34(in_channels, **(res_kwargs or {})), out_channels, final_activation=final_activation, - **kwargs) + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): + super().__init__(ResNet34(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)), + out_channels, final_activation=final_activation, block=block_cls, **kwargs) __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'ResNet 34') class ResNet50UNet(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs): - super().__init__(ResNet50(in_channels, **(res_kwargs or {})), out_channels, final_activation=final_activation, - **kwargs) + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): + super().__init__(ResNet50(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)), + out_channels, final_activation=final_activation, block=block_cls, **kwargs) __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'ResNet 50') class ResNet101UNet(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs): - super().__init__(ResNet101(in_channels, **(res_kwargs or {})), out_channels, final_activation=final_activation, - **kwargs) + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): + super().__init__(ResNet101(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)), + out_channels, final_activation=final_activation, block=block_cls, **kwargs) __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'ResNet 101') class ResNet152UNet(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs): - super().__init__(ResNet152(in_channels, **(res_kwargs or {})), out_channels, final_activation=final_activation, - **kwargs) + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): + super().__init__(ResNet152(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)), + out_channels, final_activation=final_activation, block=block_cls, **kwargs) __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'ResNet 152') class ResNeXt50UNet(UNet): - def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs): - super().__init__(ResNeXt50_32x4d(in_channels, **(res_kwargs or {})), out_channels, - final_activation=final_activation, **kwargs) + def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False, + block_cls=None, **kwargs): + super().__init__( + ResNeXt50_32x4d(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)), + out_channels, final_activation=final_activation, block=block_cls, **kwargs) __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'ResNeXt 50') 
 
 
 class ResNeXt101UNet(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs):
-        super().__init__(ResNeXt101_32x8d(in_channels, **(res_kwargs or {})), out_channels,
-                         final_activation=final_activation, **kwargs)
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
+        super().__init__(
+            ResNeXt101_32x8d(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)),
+            out_channels, final_activation=final_activation, block=block_cls, **kwargs)
 
     __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'ResNeXt 101')
 
 
 class ResNeXt152UNet(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs):
-        super().__init__(ResNeXt152_32x8d(in_channels, **(res_kwargs or {})), out_channels,
-                         final_activation=final_activation, **kwargs)
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
+        super().__init__(
+            ResNeXt152_32x8d(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)),
+            out_channels, final_activation=final_activation, block=block_cls, **kwargs)
 
     __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'ResNeXt 152')
 
 
 class WideResNet50UNet(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs):
-        super().__init__(WideResNet50_2(in_channels, **(res_kwargs or {})), out_channels,
-                         final_activation=final_activation, **kwargs)
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
+        super().__init__(
+            WideResNet50_2(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)),
+            out_channels, final_activation=final_activation, block=block_cls, **kwargs)
 
     __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'Wide ResNet 50')
 
 
 class WideResNet101UNet(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs):
-        super().__init__(WideResNet101_2(in_channels, **(res_kwargs or {})), out_channels,
-                         final_activation=final_activation, **kwargs)
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
+        super().__init__(
+            WideResNet101_2(in_channels, **_default_res_kwargs(backbone_kwargs, pretrained)),
+            out_channels, final_activation=final_activation, block=block_cls, **kwargs)
 
     __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'Wide ResNet 101')
 
 
 class MobileNetV3SmallUNet(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs):
-        super().__init__(MobileNetV3Small(in_channels, **(res_kwargs or {})), out_channels,
-                         final_activation=final_activation, **kwargs)
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
+        _ni_pretrained(pretrained)
+        super().__init__(MobileNetV3Small(in_channels, **(backbone_kwargs or {})), out_channels,
+                         final_activation=final_activation, block=block_cls, **kwargs)
 
     __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'Small MobileNet V3')
 
 
 class MobileNetV3LargeUNet(UNet):
-    def __init__(self, in_channels, out_channels, final_activation=None, res_kwargs=None, **kwargs):
-        super().__init__(MobileNetV3Large(in_channels, **(res_kwargs or {})), out_channels,
-                         final_activation=final_activation, **kwargs)
+    def __init__(self, in_channels, out_channels, final_activation=None, backbone_kwargs=None, pretrained=False,
+                 block_cls=None, **kwargs):
+        _ni_pretrained(pretrained)
+        super().__init__(MobileNetV3Large(in_channels, **(backbone_kwargs or {})), out_channels,
+                         final_activation=final_activation, block=block_cls, **kwargs)
 
     __init__.__doc__ = ResNet18UNet.__init__.__doc__.replace('ResNet 18', 'Large MobileNet V3')
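
A minimal usage sketch of the updated constructors, assuming the ``cd`` import alias used in the docstrings above; channel counts, input sizes and option values are illustrative, not defaults taken from the patch:

```python
import torch
import celldetection as cd

# interpolate selects the top-down upsampling mode of the decoder head
# (patch default: 'nearest'); block_kwargs would be forwarded to every block_cls.
model = cd.models.U22(in_channels=3, out_channels=2, interpolate='bilinear')
x = torch.rand(1, 3, 256, 256)
y = model(x)  # with out_channels > 0 the head returns a single Tensor[1, 2, 256, 256]
```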
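The encoder arguments are renamed from ``res_kwargs`` to ``backbone_kwargs``, and ``pretrained`` is now accepted everywhere: the ResNet/ResNeXt variants forward it to the encoder via ``_default_res_kwargs``, while the plain U-Nets and the MobileNet variants guard it with ``_ni_pretrained``. A sketch of both paths (argument values illustrative):

```python
import celldetection as cd

# ResNet-backed variant: pretrained and backbone_kwargs reach the encoder.
model = cd.models.ResNet18UNet(in_channels=3, out_channels=2, pretrained=False,
                               backbone_kwargs=dict(fused_initial=False))

# Plain U-Nets reject pretrained=True until default weights are published.
try:
    cd.models.U22(in_channels=3, out_channels=2, pretrained=True)
except NotImplementedError:
    pass  # 'The `pretrained` option is not yet available for this model.'
```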