diff --git a/references/classification/transforms.py b/references/classification/transforms.py
index 69ee4182c54..892b4e7e6c0 100644
--- a/references/classification/transforms.py
+++ b/references/classification/transforms.py
@@ -72,13 +72,15 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
         return batch, target
 
     def __repr__(self) -> str:
-        s = self.__class__.__name__ + "("
-        s += "num_classes={num_classes}"
-        s += ", p={p}"
-        s += ", alpha={alpha}"
-        s += ", inplace={inplace}"
-        s += ")"
-        return s.format(**self.__dict__)
+        s = (
+            f"{self.__class__.__name__}("
+            f"num_classes={self.num_classes}"
+            f", p={self.p}"
+            f", alpha={self.alpha}"
+            f", inplace={self.inplace}"
+            f")"
+        )
+        return s
 
 
 class RandomCutmix(torch.nn.Module):
@@ -162,10 +164,12 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
         return batch, target
 
     def __repr__(self) -> str:
-        s = self.__class__.__name__ + "("
-        s += "num_classes={num_classes}"
-        s += ", p={p}"
-        s += ", alpha={alpha}"
-        s += ", inplace={inplace}"
-        s += ")"
-        return s.format(**self.__dict__)
+        s = (
+            f"{self.__class__.__name__}("
+            f"num_classes={self.num_classes}"
+            f", p={self.p}"
+            f", alpha={self.alpha}"
+            f", inplace={self.inplace}"
+            f")"
+        )
+        return s
diff --git a/test/test_datasets_download.py b/test/test_datasets_download.py
index 4bf31eba92b..4d2e475e1df 100644
--- a/test/test_datasets_download.py
+++ b/test/test_datasets_download.py
@@ -180,7 +180,7 @@ def __init__(self, url, md5=None, id=None):
         self.md5 = md5
         self.id = id or url
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return self.id
 
 
diff --git a/torchvision/models/detection/anchor_utils.py b/torchvision/models/detection/anchor_utils.py
index 395bf6bbda6..202294153ea 100644
--- a/torchvision/models/detection/anchor_utils.py
+++ b/torchvision/models/detection/anchor_utils.py
@@ -239,13 +239,15 @@ def _grid_default_boxes(
         return torch.cat(default_boxes, dim=0)
 
     def __repr__(self) -> str:
-        s = self.__class__.__name__ + "("
-        s += "aspect_ratios={aspect_ratios}"
-        s += ", clip={clip}"
-        s += ", scales={scales}"
-        s += ", steps={steps}"
-        s += ")"
-        return s.format(**self.__dict__)
+        s = (
+            f"{self.__class__.__name__}("
+            f"aspect_ratios={self.aspect_ratios}"
+            f", clip={self.clip}"
+            f", scales={self.scales}"
+            f", steps={self.steps}"
+            ")"
+        )
+        return s
 
     def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
         grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py
index 90d19cfc8de..960e28500a1 100644
--- a/torchvision/models/detection/transform.py
+++ b/torchvision/models/detection/transform.py
@@ -260,7 +260,7 @@ def postprocess(
         return result
 
     def __repr__(self) -> str:
-        format_string = self.__class__.__name__ + "("
+        format_string = f"{self.__class__.__name__}("
         _indent = "\n    "
         format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})"
         format_string += f"{_indent}Resize(min_size={self.min_size}, max_size={self.max_size}, mode='bilinear')"
diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py
index 6837018c09e..f7eba46cb39 100644
--- a/torchvision/models/efficientnet.py
+++ b/torchvision/models/efficientnet.py
@@ -61,15 +61,17 @@ def __init__(
         self.num_layers = self.adjust_depth(num_layers, depth_mult)
 
     def __repr__(self) -> str:
-        s = self.__class__.__name__ + "("
-        s += "expand_ratio={expand_ratio}"
-        s += ", kernel={kernel}"
-        s += ", stride={stride}"
-        s += ", input_channels={input_channels}"
-        s += ", out_channels={out_channels}"
-        s += ", num_layers={num_layers}"
-        s += ")"
-        return s.format(**self.__dict__)
+        s = (
+            f"{self.__class__.__name__}("
+            f"expand_ratio={self.expand_ratio}"
+            f", kernel={self.kernel}"
+            f", stride={self.stride}"
+            f", input_channels={self.input_channels}"
+            f", out_channels={self.out_channels}"
+            f", num_layers={self.num_layers}"
+            f")"
+        )
+        return s
 
     @staticmethod
     def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
diff --git a/torchvision/ops/deform_conv.py b/torchvision/ops/deform_conv.py
index 9f9ed49f4b9..bb4400e5c29 100644
--- a/torchvision/ops/deform_conv.py
+++ b/torchvision/ops/deform_conv.py
@@ -179,14 +179,17 @@ def forward(self, input: Tensor, offset: Tensor, mask: Optional[Tensor] = None)
         )
 
     def __repr__(self) -> str:
-        s = self.__class__.__name__ + "("
-        s += "{in_channels}"
-        s += ", {out_channels}"
-        s += ", kernel_size={kernel_size}"
-        s += ", stride={stride}"
-        s += ", padding={padding}" if self.padding != (0, 0) else ""
-        s += ", dilation={dilation}" if self.dilation != (1, 1) else ""
-        s += ", groups={groups}" if self.groups != 1 else ""
+        s = (
+            f"{self.__class__.__name__}("
+            f"{self.in_channels}"
+            f", {self.out_channels}"
+            f", kernel_size={self.kernel_size}"
+            f", stride={self.stride}"
+        )
+        s += f", padding={self.padding}" if self.padding != (0, 0) else ""
+        s += f", dilation={self.dilation}" if self.dilation != (1, 1) else ""
+        s += f", groups={self.groups}" if self.groups != 1 else ""
         s += ", bias=False" if self.bias is None else ""
         s += ")"
-        return s.format(**self.__dict__)
+
+        return s
diff --git a/torchvision/ops/ps_roi_align.py b/torchvision/ops/ps_roi_align.py
index b01dc35e129..7153e49ac05 100644
--- a/torchvision/ops/ps_roi_align.py
+++ b/torchvision/ops/ps_roi_align.py
@@ -78,9 +78,11 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
         return ps_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)
 
     def __repr__(self) -> str:
-        tmpstr = self.__class__.__name__ + "("
-        tmpstr += "output_size=" + str(self.output_size)
-        tmpstr += ", spatial_scale=" + str(self.spatial_scale)
-        tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
-        tmpstr += ")"
-        return tmpstr
+        s = (
+            f"{self.__class__.__name__}("
+            f"output_size={self.output_size}"
+            f", spatial_scale={self.spatial_scale}"
+            f", sampling_ratio={self.sampling_ratio}"
+            f")"
+        )
+        return s
diff --git a/torchvision/ops/ps_roi_pool.py b/torchvision/ops/ps_roi_pool.py
index 867383eacf1..a27c36ee76c 100644
--- a/torchvision/ops/ps_roi_pool.py
+++ b/torchvision/ops/ps_roi_pool.py
@@ -64,8 +64,5 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
         return ps_roi_pool(input, rois, self.output_size, self.spatial_scale)
 
     def __repr__(self) -> str:
-        tmpstr = self.__class__.__name__ + "("
-        tmpstr += "output_size=" + str(self.output_size)
-        tmpstr += ", spatial_scale=" + str(self.spatial_scale)
-        tmpstr += ")"
-        return tmpstr
+        s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
+        return s
diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py
index ed412965932..131c1b81d0f 100644
--- a/torchvision/ops/roi_align.py
+++ b/torchvision/ops/roi_align.py
@@ -86,10 +86,12 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
         return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)
 
     def __repr__(self) -> str:
-        tmpstr = self.__class__.__name__ + "("
-        tmpstr += "output_size=" + str(self.output_size)
-        tmpstr += ", spatial_scale=" + str(self.spatial_scale)
-        tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
-        tmpstr += ", aligned=" + str(self.aligned)
-        tmpstr += ")"
-        return tmpstr
+        s = (
+            f"{self.__class__.__name__}("
+            f"output_size={self.output_size}"
+            f", spatial_scale={self.spatial_scale}"
+            f", sampling_ratio={self.sampling_ratio}"
+            f", aligned={self.aligned}"
+            f")"
+        )
+        return s
diff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py
index 37a8f42058c..37cbf7febee 100644
--- a/torchvision/ops/roi_pool.py
+++ b/torchvision/ops/roi_pool.py
@@ -66,8 +66,5 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
         return roi_pool(input, rois, self.output_size, self.spatial_scale)
 
     def __repr__(self) -> str:
-        tmpstr = self.__class__.__name__ + "("
-        tmpstr += "output_size=" + str(self.output_size)
-        tmpstr += ", spatial_scale=" + str(self.spatial_scale)
-        tmpstr += ")"
-        return tmpstr
+        s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
+        return s
diff --git a/torchvision/ops/stochastic_depth.py b/torchvision/ops/stochastic_depth.py
index 9f5d432d79e..ff8167b2315 100644
--- a/torchvision/ops/stochastic_depth.py
+++ b/torchvision/ops/stochastic_depth.py
@@ -62,8 +62,5 @@ def forward(self, input: Tensor) -> Tensor:
         return stochastic_depth(input, self.p, self.mode, self.training)
 
     def __repr__(self) -> str:
-        tmpstr = self.__class__.__name__ + "("
-        tmpstr += "p=" + str(self.p)
-        tmpstr += ", mode=" + str(self.mode)
-        tmpstr += ")"
-        return tmpstr
+        s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
+        return s
diff --git a/torchvision/prototype/features/_feature.py b/torchvision/prototype/features/_feature.py
index cd52f1f80ad..1837ffc1e89 100644
--- a/torchvision/prototype/features/_feature.py
+++ b/torchvision/prototype/features/_feature.py
@@ -96,5 +96,5 @@ def __torch_function__(
 
         return cls(output, like=args[0])
 
-    def __repr__(self):
+    def __repr__(self) -> str:
        return torch.Tensor.__repr__(self).replace("tensor", type(self).__name__)
diff --git a/torchvision/prototype/models/_api.py b/torchvision/prototype/models/_api.py
index e27b4d7adda..4ba0ee05f08 100644
--- a/torchvision/prototype/models/_api.py
+++ b/torchvision/prototype/models/_api.py
@@ -67,7 +67,7 @@ def from_str(cls, value: str) -> "WeightsEnum":
     def get_state_dict(self, progress: bool) -> OrderedDict:
         return load_state_dict_from_url(self.url, progress=progress)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return f"{self.__class__.__name__}.{self._name_}"
 
     def __getattr__(self, name):
diff --git a/torchvision/transforms/_transforms_video.py b/torchvision/transforms/_transforms_video.py
index 32fa0191959..4a36c8abbf9 100644
--- a/torchvision/transforms/_transforms_video.py
+++ b/torchvision/transforms/_transforms_video.py
@@ -46,8 +46,8 @@ def __call__(self, clip):
         i, j, h, w = self.get_params(clip, self.size)
         return F.crop(clip, i, j, h, w)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(size={self.size})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size})"
 
 
 class RandomResizedCropVideo(RandomResizedCrop):
@@ -79,11 +79,8 @@ def __call__(self, clip):
         i, j, h, w = self.get_params(clip, self.scale, self.ratio)
         return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)
 
-    def __repr__(self):
-        return (
-            self.__class__.__name__
-            + f"(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
-        )
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
 
 
 class CenterCropVideo:
@@ -103,8 +100,8 @@ def __call__(self, clip):
         """
         return F.center_crop(clip, self.crop_size)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(crop_size={self.crop_size})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(crop_size={self.crop_size})"
 
 
 class NormalizeVideo:
@@ -128,8 +125,8 @@ def __call__(self, clip):
         """
         return F.normalize(clip, self.mean, self.std, self.inplace)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(mean={self.mean}, std={self.std}, inplace={self.inplace})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"
 
 
 class ToTensorVideo:
@@ -150,7 +147,7 @@ def __call__(self, clip):
         """
         return F.to_tensor(clip)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return self.__class__.__name__
 
 
@@ -175,5 +172,5 @@ def __call__(self, clip):
             clip = F.hflip(clip)
         return clip
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"
diff --git a/torchvision/transforms/autoaugment.py b/torchvision/transforms/autoaugment.py
index 228b2f8dd9b..d58077c9b14 100644
--- a/torchvision/transforms/autoaugment.py
+++ b/torchvision/transforms/autoaugment.py
@@ -280,7 +280,7 @@ def forward(self, img: Tensor) -> Tensor:
         return img
 
     def __repr__(self) -> str:
-        return self.__class__.__name__ + f"(policy={self.policy}, fill={self.fill})"
+        return f"{self.__class__.__name__}(policy={self.policy}, fill={self.fill})"
 
 
 class RandAugment(torch.nn.Module):
@@ -363,14 +363,16 @@ def forward(self, img: Tensor) -> Tensor:
         return img
 
     def __repr__(self) -> str:
-        s = self.__class__.__name__ + "("
-        s += "num_ops={num_ops}"
-        s += ", magnitude={magnitude}"
-        s += ", num_magnitude_bins={num_magnitude_bins}"
-        s += ", interpolation={interpolation}"
-        s += ", fill={fill}"
-        s += ")"
-        return s.format(**self.__dict__)
+        s = (
+            f"{self.__class__.__name__}("
+            f"num_ops={self.num_ops}"
+            f", magnitude={self.magnitude}"
+            f", num_magnitude_bins={self.num_magnitude_bins}"
+            f", interpolation={self.interpolation}"
+            f", fill={self.fill}"
+            f")"
+        )
+        return s
 
 
 class TrivialAugmentWide(torch.nn.Module):
@@ -448,9 +450,11 @@ def forward(self, img: Tensor) -> Tensor:
         return _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
 
    def __repr__(self) -> str:
-        s = self.__class__.__name__ + "("
-        s += "num_magnitude_bins={num_magnitude_bins}"
-        s += ", interpolation={interpolation}"
-        s += ", fill={fill}"
-        s += ")"
-        return s.format(**self.__dict__)
+        s = (
+            f"{self.__class__.__name__}("
+            f"num_magnitude_bins={self.num_magnitude_bins}"
+            f", interpolation={self.interpolation}"
+            f", fill={self.fill}"
+            f")"
+        )
+        return s
diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py
index 5fdc8df89f4..9fc79c1d8cc 100644
--- a/torchvision/transforms/transforms.py
+++ b/torchvision/transforms/transforms.py
@@ -95,7 +95,7 @@ def __call__(self, img):
             img = t(img)
         return img
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         format_string = self.__class__.__name__ + "("
         for t in self.transforms:
             format_string += "\n"
@@ -134,8 +134,8 @@ def __call__(self, pic):
         """
         return F.to_tensor(pic)
 
-    def __repr__(self):
-        return self.__class__.__name__ + "()"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}()"
 
 
 class PILToTensor:
@@ -161,8 +161,8 @@ def __call__(self, pic):
         """
         return F.pil_to_tensor(pic)
 
-    def __repr__(self):
-        return self.__class__.__name__ + "()"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}()"
 
 
 class ConvertImageDtype(torch.nn.Module):
@@ -226,7 +226,7 @@ def __call__(self, pic):
         """
         return F.to_pil_image(pic, self.mode)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         format_string = self.__class__.__name__ + "("
         if self.mode is not None:
             format_string += f"mode={self.mode}"
@@ -269,8 +269,8 @@ def forward(self, tensor: Tensor) -> Tensor:
         """
         return F.normalize(tensor, self.mean, self.std, self.inplace)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(mean={self.mean}, std={self.std})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(mean={self.mean}, std={self.std})"
 
 
 class Resize(torch.nn.Module):
@@ -348,9 +348,9 @@ def forward(self, img):
         """
         return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         detail = f"(size={self.size}, interpolation={self.interpolation.value}, max_size={self.max_size}, antialias={self.antialias})"
-        return self.__class__.__name__ + detail
+        return f"{self.__class__.__name__}{detail}"
 
 
 class CenterCrop(torch.nn.Module):
@@ -380,8 +380,8 @@ def forward(self, img):
         """
         return F.center_crop(img, self.size)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(size={self.size})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size})"
 
 
 class Pad(torch.nn.Module):
@@ -453,8 +453,8 @@ def forward(self, img):
         """
         return F.pad(img, self.padding, self.fill, self.padding_mode)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(padding={self.padding}, fill={self.fill}, padding_mode={self.padding_mode})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(padding={self.padding}, fill={self.fill}, padding_mode={self.padding_mode})"
 
 
 class Lambda:
@@ -473,8 +473,8 @@ def __init__(self, lambd):
     def __call__(self, img):
         return self.lambd(img)
 
-    def __repr__(self):
-        return self.__class__.__name__ + "()"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}()"
 
 
 class RandomTransforms:
@@ -493,7 +493,7 @@ def __init__(self, transforms):
     def __call__(self, *args, **kwargs):
         raise NotImplementedError()
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         format_string = self.__class__.__name__ + "("
         for t in self.transforms:
             format_string += "\n"
@@ -535,7 +535,7 @@ def forward(self, img):
             img = t(img)
         return img
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         format_string = self.__class__.__name__ + "("
         format_string += f"\n    p={self.p}"
         for t in self.transforms:
@@ -569,10 +569,8 @@ def __call__(self, *args):
         t = random.choices(self.transforms, weights=self.p)[0]
         return t(*args)
 
-    def __repr__(self):
-        format_string = super().__repr__()
-        format_string += f"(p={self.p})"
-        return format_string
+    def __repr__(self) -> str:
+        return f"{super().__repr__()}(p={self.p})"
 
 
 class RandomCrop(torch.nn.Module):
@@ -679,8 +677,8 @@ def forward(self, img):
 
         return F.crop(img, i, j, h, w)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(size={self.size}, padding={self.padding})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size}, padding={self.padding})"
 
 
 class RandomHorizontalFlip(torch.nn.Module):
@@ -710,8 +708,8 @@ def forward(self, img):
             return F.hflip(img)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"
 
 
 class RandomVerticalFlip(torch.nn.Module):
@@ -741,8 +739,8 @@ def forward(self, img):
            return F.vflip(img)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"
 
 
 class RandomPerspective(torch.nn.Module):
@@ -842,8 +840,8 @@ def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[L
         endpoints = [topleft, topright, botright, botleft]
         return startpoints, endpoints
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"
 
 
 class RandomResizedCrop(torch.nn.Module):
@@ -954,7 +952,7 @@ def forward(self, img):
         i, j, h, w = self.get_params(img, self.scale, self.ratio)
         return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         interpolate_str = self.interpolation.value
         format_string = self.__class__.__name__ + f"(size={self.size}"
         format_string += f", scale={tuple(round(s, 4) for s in self.scale)}"
@@ -1006,8 +1004,8 @@ def forward(self, img):
         """
         return F.five_crop(img, self.size)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(size={self.size})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size})"
 
 
 class TenCrop(torch.nn.Module):
@@ -1056,8 +1054,8 @@ def forward(self, img):
         """
         return F.ten_crop(img, self.size, self.vertical_flip)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(size={self.size}, vertical_flip={self.vertical_flip})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size}, vertical_flip={self.vertical_flip})"
 
 
 class LinearTransformation(torch.nn.Module):
@@ -1130,11 +1128,13 @@ def forward(self, tensor: Tensor) -> Tensor:
         tensor = transformed_tensor.view(shape)
         return tensor
 
-    def __repr__(self):
-        format_string = self.__class__.__name__ + "(transformation_matrix="
-        format_string += str(self.transformation_matrix.tolist()) + ")"
-        format_string += ", (mean_vector=" + str(self.mean_vector.tolist()) + ")"
-        return format_string
+    def __repr__(self) -> str:
+        s = (
+            f"{self.__class__.__name__}(transformation_matrix="
+            f"{self.transformation_matrix.tolist()}"
+            f", mean_vector={self.mean_vector.tolist()})"
+        )
+        return s
 
 
 class ColorJitter(torch.nn.Module):
@@ -1242,13 +1242,15 @@ def forward(self, img):
 
         return img
 
-    def __repr__(self):
-        format_string = self.__class__.__name__ + "("
-        format_string += f"brightness={self.brightness}"
-        format_string += f", contrast={self.contrast}"
-        format_string += f", saturation={self.saturation}"
-        format_string += f", hue={self.hue})"
-        return format_string
+    def __repr__(self) -> str:
+        s = (
+            f"{self.__class__.__name__}("
+            f"brightness={self.brightness}"
+            f", contrast={self.contrast}"
+            f", saturation={self.saturation}"
+            f", hue={self.hue})"
+        )
+        return s
 
 
 class RandomRotation(torch.nn.Module):
@@ -1346,7 +1348,7 @@ def forward(self, img):
 
         return F.rotate(img, angle, self.resample, self.expand, self.center, fill)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         interpolate_str = self.interpolation.value
         format_string = self.__class__.__name__ + f"(degrees={self.degrees}"
         format_string += f", interpolation={interpolate_str}"
@@ -1529,24 +1531,17 @@ def forward(self, img):
 
         return F.affine(img, *ret, interpolation=self.interpolation, fill=fill, center=self.center)
 
-    def __repr__(self):
-        s = "{name}(degrees={degrees}"
-        if self.translate is not None:
-            s += ", translate={translate}"
-        if self.scale is not None:
-            s += ", scale={scale}"
-        if self.shear is not None:
-            s += ", shear={shear}"
-        if self.interpolation != InterpolationMode.NEAREST:
-            s += ", interpolation={interpolation}"
-        if self.fill != 0:
-            s += ", fill={fill}"
-        if self.center is not None:
-            s += ", center={center}"
+    def __repr__(self) -> str:
+        s = f"{self.__class__.__name__}(degrees={self.degrees}"
+        s += f", translate={self.translate}" if self.translate is not None else ""
+        s += f", scale={self.scale}" if self.scale is not None else ""
+        s += f", shear={self.shear}" if self.shear is not None else ""
+        s += f", interpolation={self.interpolation.value}" if self.interpolation != InterpolationMode.NEAREST else ""
+        s += f", fill={self.fill}" if self.fill != 0 else ""
+        s += f", center={self.center}" if self.center is not None else ""
         s += ")"
-        d = dict(self.__dict__)
-        d["interpolation"] = self.interpolation.value
-        return s.format(name=self.__class__.__name__, **d)
+
+        return s
 
 
 class Grayscale(torch.nn.Module):
@@ -1580,8 +1575,8 @@ def forward(self, img):
         """
         return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(num_output_channels={self.num_output_channels})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(num_output_channels={self.num_output_channels})"
 
 
 class RandomGrayscale(torch.nn.Module):
@@ -1618,8 +1613,8 @@ def forward(self, img):
             return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"
 
 
 class RandomErasing(torch.nn.Module):
@@ -1748,13 +1743,16 @@ def forward(self, img):
             return F.erase(img, x, y, h, w, v, self.inplace)
         return img
 
-    def __repr__(self):
-        s = f"(p={self.p}, "
-        s += f"scale={self.scale}, "
-        s += f"ratio={self.ratio}, "
-        s += f"value={self.value}, "
-        s += f"inplace={self.inplace})"
-        return self.__class__.__name__ + s
+    def __repr__(self) -> str:
+        s = (
+            f"{self.__class__.__name__}"
+            f"(p={self.p}, "
+            f"scale={self.scale}, "
+            f"ratio={self.ratio}, "
+            f"value={self.value}, "
+            f"inplace={self.inplace})"
+        )
+        return s
 
 
 class GaussianBlur(torch.nn.Module):
@@ -1818,10 +1816,9 @@ def forward(self, img: Tensor) -> Tensor:
         sigma = self.get_params(self.sigma[0], self.sigma[1])
         return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])
 
-    def __repr__(self):
-        s = f"(kernel_size={self.kernel_size}, "
-        s += f"sigma={self.sigma})"
-        return self.__class__.__name__ + s
+    def __repr__(self) -> str:
+        s = f"{self.__class__.__name__}(kernel_size={self.kernel_size}, sigma={self.sigma})"
+        return s
 
 
 def _setup_size(size, error_msg):
@@ -1883,8 +1880,8 @@ def forward(self, img):
             return F.invert(img)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"
 
 
 class RandomPosterize(torch.nn.Module):
@@ -1916,8 +1913,8 @@ def forward(self, img):
             return F.posterize(img, self.bits)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(bits={self.bits},p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(bits={self.bits},p={self.p})"
 
 
 class RandomSolarize(torch.nn.Module):
@@ -1949,8 +1946,8 @@ def forward(self, img):
             return F.solarize(img, self.threshold)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(threshold={self.threshold},p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(threshold={self.threshold},p={self.p})"
 
 
 class RandomAdjustSharpness(torch.nn.Module):
@@ -1982,8 +1979,8 @@ def forward(self, img):
             return F.adjust_sharpness(img, self.sharpness_factor)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(sharpness_factor={self.sharpness_factor},p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(sharpness_factor={self.sharpness_factor},p={self.p})"
 
 
 class RandomAutocontrast(torch.nn.Module):
@@ -2013,8 +2010,8 @@ def forward(self, img):
             return F.autocontrast(img)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"
 
 
 class RandomEqualize(torch.nn.Module):
@@ -2044,5 +2041,5 @@ def forward(self, img):
             return F.equalize(img)
         return img
 
-    def __repr__(self):
-        return self.__class__.__name__ + f"(p={self.p})"
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(p={self.p})"