Merged
Changes from all commits
40 commits
31fadbe
Adding multiweight support for shufflenetv2 prototype models
jdsgomes Oct 29, 2021
1e578b7
Revert "Adding multiweight support for shufflenetv2 prototype models"
jdsgomes Oct 29, 2021
85e4429
Merge branch 'pytorch:main' into main
jdsgomes Oct 29, 2021
4e3d900
Adding multiweight support for shufflenetv2 prototype models
jdsgomes Oct 29, 2021
615b612
Revert "Adding multiweight support for shufflenetv2 prototype models"
jdsgomes Oct 29, 2021
a0bbece
Merge branch 'pytorch:main' into main
jdsgomes Oct 31, 2021
ba966f4
Merge branch 'pytorch:main' into main
jdsgomes Nov 1, 2021
6cdd49b
Merge branch 'pytorch:main' into main
jdsgomes Dec 10, 2021
d4f1638
Merge branch 'pytorch:main' into main
jdsgomes Dec 17, 2021
8626002
Merge branch 'pytorch:main' into main
jdsgomes Jan 24, 2022
f18f86f
Remove module vs method name clash
jdsgomes Jan 24, 2022
5c3668f
Merge branch 'main' into fix-giou-naming-bug
jdsgomes Jan 24, 2022
977414f
Merge branch 'pytorch:main' into main
jdsgomes Jan 26, 2022
67e95d2
Merge branch 'pytorch:main' into main
jdsgomes Jan 28, 2022
95f1271
add model contribution guidelines
jdsgomes Jan 29, 2022
1f79d42
update CONTRIBUTING_MODELS.md
jdsgomes Jan 29, 2022
2592c8d
Merge branch 'main' into model-contrib-guidelines
jdsgomes Jan 29, 2022
d8ab592
Fix formatting and typo
jdsgomes Jan 29, 2022
480683d
fix in-document links
jdsgomes Jan 29, 2022
9a3de4a
Merge branch 'main' into model-contrib-guidelines
jdsgomes Jan 29, 2022
74b5219
Update CONTRIBUTING.md
jdsgomes Jan 30, 2022
e06c180
remove Implementation Details section
jdsgomes Jan 31, 2022
1c4abae
Merge branch 'pytorch:main' into main
jdsgomes Feb 7, 2022
55e628b
Merge branch 'pytorch:main' into main
jdsgomes Feb 8, 2022
0d3b4b4
Consolidating __repr__ strings
jdsgomes Feb 8, 2022
da36356
Merge branch 'pytorch:main' into main
jdsgomes Feb 8, 2022
a2f369d
add model contribution guidelines
jdsgomes Jan 29, 2022
ad38624
update CONTRIBUTING_MODELS.md
jdsgomes Jan 29, 2022
da97cde
Fix formatting and typo
jdsgomes Jan 29, 2022
452b727
fix in-document links
jdsgomes Jan 29, 2022
89cfaca
Update CONTRIBUTING.md
jdsgomes Jan 30, 2022
217f451
remove Implementation Details section
jdsgomes Jan 31, 2022
6ecd675
Consolidating __repr__ strings
jdsgomes Feb 8, 2022
48fe604
fix merge
jdsgomes Feb 8, 2022
c7da168
fix merge
jdsgomes Feb 8, 2022
27e1604
fix merge
jdsgomes Feb 8, 2022
c25619d
remove unused code
jdsgomes Feb 8, 2022
eab1371
address PR comments
jdsgomes Feb 9, 2022
1d3910f
fix flake8 error
jdsgomes Feb 9, 2022
febfd09
Merge branch 'main' into consolidate___repr__
jdsgomes Feb 9, 2022
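Most of the diffs below apply the same consolidation: __repr__ methods that previously assembled a placeholder template and filled it with str.format(**self.__dict__), or concatenated str(...) pieces, now build the string directly with f-strings; a few files only gain a -> str return annotation. A minimal before/after sketch of the pattern (the class and its attributes are illustrative, not taken from the diff):

class Example:
    def __init__(self, size: int, p: float = 0.5) -> None:
        self.size = size
        self.p = p

    # Old style: build a placeholder template, then fill it from the instance dict.
    def _repr_old(self) -> str:
        s = self.__class__.__name__ + "("
        s += "size={size}"
        s += ", p={p}"
        s += ")"
        return s.format(**self.__dict__)

    # New style: a single f-string, reading the attributes directly.
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size}, p={self.p})"

print(Example(224))              # Example(size=224, p=0.5)
print(Example(224)._repr_old())  # same output, built the old way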
32 changes: 18 additions & 14 deletions references/classification/transforms.py
@@ -72,13 +72,15 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
return batch, target

def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_classes={num_classes}"
s += ", p={p}"
s += ", alpha={alpha}"
s += ", inplace={inplace}"
s += ")"
return s.format(**self.__dict__)
s = (
f"{self.__class__.__name__}("
f"num_classes={self.num_classes}"
f", p={self.p}"
f", alpha={self.alpha}"
f", inplace={self.inplace}"
f")"
)
return s


class RandomCutmix(torch.nn.Module):
@@ -162,10 +164,12 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
return batch, target

def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_classes={num_classes}"
s += ", p={p}"
s += ", alpha={alpha}"
s += ", inplace={inplace}"
s += ")"
return s.format(**self.__dict__)
s = (
f"{self.__class__.__name__}("
f"num_classes={self.num_classes}"
f", p={self.p}"
f", alpha={self.alpha}"
f", inplace={self.inplace}"
f")"
)
return s
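One practical difference between the two styles, noted here as context rather than something stated in the PR: str.format(**self.__dict__) can only interpolate values that live directly in the instance __dict__, so it breaks for properties or attributes stored under a different internal name, while the f-string form goes through normal attribute lookup on self. The classes touched in this PR use plain instance attributes, so the change there is mainly about readability and consistency. A small sketch of the failure mode, using a hypothetical class:

class WithProperty:
    def __init__(self, p: float) -> None:
        self._p = p  # stored under a different name than the public accessor

    @property
    def p(self) -> float:
        return self._p

    def __repr__(self) -> str:
        # The old pattern would raise KeyError('p'): __dict__ only contains '_p'.
        # return "WithProperty(p={p})".format(**self.__dict__)
        # The f-string pattern works, because self.p resolves through the property.
        return f"{self.__class__.__name__}(p={self.p})"

print(WithProperty(0.5))  # WithProperty(p=0.5)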
2 changes: 1 addition & 1 deletion test/test_datasets_download.py
@@ -180,7 +180,7 @@ def __init__(self, url, md5=None, id=None):
self.md5 = md5
self.id = id or url

def __repr__(self):
def __repr__(self) -> str:
return self.id


16 changes: 9 additions & 7 deletions torchvision/models/detection/anchor_utils.py
@@ -239,13 +239,15 @@ def _grid_default_boxes(
return torch.cat(default_boxes, dim=0)

def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "aspect_ratios={aspect_ratios}"
s += ", clip={clip}"
s += ", scales={scales}"
s += ", steps={steps}"
s += ")"
return s.format(**self.__dict__)
s = (
f"{self.__class__.__name__}("
f"aspect_ratios={self.aspect_ratios}"
f", clip={self.clip}"
f", scales={self.scales}"
f", steps={self.steps}"
")"
)
return s

def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
2 changes: 1 addition & 1 deletion torchvision/models/detection/transform.py
@@ -260,7 +260,7 @@ def postprocess(
return result

def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string = f"{self.__class__.__name__}("
_indent = "\n "
format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})"
format_string += f"{_indent}Resize(min_size={self.min_size}, max_size={self.max_size}, mode='bilinear')"
20 changes: 11 additions & 9 deletions torchvision/models/efficientnet.py
@@ -61,15 +61,17 @@ def __init__(
self.num_layers = self.adjust_depth(num_layers, depth_mult)

def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "expand_ratio={expand_ratio}"
s += ", kernel={kernel}"
s += ", stride={stride}"
s += ", input_channels={input_channels}"
s += ", out_channels={out_channels}"
s += ", num_layers={num_layers}"
s += ")"
return s.format(**self.__dict__)
s = (
f"{self.__class__.__name__}("
f"expand_ratio={self.expand_ratio}"
f", kernel={self.kernel}"
f", stride={self.stride}"
f", input_channels={self.input_channels}"
f", out_channels={self.out_channels}"
f", num_layers={self.num_layers}"
f")"
)
return s

@staticmethod
def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
21 changes: 12 additions & 9 deletions torchvision/ops/deform_conv.py
@@ -179,14 +179,17 @@ def forward(self, input: Tensor, offset: Tensor, mask: Optional[Tensor] = None)
)

def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "{in_channels}"
s += ", {out_channels}"
s += ", kernel_size={kernel_size}"
s += ", stride={stride}"
s += ", padding={padding}" if self.padding != (0, 0) else ""
s += ", dilation={dilation}" if self.dilation != (1, 1) else ""
s += ", groups={groups}" if self.groups != 1 else ""
s = (
f"{self.__class__.__name__}("
f"{self.in_channels}"
f", {self.out_channels}"
f", kernel_size={self.kernel_size}"
f", stride={self.stride}"
)
s += f", padding={self.padding}" if self.padding != (0, 0) else ""
s += f", dilation={self.dilation}" if self.dilation != (1, 1) else ""
s += f", groups={self.groups}" if self.groups != 1 else ""
s += ", bias=False" if self.bias is None else ""
s += ")"
return s.format(**self.__dict__)

return s
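The DeformConv2d repr keeps its old behaviour of printing padding, dilation, groups and bias only when they differ from the defaults; the rewrite just moves those checks from format-template concatenation to plain conditional f-string appends. A rough usage sketch of the resulting output, assuming the usual DeformConv2d constructor defaults (stride=1, padding=0, dilation=1, groups=1, bias=True); the expected strings are indicative, not copied from a run:

from torchvision.ops import DeformConv2d

# Arguments left at their defaults are omitted from the repr; bias=False only
# appears when the bias parameter is actually disabled.
print(DeformConv2d(3, 64, kernel_size=3))
# DeformConv2d(3, 64, kernel_size=(3, 3), stride=(1, 1))

print(DeformConv2d(3, 64, kernel_size=3, padding=1, bias=False))
# DeformConv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)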
14 changes: 8 additions & 6 deletions torchvision/ops/ps_roi_align.py
@@ -78,9 +78,11 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
return ps_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)

def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
s = (
f"{self.__class__.__name__}("
f"output_size={self.output_size}"
f", spatial_scale={self.spatial_scale}"
f", sampling_ratio={self.sampling_ratio}"
f")"
)
return s
7 changes: 2 additions & 5 deletions torchvision/ops/ps_roi_pool.py
@@ -64,8 +64,5 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
return ps_roi_pool(input, rois, self.output_size, self.spatial_scale)

def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ")"
return tmpstr
s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
return s
16 changes: 9 additions & 7 deletions torchvision/ops/roi_align.py
@@ -86,10 +86,12 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)

def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ", aligned=" + str(self.aligned)
tmpstr += ")"
return tmpstr
s = (
f"{self.__class__.__name__}("
f"output_size={self.output_size}"
f", spatial_scale={self.spatial_scale}"
f", sampling_ratio={self.sampling_ratio}"
f", aligned={self.aligned}"
f")"
)
return s
7 changes: 2 additions & 5 deletions torchvision/ops/roi_pool.py
@@ -66,8 +66,5 @@ def forward(self, input: Tensor, rois: Tensor) -> Tensor:
return roi_pool(input, rois, self.output_size, self.spatial_scale)

def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ")"
return tmpstr
s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
return s
7 changes: 2 additions & 5 deletions torchvision/ops/stochastic_depth.py
@@ -62,8 +62,5 @@ def forward(self, input: Tensor) -> Tensor:
return stochastic_depth(input, self.p, self.mode, self.training)

def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
tmpstr += "p=" + str(self.p)
tmpstr += ", mode=" + str(self.mode)
tmpstr += ")"
return tmpstr
s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
return s
2 changes: 1 addition & 1 deletion torchvision/prototype/features/_feature.py
@@ -96,5 +96,5 @@ def __torch_function__(

return cls(output, like=args[0])

def __repr__(self):
def __repr__(self) -> str:
return torch.Tensor.__repr__(self).replace("tensor", type(self).__name__)
2 changes: 1 addition & 1 deletion torchvision/prototype/models/_api.py
@@ -67,7 +67,7 @@ def from_str(cls, value: str) -> "WeightsEnum":
def get_state_dict(self, progress: bool) -> OrderedDict:
return load_state_dict_from_url(self.url, progress=progress)

def __repr__(self):
def __repr__(self) -> str:
return f"{self.__class__.__name__}.{self._name_}"

def __getattr__(self, name):
25 changes: 11 additions & 14 deletions torchvision/transforms/_transforms_video.py
@@ -46,8 +46,8 @@ def __call__(self, clip):
i, j, h, w = self.get_params(clip, self.size)
return F.crop(clip, i, j, h, w)

def __repr__(self):
return self.__class__.__name__ + f"(size={self.size})"
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"


class RandomResizedCropVideo(RandomResizedCrop):
@@ -79,11 +79,8 @@ def __call__(self, clip):
i, j, h, w = self.get_params(clip, self.scale, self.ratio)
return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)

def __repr__(self):
return (
self.__class__.__name__
+ f"(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"


class CenterCropVideo:
@@ -103,8 +100,8 @@ def __call__(self, clip):
"""
return F.center_crop(clip, self.crop_size)

def __repr__(self):
return self.__class__.__name__ + f"(crop_size={self.crop_size})"
def __repr__(self) -> str:
return f"{self.__class__.__name__}(crop_size={self.crop_size})"


class NormalizeVideo:
@@ -128,8 +125,8 @@ def __call__(self, clip):
"""
return F.normalize(clip, self.mean, self.std, self.inplace)

def __repr__(self):
return self.__class__.__name__ + f"(mean={self.mean}, std={self.std}, inplace={self.inplace})"
def __repr__(self) -> str:
return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"


class ToTensorVideo:
@@ -150,7 +147,7 @@ def __call__(self, clip):
"""
return F.to_tensor(clip)

def __repr__(self):
def __repr__(self) -> str:
return self.__class__.__name__


@@ -175,5 +172,5 @@ def __call__(self, clip):
clip = F.hflip(clip)
return clip

def __repr__(self):
return self.__class__.__name__ + f"(p={self.p})"
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
34 changes: 19 additions & 15 deletions torchvision/transforms/autoaugment.py
@@ -280,7 +280,7 @@ def forward(self, img: Tensor) -> Tensor:
return img

def __repr__(self) -> str:
return self.__class__.__name__ + f"(policy={self.policy}, fill={self.fill})"
return f"{self.__class__.__name__}(policy={self.policy}, fill={self.fill})"


class RandAugment(torch.nn.Module):
@@ -363,14 +363,16 @@ def forward(self, img: Tensor) -> Tensor:
return img

def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_ops={num_ops}"
s += ", magnitude={magnitude}"
s += ", num_magnitude_bins={num_magnitude_bins}"
s += ", interpolation={interpolation}"
s += ", fill={fill}"
s += ")"
return s.format(**self.__dict__)
s = (
f"{self.__class__.__name__}("
f"num_ops={self.num_ops}"
f", magnitude={self.magnitude}"
f", num_magnitude_bins={self.num_magnitude_bins}"
f", interpolation={self.interpolation}"
f", fill={self.fill}"
f")"
)
return s


class TrivialAugmentWide(torch.nn.Module):
@@ -448,9 +450,11 @@ def forward(self, img: Tensor) -> Tensor:
return _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)

def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_magnitude_bins={num_magnitude_bins}"
s += ", interpolation={interpolation}"
s += ", fill={fill}"
s += ")"
return s.format(**self.__dict__)
s = (
f"{self.__class__.__name__}("
f"num_magnitude_bins={self.num_magnitude_bins}"
f", interpolation={self.interpolation}"
f", fill={self.fill}"
f")"
)
return s
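For anyone verifying this kind of change locally, a quick sanity check is to instantiate one of the touched transforms and confirm that every constructor argument still shows up in the consolidated repr. This is a hypothetical spot check, not part of the PR's test suite:

from torchvision.transforms.autoaugment import RandAugment

ra = RandAugment()
r = repr(ra)
# Each constructor argument should appear as 'name=' in the repr string.
for field in ("num_ops", "magnitude", "num_magnitude_bins", "interpolation", "fill"):
    assert f"{field}=" in r, field
print(r)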