diff --git a/references/detection/coco_utils.py b/references/detection/coco_utils.py
index 31946b5c85a..efbbdd3f4ea 100644
--- a/references/detection/coco_utils.py
+++ b/references/detection/coco_utils.py
@@ -195,7 +195,7 @@ def convert_to_coco_api(ds):
 
 
 def get_coco_api_from_dataset(dataset):
-    for i in range(10):
+    for _ in range(10):
         if isinstance(dataset, torchvision.datasets.CocoDetection):
             break
         if isinstance(dataset, torch.utils.data.Subset):
diff --git a/references/detection/group_by_aspect_ratio.py b/references/detection/group_by_aspect_ratio.py
index 572bf7734be..b468d8a90aa 100644
--- a/references/detection/group_by_aspect_ratio.py
+++ b/references/detection/group_by_aspect_ratio.py
@@ -101,7 +101,7 @@ def __len__(self):
         collate_fn=lambda x: x[0])
     aspect_ratios = []
     with tqdm(total=len(dataset)) as pbar:
-        for i, (img, _) in enumerate(data_loader):
+        for _i, (img, _) in enumerate(data_loader):
             pbar.update(1)
             height, width = img.shape[-2:]
             aspect_ratio = float(height) / float(width)
diff --git a/references/detection/train.py b/references/detection/train.py
index 0a187686dd8..c8fb01ab1da 100644
--- a/references/detection/train.py
+++ b/references/detection/train.py
@@ -9,8 +9,6 @@
 import torchvision.models.detection
 import torchvision.models.detection.mask_rcnn
 
-from torchvision import transforms
-
 from coco_utils import get_coco, get_coco_kp
 from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
 
diff --git a/test/test_ops.py b/test/test_ops.py
index 737d9186df7..14bc9705bb5 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -14,7 +14,9 @@ def setUpClass(cls):
         cls.dtype = torch.float64
 
     def slow_roi_pooling(self, x, rois, pool_h, pool_w, spatial_scale=1,
-                         device=torch.device('cpu'), dtype=torch.float64):
+                         device=None, dtype=torch.float64):
+        if device is None:
+            device = torch.device("cpu")
         c = x.size(1)
         y = torch.zeros(rois.size(0), c, pool_h, pool_w,
                         dtype=dtype, device=device)
diff --git a/test/test_transforms.py b/test/test_transforms.py
index 72e770f42fd..062839ac53d 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -141,7 +141,7 @@ def test_randomresized_params(self):
         img = to_pil_image(img)
         size = 100
         epsilon = 0.05
-        for i in range(10):
+        for _ in range(10):
            scale_min = round(random.random(), 2)
            scale_range = (scale_min, scale_min + round(random.random(), 2))
            aspect_min = max(round(random.random(), 2), epsilon)
@@ -153,7 +153,7 @@ def test_randomresized_params(self):
                         aspect_ratio_obtained == 1.0)
 
     def test_randomperspective(self):
-        for i in range(10):
+        for _ in range(10):
             height = random.randint(24, 32) * 2
             width = random.randint(24, 32) * 2
             img = torch.ones(3, height, width)
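
Note on the patch: it applies two lint-driven idioms throughout the repo. Unused loop counters are renamed to _ (or _i) so flake8 stops flagging them, and the test_ops.py hunk swaps a torch.device('cpu') default argument for a None sentinel resolved inside the body, so the device object is constructed per call rather than once at definition time. The standalone sketch below (not part of the patch; slow_pool_rows is a hypothetical stand-in, not torchvision code) shows both idioms in isolation.

import torch


def slow_pool_rows(x, device=None, dtype=torch.float64):
    # Lazy default: torch.device("cpu") is built on each call rather than
    # once when the def statement runs, matching the slow_roi_pooling change.
    if device is None:
        device = torch.device("cpu")
    y = torch.zeros(x.size(0), dtype=dtype, device=device)
    # Underscore marks the counter as intentionally unused, matching the
    # renamed loops in coco_utils.py and test_transforms.py.
    for _ in range(3):
        y += x.to(device=device, dtype=dtype).mean(dim=1)
    return y


print(slow_pool_rows(torch.ones(2, 4)))  # tensor([3., 3.], dtype=torch.float64)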