From 5a607c81c5e1fd90b4cb07f042ad62ef95cee970 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 3 Nov 2021 23:36:53 +0100
Subject: [PATCH] Fix float zeros format (#5491)

* Fix float zeros format

* 255 to integer
---
 detect.py              |  2 +-
 export.py              |  2 +-
 models/common.py       |  4 ++--
 models/experimental.py |  2 +-
 models/tf.py           |  6 +++---
 models/yolo.py         |  4 ++--
 train.py               |  8 ++++----
 utils/activations.py   |  2 +-
 utils/augmentations.py |  2 +-
 utils/autoanchor.py    | 10 +++++-----
 utils/datasets.py      |  8 ++++----
 utils/general.py       |  2 +-
 utils/loss.py          | 10 +++++-----
 utils/plots.py         |  2 +-
 utils/torch_utils.py   |  6 +++---
 val.py                 |  2 +-
 16 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/detect.py b/detect.py
index f9c7bac3fca2..a8d7e0b6a8c9 100644
--- a/detect.py
+++ b/detect.py
@@ -136,7 +136,7 @@ def wrap_frozen_graph(gd, inputs, outputs):
         else:
             img = torch.from_numpy(img).to(device)
             img = img.half() if half else img.float()  # uint8 to fp16/32
-        img /= 255.0  # 0 - 255 to 0.0 - 1.0
+        img /= 255  # 0 - 255 to 0.0 - 1.0
         if len(img.shape) == 3:
             img = img[None]  # expand for batch dim
         t2 = time_sync()
diff --git a/export.py b/export.py
index 47dbcab50144..7d32094fda27 100644
--- a/export.py
+++ b/export.py
@@ -117,7 +117,7 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')):

     model.train()  # CoreML exports should be placed in model.train() mode
     ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
-    ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])])
+    ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
     ct_model.save(f)

     LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
diff --git a/models/common.py b/models/common.py
index 8b70a6fea595..4c43719aab68 100644
--- a/models/common.py
+++ b/models/common.py
@@ -339,7 +339,7 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
         x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
         x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
-        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32
+        x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
         t.append(time_sync())

         with amp.autocast(enabled=p.device.type != 'cpu'):
@@ -362,7 +362,7 @@ class Detections:
     def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
         super().__init__()
         d = pred[0].device  # device
-        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1., 1.], device=d) for im in imgs]  # normalizations
+        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs]  # normalizations
         self.imgs = imgs  # list of images as numpy arrays
         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
         self.names = names  # class names
diff --git a/models/experimental.py b/models/experimental.py
index 2e92ccb36faf..a2883120fe83 100644
--- a/models/experimental.py
+++ b/models/experimental.py
@@ -32,7 +32,7 @@ def __init__(self, n, weight=False):  # n: number of inputs
         self.weight = weight  # apply weights boolean
         self.iter = range(n - 1)  # iter object
         if weight:
-            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights
+            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights

     def forward(self, x):
         y = x[0]  # no weight
diff --git a/models/tf.py b/models/tf.py
index 6c07410e03a5..92a89aa65459 100644
--- a/models/tf.py
+++ b/models/tf.py
@@ -98,7 +98,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
         self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)

     def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
-        # inputs = inputs / 255.  # normalize 0-255 to 0-1
+        # inputs = inputs / 255  # normalize 0-255 to 0-1
         return self.conv(tf.concat([inputs[:, ::2, ::2, :],
                                     inputs[:, 1::2, ::2, :],
                                     inputs[:, ::2, 1::2, :],
@@ -227,7 +227,7 @@ def call(self, inputs):

             if not self.training:  # inference
                 y = tf.sigmoid(x[i])
-                xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
+                xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i]  # xy
                 wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]
                 # Normalize xywh to 0-1 to reduce calibration error
                 xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
@@ -414,7 +414,7 @@ def representative_dataset_gen(dataset, ncalib=100):
     for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
         input = np.transpose(img, [1, 2, 0])
         input = np.expand_dims(input, axis=0).astype(np.float32)
-        input /= 255.0
+        input /= 255
         yield [input]
         if n >= ncalib:
             break
diff --git a/models/yolo.py b/models/yolo.py
index 38a17d9e7ba4..55d113f72348 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -60,10 +60,10 @@ def forward(self, x):

                 y = x[i].sigmoid()
                 if self.inplace:
-                    y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
+                    y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i]  # xy
                     y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                 else:  # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
-                    xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
+                    xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i]  # xy
                     wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                     y = torch.cat((xy, wh, y[..., 4:]), -1)
                 z.append(y.view(bs, -1, self.no))
diff --git a/train.py b/train.py
index 4886034d811f..8a19a10c4841 100644
--- a/train.py
+++ b/train.py
@@ -246,9 +246,9 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary

     # Model parameters
     nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
-    hyp['box'] *= 3. / nl  # scale to layers
-    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
-    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
+    hyp['box'] *= 3 / nl  # scale to layers
+    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
+    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
     hyp['label_smoothing'] = opt.label_smoothing
     model.nc = nc  # attach number of classes to model
     model.hyp = hyp  # attach hyperparameters to model
@@ -293,7 +293,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)
-            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0
+            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

             # Warmup
             if ni <= nw:
diff --git a/utils/activations.py b/utils/activations.py
index 62eb532b3f95..4c7d46c32104 100644
--- a/utils/activations.py
+++ b/utils/activations.py
@@ -19,7 +19,7 @@ class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
     @staticmethod
     def forward(x):
         # return x * F.hardsigmoid(x)  # for torchscript and CoreML
-        return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX
+        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for torchscript, CoreML and ONNX


 # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
diff --git a/utils/augmentations.py b/utils/augmentations.py
index b3cbbf913b65..a8e7e5c00f2a 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -124,7 +124,7 @@ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleF

 def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                        border=(0, 0)):
-    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
+    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
     # targets = [cls, xyxy]

     height = im.shape[0] + border[0] * 2  # shape(h,w,c)
diff --git a/utils/autoanchor.py b/utils/autoanchor.py
index 6b3c661be2f7..af0aa7de65ac 100644
--- a/utils/autoanchor.py
+++ b/utils/autoanchor.py
@@ -34,10 +34,10 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640):

     def metric(k):  # compute metric
         r = wh[:, None] / k[None]
-        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
+        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
         best = x.max(1)[0]  # best_x
-        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
-        bpr = (best > 1. / thr).float().mean()  # best possible recall
+        aat = (x > 1 / thr).float().sum(1).mean()  # anchors above threshold
+        bpr = (best > 1 / thr).float().mean()  # best possible recall
         return bpr, aat

     anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1)  # current anchors
@@ -80,12 +80,12 @@ def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen
     """
     from scipy.cluster.vq import kmeans

-    thr = 1. / thr
+    thr = 1 / thr
     prefix = colorstr('autoanchor: ')

     def metric(k, wh):  # compute metrics
         r = wh[:, None] / k[None]
-        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
+        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
         # x = wh_iou(wh, torch.tensor(k))  # iou metric
         return x, x.max(1)[0]  # x, best_x

diff --git a/utils/datasets.py b/utils/datasets.py
index 7fce122942f7..ce561b7f99a0 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -634,13 +634,13 @@ def collate_fn4(batch):
         n = len(shapes) // 4
         img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

-        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
-        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
-        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
+        ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
+        wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
+        s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]])  # scale
         for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
             i *= 4
             if random.random() < 0.5:
-                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
+                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
                     0].type(img[i].type())
                 l = label[i]
             else:
diff --git a/utils/general.py b/utils/general.py
index d8cac8daac22..22086a79464d 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -802,7 +802,7 @@ def apply_classifier(x, model, img, im0):

                 im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                 im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
-                im /= 255.0  # 0 - 255 to 0.0 - 1.0
+                im /= 255  # 0 - 255 to 0.0 - 1.0
                 ims.append(im)

             pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
diff --git a/utils/loss.py b/utils/loss.py
index e8ce42ad994a..194c8e503e0e 100644
--- a/utils/loss.py
+++ b/utils/loss.py
@@ -108,7 +108,7 @@ def __init__(self, model, autobalance=False):
             BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

         det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
-        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
+        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
         self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
         self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
         for k in 'na', 'nc', 'nl', 'anchors':
@@ -129,7 +129,7 @@ def __call__(self, p, targets):  # predictions, targets, model
                 ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                 # Regression
-                pxy = ps[:, :2].sigmoid() * 2. - 0.5
+                pxy = ps[:, :2].sigmoid() * 2 - 0.5
                 pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                 pbox = torch.cat((pxy, pwh), 1)  # predicted box
                 iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
@@ -189,15 +189,15 @@ def build_targets(self, p, targets):
             if nt:
                 # Matches
                 r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
-                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
+                j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t']  # compare
                 # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                 t = t[j]  # filter

                 # Offsets
                 gxy = t[:, 2:4]  # grid xy
                 gxi = gain[[2, 3]] - gxy  # inverse
-                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
-                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+                j, k = ((gxy % 1 < g) & (gxy > 1)).T
+                l, m = ((gxi % 1 < g) & (gxi > 1)).T
                 j = torch.stack((torch.ones_like(j), j, k, l, m))
                 t = t.repeat((5, 1, 1))[j]
                 offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
diff --git a/utils/plots.py b/utils/plots.py
index 00cda6d8d986..92bc8c700152 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -155,7 +155,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
     if isinstance(targets, torch.Tensor):
         targets = targets.cpu().numpy()
     if np.max(images[0]) <= 1:
-        images *= 255.0  # de-normalise (optional)
+        images *= 255  # de-normalise (optional)
     bs, _, h, w = images.shape  # batch size, _, height, width
     bs = min(bs, max_subplots)  # limit plot images
     ns = np.ceil(bs ** 0.5)  # number of subplots (square)
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 6e619d9c6955..793e8d8ffd3e 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -111,7 +111,7 @@ def profile(input, ops, n=10, device=None):
         for m in ops if isinstance(ops, list) else [ops]:
             m = m.to(device) if hasattr(m, 'to') else m  # device
             m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
-            tf, tb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
+            tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward
             try:
                 flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs
             except:
@@ -177,7 +177,7 @@ def find_modules(model, mclass=nn.Conv2d):

 def sparsity(model):
     # Return global model sparsity
-    a, b = 0., 0.
+    a, b = 0, 0
     for p in model.parameters():
         a += p.numel()
         b += (p == 0).sum()
@@ -336,7 +336,7 @@ def update(self, model):
             for k, v in self.ema.state_dict().items():
                 if v.dtype.is_floating_point:
                     v *= d
-                    v += (1. - d) * msd[k].detach()
+                    v += (1 - d) * msd[k].detach()

     def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
         # Update EMA attributes
diff --git a/val.py b/val.py
index 1fc98c71198b..fb49fc902b90 100644
--- a/val.py
+++ b/val.py
@@ -164,7 +164,7 @@ def run(data,
         t1 = time_sync()
         img = img.to(device, non_blocking=True)
         img = img.half() if half else img.float()  # uint8 to fp16/32
-        img /= 255.0  # 0 - 255 to 0.0 - 1.0
+        img /= 255  # 0 - 255 to 0.0 - 1.0
         targets = targets.to(device)
         nb, _, height, width = img.shape  # batch size, channels, height, width
         t2 = time_sync()
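
Note (reviewer sketch appended after the patch, not part of the commit): the
literal changes above are purely cosmetic. Python 3 "/" is true division, and
PyTorch promotes integer scalars against float tensors, so dividing a float
tensor by 255 and by 255.0 produces bit-identical results. A minimal check,
assuming only torch is installed:

    import torch

    im = torch.randint(0, 256, (1, 3, 8, 8), dtype=torch.uint8)  # dummy uint8 image
    a = im.float() / 255    # int literal, as after this patch
    b = im.float() / 255.0  # float literal, as before this patch
    assert torch.equal(a, b)  # bit-identical float32 tensors

    y = torch.rand(4, 2)  # dummy sigmoid outputs
    assert torch.equal(y * 2 - 0.5, y * 2. - 0.5)  # box-decode change is a no-op too

One caveat: in-place forms such as `img /= 255` are only valid once the tensor
is already a floating dtype (every touched call site casts first via .float()
or .half()); on a raw uint8 tensor the in-place division would raise a dtype
error.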