updates
glenn-jocher committed Mar 4, 2020
1 parent 35eae3a commit 1430a1e
Showing 2 changed files with 11 additions and 5 deletions.
10 changes: 5 additions & 5 deletions test.py
@@ -84,18 +84,18 @@ def test(cfg,
         # Disable gradients
         with torch.no_grad():
             # Run model
-            t = time.time()
+            t = torch_utils.time_synchronized()
             inf_out, train_out = model(imgs)  # inference and training outputs
-            t0 += time.time() - t
+            t0 += torch_utils.time_synchronized() - t
 
             # Compute loss
             if hasattr(model, 'hyp'):  # if model has loss hyperparameters
                 loss += compute_loss(train_out, targets, model)[1][:3].cpu()  # GIoU, obj, cls
 
             # Run NMS
-            t = time.time()
-            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)
-            t1 += time.time() - t
+            t = torch_utils.time_synchronized()
+            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)  # nms
+            t1 += torch_utils.time_synchronized() - t
 
         # Statistics per image
         for si, pred in enumerate(output):
6 changes: 6 additions & 0 deletions utils/torch_utils.py
@@ -1,4 +1,5 @@
 import os
+import time
 
 import torch
 import torch.backends.cudnn as cudnn
@@ -40,6 +41,11 @@ def select_device(device='', apex=False, batch_size=None):
     return torch.device('cuda:0' if cuda else 'cpu')
 
 
+def time_synchronized():
+    torch.cuda.synchronize() if torch.cuda.is_available() else None
+    return time.time()
+
+
 def fuse_conv_and_bn(conv, bn):
     # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
     with torch.no_grad():
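
For context, a minimal sketch of why the switch from time.time() to time_synchronized() matters: CUDA kernels are launched asynchronously, so an unsynchronized wall-clock read can return before the GPU has actually finished, undercounting inference and NMS time. The toy workload below (an nn.Linear layer) is an illustrative assumption, not part of this commit; only the time_synchronized() pattern mirrors the function added to utils/torch_utils.py.

# Sketch: synchronized GPU timing. The nn.Linear workload is a stand-in
# for illustration only; it is not the model used in this repository.
import time

import torch
import torch.nn as nn


def time_synchronized():
    # Wait for all queued CUDA kernels to finish before reading the clock
    torch.cuda.synchronize() if torch.cuda.is_available() else None
    return time.time()


if __name__ == '__main__':
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = nn.Linear(1024, 1024).to(device)  # hypothetical stand-in workload
    x = torch.randn(256, 1024, device=device)

    t = time_synchronized()
    for _ in range(100):
        y = model(x)
    dt = time_synchronized() - t  # reflects actual execution, not just kernel launch
    print('synchronized timing: %.4f s' % dt)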
