Commit 636e166

update

jiemin.fang committed Jun 23, 2019
1 parent 8b672bf commit 636e166
Showing 4 changed files with 7 additions and 67 deletions.
4 changes: 0 additions & 4 deletions dataset/imagenet_data.py
@@ -1,9 +1,6 @@
import os
import random

import numpy as np
import torch
import torch.utils.data
import torchvision
import torchvision.transforms as vision_transforms

@@ -85,4 +82,3 @@ def getTestLoader(self, batch_size, shuffle=False):
            num_workers=self.num_workers,
            pin_memory=self.pin_memory)
        return test_loader

2 changes: 1 addition & 1 deletion tools/multadds_count.py
@@ -1,5 +1,5 @@
import torch
import numpy as np;

# Original implementation:
# https://github.com/warmspringwinds/pytorch-segmentation-detection/blob/master/pytorch_segmentation_detection/utils/flops_benchmark.py

66 changes: 6 additions & 60 deletions tools/utils.py
@@ -1,7 +1,6 @@
import os
import logging
import os
import shutil
import time

import numpy as np
import torch
@@ -83,29 +82,12 @@ def create_exp_dir(path):
    print('Experiment dir : {}'.format(path))


def prod_sample(weights, sample_num):
    sampled_indices = []

    for j in range(sample_num):
        while 1:
            sample_factor = np.random.rand()
            for k in range(len(weights)):
                if sample_factor >= torch.sum(weights[0:k]) and \
                        sample_factor < torch.sum(weights[0:k+1]):
                    sampled_id = k
                    break
                else:
                    continue
            if sampled_id not in sampled_indices:
                sampled_indices.append(sampled_id)
                break
            else:
                continue

    return sampled_indices
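
The deleted prod_sample drew sample_num distinct indices with probability proportional to weights by rejection sampling: draw a uniform number, find which cumulative-weight bin it falls into, and retry whenever that index was already chosen. For comparison, a minimal sketch of the same behavior (our illustration, not code from this repository; it assumes weights is a 1-D tensor of non-negative values):

    # torch.multinomial draws sample_num distinct indices without
    # replacement, weighted by the (unnormalized) values in `weights`.
    sampled_indices = torch.multinomial(weights, sample_num, replacement=False).tolist()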


def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.):
    """
    Label smoothing implementation.
    This function is taken from https://github.com/MIT-HAN-LAB/ProxylessNAS/blob/master/proxyless_nas/utils.py
    """

    logsoftmax = nn.LogSoftmax().cuda()
    n_classes = pred.size(1)
    # convert to one-hot
@@ -115,39 +97,3 @@ def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.):
    # label smoothing
    soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes
    return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
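
The smoothing step blends the one-hot target with a uniform distribution, giving the true class weight 1 - label_smoothing and spreading label_smoothing evenly over all n_classes, before taking the usual cross entropy against logsoftmax(pred). A hypothetical call sketch (assuming a CUDA device, since the function moves nn.LogSoftmax onto the GPU):

    pred = torch.randn(8, 1000).cuda()            # logits for a batch of 8
    target = torch.randint(0, 1000, (8,)).cuda()  # integer class labels
    loss = cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1)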


def latency_measure(module, input_size, batch_size, meas_times, mode='gpu'):
    assert mode in ['gpu', 'cpu']

    latency = []
    module.eval()
    input_size = (batch_size,) + tuple(input_size)
    input_data = torch.randn(input_size)
    if mode=='gpu':
        input_data = input_data.cuda()

    for i in range(meas_times):
        with torch.no_grad():
            start = time.time()
            _ = module(input_data)
            if i >= 100:
                latency.append(time.time() - start)
    print(np.mean(latency) * 1e3, 'ms')
    return np.mean(latency) * 1e3
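
Only iterations with i >= 100 are recorded, so the first 100 forward passes serve as warm-up; meas_times must be comfortably above 100 or the mean is taken over an empty list. A hypothetical usage sketch (torchvision and moving the module to the GPU are our assumptions; the function itself only moves the input):

    import torchvision

    model = torchvision.models.resnet18().cuda()
    # mean forward latency in ms over the 400 timed runs
    lat_ms = latency_measure(model, input_size=(3, 224, 224), batch_size=1,
                             meas_times=500, mode='gpu')

Note there is no torch.cuda.synchronize() around the timed call, so on GPU the wall-clock numbers can under-report actual kernel time.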


def latency_measure_fw(module, input_data, meas_times):
    latency = []
    module.eval()

    for i in range(meas_times):
        with torch.no_grad():
            start = time.time()
            output_data = module(input_data)
            if i >= 100:
                latency.append(time.time() - start)

    print(np.mean(latency) * 1e3, 'ms')
    return np.mean(latency) * 1e3, output_data
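
latency_measure_fw differs only in taking an already-prepared input_data tensor and returning the module's output alongside the mean latency, presumably so that per-block measurements can be chained, with each block's output fed to the next call as its input_data.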

2 changes: 0 additions & 2 deletions validation.py
@@ -5,11 +5,9 @@
import sys
import time

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils

from configs.config import cfg
from dataset import imagenet_data
