-
Notifications
You must be signed in to change notification settings - Fork 0
/
evaluate_prediction.py
145 lines (120 loc) · 5.29 KB
/
evaluate_prediction.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import argparse
import time
import torch.backends.cudnn as cudnn
from utils.test_imagefolder import TestImageFolder
import pandas as pd
from utils.functions import *
from utils.image_preprocess import *
from models import *
def _str2bool(v):
    """Parse a boolean-ish command-line string.

    argparse's ``type=bool`` is a trap: ``bool('False')`` is ``True``
    because every non-empty string is truthy, so ``--cuda False`` would
    silently enable the CUDA code path.  This helper accepts the usual
    spellings and raises a proper argparse error otherwise.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % v)


parser = argparse.ArgumentParser(description='PyTorch ImageNet100_64*64 Evaluating')
parser.add_argument('--data', default='./data', type=str, metavar='N',
                    help='root directory of dataset where directory train_data or val_data exists')
parser.add_argument('--result', default='./Results',
                    type=str, metavar='N', help='root directory of results')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet35',
                    help='model architecture: resnet35')
parser.add_argument('--num-classes', default=200, type=int, help='define the number of classes')
parser.add_argument('-b', '--batch-size', default=128, type=int, metavar='N',
                    help='mini-batch size (default: 128) used for test')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N',
                    help='print frequency (default: 10)')
parser.add_argument('--model-dir', default='./Results/resnet35-52.04/resnet35-52.04.pth.tar',
                    type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
# Note: type=_str2bool (not type=bool) so that '--cuda False' really disables CUDA.
parser.add_argument('--cuda', default=torch.cuda.is_available(), type=_str2bool,
                    help='whether cuda is in use.')
parser.add_argument('--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers(for linux default 8;for Windows default 0)')
def main():
    """Load a trained model, score the test set and write predictions.

    Side effects: creates ``args.result`` if missing and writes
    ``test_prediction.csv`` (columns Id, Prediction) into it.
    """
    global args
    args = parser.parse_args()

    # Make sure the output directory exists before we try to write into it.
    if not os.path.exists(args.result):
        os.makedirs(args.result)

    # Model building: look the architecture constructor up by name among
    # the symbols star-imported from ``models``.
    print('=> Building model...')
    modeltype = globals()[args.arch]
    model = modeltype(num_classes=args.num_classes)
    print(model)

    # Optionally resume from a checkpoint.
    if args.model_dir:
        if os.path.isfile(args.model_dir):
            print('=> loading checkpoint "{}"'.format(args.model_dir))
            if args.cuda:
                checkpoint = torch.load(args.model_dir)
            else:
                # Map GPU-saved tensors onto the CPU when no GPU is available.
                checkpoint = torch.load(args.model_dir, map_location=torch.device('cpu'))
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.model_dir, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.model_dir))

    if args.cuda:
        print('GPU mode! ')
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True
    else:
        print('CPU mode! Cuda is not available!')

    # Data loading and preprocessing.
    print('=> loading imagenet200 data...')
    test_dir = os.path.join(args.data, 'test')
    test_dataset = TestImageFolder(test_dir, transform=transforms_test())
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size,
                                              shuffle=False, num_workers=args.workers)

    # Get per-class scores for every test image.
    score = validate(test_loader, model)
    if args.cuda:
        score = score.cpu()

    # Top-1 prediction: index of the highest score per row.  reshape(-1)
    # (rather than squeeze()) keeps the result 1-D even when the test set
    # contains a single sample, where squeeze would yield a 0-d array.
    _, pred = score.topk(1, 1, True, True)
    pred = pred.reshape(-1).numpy()

    # Write predictions to a csv file, one row per test image.
    print('Writing scores to test_prediction.csv.......')
    csv_file = os.path.join(args.result, 'test_prediction.csv')
    imgs = ['%05d.png' % i for i in range(len(test_dataset))]
    dataframe = pd.DataFrame({'Id': imgs, 'Prediction': pred})
    dataframe.to_csv(csv_file, index=False)
    print('Done!')
def validate(val_loader, model):
    """Run ``model`` over every batch of ``val_loader`` and return the scores.

    Returns a ``(num_samples, num_classes)`` tensor of raw model outputs.
    Reads the module-level ``args`` for ``cuda`` and ``print_freq``.
    """
    batch_time = AverageMeter()
    # Switch to evaluate mode (freezes dropout / batchnorm statistics).
    model.eval()
    # Collect per-batch outputs and concatenate once at the end.  The
    # original re-ran torch.cat on the growing tensor every iteration,
    # which copies all previous scores each time (quadratic in batches).
    scores = []
    with torch.no_grad():
        end = time.time()
        for i, (input, index) in enumerate(val_loader):
            if args.cuda:
                input = input.cuda(non_blocking=True)
            # compute output
            if len(input.size()) > 4:  # 5-D tensor: (batch, crops, C, H, W) from multi-crop TTA
                bs, crops, ch, h, w = input.size()
                output = model(input.view(-1, ch, h, w))
                # fuse scores among all crops by averaging
                output = output.view(bs, crops, -1).mean(dim=1)
            else:
                output = model(input)
            scores.append(output)
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time))
    score = torch.cat(scores, dim=0)
    print(score.shape)
    return score
class AverageMeter(object):
    """Tracks the most recent value plus a running, count-weighted average."""

    def __init__(self):
        # Start from a clean slate; all state lives in reset().
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
# Entry point: run the evaluation only when executed as a script, not on import.
if __name__=='__main__':
    main()