----------------------------------------------------------------
-- Activity-Recognition-with-CNN-and-RNN
-- https://github.com/chihyaoma/Activity-Recognition-with-CNN-and-RNN
--
--
-- This is a testing code for implementing the RNN model with LSTM
-- written by Chih-Yao Ma.
--
-- The code will take feature vectors (from CNN model) from contiguous
-- frames and train against the ground truth, i.e. the labeling of video classes.
--
-- Contact: Chih-Yao Ma at <cyma@gatech.edu>
----------------------------------------------------------------
local nn = require 'nn'
local sys = require 'sys'
local xlua = require 'xlua' -- xlua provides useful tools, like progress bars
local optim = require 'optim'
local pastalog = require 'pastalog'
print(sys.COLORS.red .. '==> defining some tools')
-- model: the trained network and its criterion come from model.lua
-- (criterion is unused during testing but kept for symmetry with train.lua)
local m = require 'model'
local model = m.model
local criterion = m.criterion
-- This matrix records the current confusion across classes
-- NOTE(review): `classes` and `opt` are expected to be globals set by the
-- main script before this file is required — confirm against the caller.
local confusion = optim.ConfusionMatrix(classes)
-- Logger: appends one accuracy row per epoch to <save>/test.log
local testLogger = optim.Logger(paths.concat(opt.save,'test.log'))
-- Batch test: preallocated mini-batch buffers, reused across iterations.
-- inputs is (batchSize x featureDim x frames); targets holds class indices.
local inputs = torch.Tensor(opt.batchSize, opt.inputSize, opt.rho)
local targets = torch.Tensor(opt.batchSize)
local labels = {}
local prob = {}
-- Per-frame prediction buffer used by the averagePred path in test():
-- one (batchSize x nClass) slice per evaluated frame count.
-- Was an accidental global; declared local here so test() below captures
-- it as an upvalue instead of polluting _G.
-- if opt.averagePred == true then
-- predsFrames = torch.Tensor(opt.batchSize, nClass, opt.rho-1)
local predsFrames = torch.Tensor(opt.batchSize, nClass, opt.rho - opt.numSegment + 1)
-- end
local logsoftmax = nn.LogSoftMax()
if opt.cuda == true then
inputs = inputs:cuda()
targets = targets:cuda()
logsoftmax = logsoftmax:cuda()
end
-- test function
function test(testData, testTarget)
-- local vars
local time = sys.clock()
local timer = torch.Timer()
local dataTimer = torch.Timer()
-- replace the JoinTable in model with CAddTable
-- model:remove(1)
-- model:insert(nn.View(#opt.hiddenSize, opt.batchSize, opt.inputSize, -1),1)
-- model:remove(#model.modules)
-- model:add(nn.CAddTable())
if opt.cuda == true then
model:cuda()
end
-- Sets Dropout layer to have a different behaviour during evaluation.
model:evaluate()
-- test over test data
print(sys.COLORS.red .. '==> testing on test set:')
if opt.testOnly then
epoch = 1
end
local top1Sum, top3Sum, lossSum = 0.0, 0.0, 0.0
local N = 0
for t = 1,testData:size(1),opt.batchSize do
local dataTime = dataTimer:time().real
-- disp progress
xlua.progress(t, testData:size(1))
-- create mini batch
local idx = 1
inputs:fill(0)
targets:fill(0)
for i = t,t+opt.batchSize-1 do
if i <= testData:size(1) then
inputs[idx] = testData[i]--:float()
targets[idx] = testTarget[i]
idx = idx + 1
end
end
local idxBound = idx - 1
local top1, top3
if opt.averagePred == true then
-- make prediction for each of the images frames, start from frame #2
idx = 1
for i = 2, opt.rho do
-- extract various length of frames
local Index = torch.range(1, i)
local indLong = torch.LongTensor():resize(Index:size()):copy(Index)
local inputsPreFrames = inputs:index(3, indLong)
-- inputsPreFrames = inputsPreFrames:transpose(2,3):transpose(1,2)
-- enable this when use tri-data method
-- replicate the testing data and feed into LSTM cells seperately
-- inputsPreFrames = torch.repeatTensor(inputsPreFrames,#opt.hiddenSize,1,1)
-- feedforward pass the trained model
-- print(inputsPreFrames:size())
-- local temp = model:forward(inputsPreFrames)
-- print(temp)
-- error('test')
predsFrames[{{},{},idx}] = model:forward(inputsPreFrames)
idx = idx + 1
end
-- Convert log probabilities back to [0, 1]
-- need to fix here
predsFrames:exp()
-- average all the prediction across all frames
preds = torch.mean(predsFrames, 3):squeeze()
else
-- make prediction for each of the images frames, start depends on # of numSegment
-- idx = 1
-- for i = opt.numSegment, opt.rho do
-- -- extract various length of frames
-- local Index = torch.range(1, i)
-- local indLong = torch.LongTensor():resize(Index:size()):copy(Index)
-- local inputsPreFrames = inputs:index(3, indLong)
--
-- local inputsSegments = {}
-- local segmentBasis = math.floor(i/opt.numSegment)
--
-- for s = 1, opt.numSegment do
-- table.insert(inputsSegments, inputsPreFrames[{{}, {}, {segmentBasis*(s-1) + 1,segmentBasis*s}}])
-- end
--
-- predsFrames[{{},{},idx}] = model:forward(inputsSegments):float()
--
-- idx = idx + 1
-- end
--
-- predsFrames:exp()
-- -- average all the prediction across all frames
-- preds = torch.mean(predsFrames, 3):squeeze()
-- inputsSegments = {inputs[{{}, {}, {1,8}}], inputs[{{}, {}, {9,17}}], inputs[{{}, {}, {18,25}}]}
-- inputsSegments = {inputs[{{}, {}, {1,5}}], inputs[{{}, {}, {6,10}}], inputs[{{}, {}, {11,15}}], inputs[{{}, {}, {16,20}}], inputs[{{}, {}, {21,25}}]}
local inputsSegments = {}
local segmentBasis = math.floor(inputs:size(3)/opt.numSegment)
for s = 1, opt.numSegment do
table.insert(inputsSegments, inputs[{{}, {}, {segmentBasis*(s-1) + 1,segmentBasis*s}}])
end
preds = model:forward(inputsSegments)
preds = logsoftmax:forward(preds):exp()
end
-- discard the redundant predictions and targets
if (t + opt.batchSize - 1) > testData:size(1) then
preds = preds:sub(1,idxBound)
end
top1, top3 = computeScore(preds, targets:sub(1,idxBound), 1)
top1Sum = top1Sum + top1*idxBound
top3Sum = top3Sum + top3*idxBound
N = N + idxBound
print(('%.3f | Test: [%d][%d/%d] | Time %.3f Data %.3f top1 %7.3f (%7.3f) top3 %7.3f (%7.3f)'):format(
bestAcc, epoch-1, t, testData:size(1), timer:time().real, dataTime, top1, top1Sum / N, top3, top3Sum / N))
-- Get the top N class indexes and probabilities
local topN = 3
local probLog, predLabels = preds:topk(topN, true, true)
idx = 1
for i = t,t+opt.batchSize-1 do
if i <= testData:size(1) then
labels[i] = {}
prob[i] = {}
for j = 1, topN do
labels[i][j] = classes[predLabels[idx][j]]
prob[i][j] = probLog[idx][j]
end
idx = idx + 1
end
end
-- confusion
for i = 1,idxBound do
confusion:add(preds[i], targets:sub(1,idxBound)[i])
end
end
-- revert back to the original model for training again
-- model:remove(1)
-- model:insert(nn.View(#opt.hiddenSize, opt.batchSize/#opt.hiddenSize, opt.inputSize, -1),1)
-- model:remove(#model.modules)
-- model:add(nn.JoinTable(1))
if opt.cuda == true then
model:cuda()
end
-- timing
time = sys.clock() - time
time = time / testData:size(1)
print("\n==> time to test 1 sample = " .. (time*1000) .. 'ms')
timer:reset()
dataTimer:reset()
-- print confusion matrix
print(confusion)
assert(#labels == testData:size(1), 'predictions dimension mismatch with testing data..')
-- if the performance is so far the best..
local bestModel = false
if confusion.totalValid * 100 >= bestAcc then
bestModel = true
bestAcc = confusion.totalValid * 100
-- save the labels and probabilities into file
torch.save(opt.save .. '/labels.txt', labels,'ascii')
torch.save(opt.save .. '/prob.txt', prob,'ascii')
if opt.saveModel == true then
if confusion.totalValid * 100 >= 94.2 then
checkpoints.save(epoch-1, model, optimState, bestModel, confusion.totalValid*100)
end
end
end
print(sys.COLORS.red .. '==> Best testing accuracy = ' .. bestAcc .. '%')
print(sys.COLORS.red .. (' * Finished epoch # %d top1: %7.3f top3: %7.3f\n'):format(
epoch-1, top1Sum / N, top3Sum / N))
local modelName = 'DropOut=' .. opt.dropout
local epochSeriesName = 'epoch-top1-' .. opt.pastalogName
pastalog(modelName, epochSeriesName, confusion.totalValid * 100, epoch, 'http://ct5250-12.ece.gatech.edu:8120/data')
-- update log/plot
testLogger:add{['epoch'] = epoch-1, ['top-1 error'] = confusion.totalValid * 100}
if opt.plot then
testLogger:style{['% mean class accuracy (test set)'] = '-'}
testLogger:plot()
end
confusion:zero()
end
--- Compute the top-1 and top-3 scores (in percent) for a batch of predictions.
-- @param output 2-D tensor of class scores, one row per sample
-- @param target 1-D tensor of ground-truth class indices
-- @return top-1 value * 100, top-3 value * 100
function computeScore(output, target)
local nSamples = output:size(1)
-- rank classes per sample, best score first; keep only the index permutation
local _, ranked = output:float():sort(2, true)
-- broadcast the targets across the class axis and mark matching positions
local wanted = target:long():view(nSamples, 1):expandAs(output)
local hits = ranked:eq(wanted)
-- top-1: fraction of samples whose best-ranked class is NOT the target
local top1 = 1.0 - (hits:narrow(2, 1, 1):sum() / nSamples)
-- top-3: same, but the target may appear anywhere in the first k ranks
-- (k capped by the class count in case there are fewer than 3 classes)
local k = math.min(3, hits:size(2))
local top3 = 1.0 - (hits:narrow(2, 1, k):sum() / nSamples)
return top1 * 100, top3 * 100
end
-- Export: the module's value is the test() routine itself, so callers can
-- write `local test = require 'test'` and invoke it once per epoch.
return test