
Commit

merged
anewell committed Sep 26, 2016
2 parents: 88ca96e + 4637618 · commit d8a6577
Showing 21 changed files with 994 additions and 840 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -3,3 +3,4 @@
*.pyc
exp/
data/**/images
src/models/old
Binary file added data/mpii/annot.h5
Binary file not shown.
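
A quick way to sanity-check what this binary annotation file holds is to open it with h5py and list its datasets. This is only a sketch: the dataset names and the 'mpii' name attribute are assumptions carried over from the convert_annot.py changes further down in this commit, not something confirmed for this particular file.

# Sketch (Python): inspect data/mpii/annot.h5 with h5py.
# The expected keys ('center', 'scale', 'part', 'visible', 'istrain', ...)
# are assumed from convert_annot.py below; the actual file may differ.
import h5py

with h5py.File('data/mpii/annot.h5', 'r') as f:
    print(f.attrs.get('name'))                 # expected: 'mpii'
    for key in f:                              # iterate over dataset names
        print(key, f[key].shape, f[key].dtype)
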
149 changes: 0 additions & 149 deletions src/data.lua

This file was deleted.

101 changes: 0 additions & 101 deletions src/dataloader.lua

This file was deleted.

109 changes: 109 additions & 0 deletions src/itorch/.ipynb_checkpoints/demo-checkpoint.ipynb

Large diffs are not rendered by default.

196 changes: 196 additions & 0 deletions src/itorch/demo.ipynb

Large diffs are not rendered by default.

35 changes: 21 additions & 14 deletions src/main.lua
@@ -1,32 +1,39 @@
require 'paths'
paths.dofile('ref.lua') -- Parse command line input and do global variable initialization
paths.dofile('data.lua') -- Set up data processing
paths.dofile('model.lua') -- Read in network model
paths.dofile('train.lua') -- Load up training/testing functions

-- Set up data loader
torch.setnumthreads(1)
local Dataloader = require 'dataloader'
loader = Dataloader.create(opt)
local Dataloader = paths.dofile('util/dataloader.lua')
loader = Dataloader.create(opt, dataset, ref)

isFinished = false -- Finish early if validation accuracy plateaus, can be adjusted with opt.threshold
-- Initialize logs
ref.log = {}
ref.log.train = Logger(paths.concat(opt.save, 'train.log'), opt.continue)
ref.log.valid = Logger(paths.concat(opt.save, 'valid.log'), opt.continue)

-- Main training loop
for i=1,opt.nEpochs do
train()
valid()
collectgarbage()
print("==> Starting epoch: " .. epoch .. "/" .. (opt.nEpochs + opt.epochNumber - 1))
if opt.trainIters > 0 then train() end
if opt.validIters > 0 then valid() end
epoch = epoch + 1
if isFinished then break end
collectgarbage()
end

-- Update options/reference for last epoch
-- Update reference for last epoch
opt.lastEpoch = epoch - 1
torch.save(opt.save .. '/options.t7', opt)

-- Generate final predictions on validation set
if opt.finalPredictions == 1 then predict() end

-- Save model
model:clearState()
torch.save(paths.concat(opt.save,'final_model.t7'), model)
torch.save(paths.concat(opt.save,'options.t7'), opt)
torch.save(paths.concat(opt.save,'optimState.t7'), optimState)
torch.save(paths.concat(opt.save,'final_model.t7'), model)

-- Generate final predictions on validation set
if opt.finalPredictions then
ref.log = {}
loader.test = Dataloader(opt, dataset, ref, 'test')
predict()
end
58 changes: 37 additions & 21 deletions src/misc/convert_annot.py
@@ -3,9 +3,18 @@
import sys
import mpii

keys = ['index','person','imgname','center','scale','part','visible','normalize','torsoangle']
keys = ['index','person','imgname','center','scale','part','visible','normalize','torsoangle','multi','istrain']
annot = {k:[] for k in keys}
dotrain = True

# Set up index reference for multiperson training
multiRef = np.zeros(mpii.nimages)
trainRef = mpii.annot['img_train'][0][0][0]
allIdxs = np.arange(0,trainRef.shape[0])
with h5py.File('../../data/mpii/annot/multi-idxs.h5','r') as f:
mTrain = f['train'][:] - 1
mTest = f['test'][:] - 1
multiRef[allIdxs[trainRef == 1][mTrain]] = 1
multiRef[allIdxs[trainRef == 0][mTest]] = 1

# Get image filenames
imgnameRef = mpii.annot['annolist'][0][0][0]['image'][:]
@@ -14,37 +23,44 @@
print "\r",idx,
sys.stdout.flush()

if mpii.istrain(idx) == dotrain:
for person in xrange(mpii.numpeople(idx)):
c,s = mpii.location(idx,person)
if not c[0] == -1:
# Adjust center/scale slightly to avoid cropping limbs
# (in hindsight this should have been done in the Torch code...)
c[1] += 15 * s
s *= 1.25
for person in xrange(mpii.numpeople(idx)):
c,s = mpii.location(idx,person)
if not c[0] == -1:
# Add info to annotation list
annot['index'] += [idx]
annot['person'] += [person]
imgname = np.zeros(16)
refname = str(imgnameRef[idx][0][0][0][0])
for i in range(len(refname)): imgname[i] = ord(refname[i])
annot['imgname'] += [imgname]
annot['center'] += [c]
annot['scale'] += [s]
annot['multi'] += [multiRef[idx]]

if mpii.istrain(idx) == True:
# Part annotations and visibility
coords = np.zeros((16,2))
vis = np.zeros(16)
for part in xrange(16):
coords[part],vis[part] = mpii.partinfo(idx,person,part)

# Add info to annotation list
annot['index'] += [idx]
annot['person'] += [person]
annot['imgname'] += [str(imgnameRef[idx][0][0][0][0])]
annot['center'] += [c]
annot['scale'] += [s]
annot['part'] += [coords]
annot['visible'] += [vis]
annot['normalize'] += [mpii.normalization(idx,person)]
annot['torsoangle'] += [mpii.torsoangle(idx,person)]
annot['istrain'] += [1]
else:
annot['part'] += [-np.ones((16,2))]
annot['visible'] += [np.zeros(16)]
annot['normalize'] += [1]
annot['torsoangle'] += [0]
if trainRef[idx] == 0: # Test image
annot['istrain'] += [0]
else: # Training image (something missing in annot)
annot['istrain'] += [2]

print ""

with h5py.File('mpii-annot-train.h5','w') as f:
with h5py.File('mpii-annot.h5','w') as f:
f.attrs['name'] = 'mpii'
for k in keys:
if not k == 'imgname': annot[k] = np.array(annot[k])
f[k] = annot[k]

f[k] = np.array(annot[k])
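
With these changes the script writes a single mpii-annot.h5 covering both train and test people, encodes each image name as a zero-padded length-16 array of character codes, and tags every entry with 'multi' and 'istrain' flags (0 = test image, 1 = training image with part annotations, 2 = training image with incomplete annotations). A minimal sketch of reading that file back and decoding an image name, assuming the output of the script above:

# Sketch (Python): read the converted annotations and decode an image name.
# Assumes the mpii-annot.h5 file produced by the updated script above.
import h5py

def decode_name(codes):
    # 'imgname' rows are ord() values in a zero-padded array of length 16.
    return ''.join(chr(int(c)) for c in codes if c != 0)

with h5py.File('mpii-annot.h5', 'r') as f:
    names = f['imgname'][:]
    istrain = f['istrain'][:]
    multi = f['multi'][:]

print(decode_name(names[0]), int(istrain[0]), int(multi[0]))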
