Commit 9d90dc0
initial commit
Anurag Ranjan committed Jun 21, 2018 (1 parent: 786b18e)
Showing 52 changed files with 5,816 additions and 0 deletions.
62 changes: 62 additions & 0 deletions EPECriterion.lua
@@ -0,0 +1,62 @@
local EPECriterion, parent = torch.class('nn.EPECriterion', 'nn.Criterion')

-- Computes the average endpoint error (EPE) over batchSize x nChannels x Height x Width
-- flow fields, or any tensor whose second dimension holds the flow components.

local eps = 1e-12

function EPECriterion:__init()
parent.__init(self)
self.sizeAverage = true -- note: the implementation below always averages; this flag is currently unused
end

function EPECriterion:updateOutput(input, target)
-- TODO: Assertion for 4D tensor and appropriate flow fields
assert( input:nElement() == target:nElement(),
"input and target size mismatch")

self.buffer = self.buffer or input.new()

local buffer = self.buffer
local output
local npixels

buffer:resizeAs(input)
npixels = input:nElement()/2 -- 2 channel flow fields

buffer:add(input, -1, target):pow(2) -- (input - target)^2
output = torch.sum(buffer,2):sqrt() -- per-pixel EPE: sqrt of the sum over the two flow channels
output = output:sum()

output = output / npixels

self.output = output

return self.output
end

function EPECriterion:updateGradInput(input, target)

assert( input:nElement() == target:nElement(),
"input and target size mismatch")

self.buffer = self.buffer or input.new()

local buffer = self.buffer
local gradInput = self.gradInput
local npixels
local loss

buffer:resizeAs(input)
npixels = input:nElement()/2

buffer:add(input, -1, target):pow(2)
loss = torch.sum(buffer,2):sqrt():add(eps) -- per-pixel EPE (+ eps) forms the denominator
loss = torch.cat(loss, loss, 2) -- repeat along the channel dimension to match input's shape

gradInput:resizeAs(input)
gradInput:add(input, -1, target):cdiv(loss)
gradInput:div(npixels) -- scale in place so self.gradInput matches the averaged loss
return self.gradInput
end
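For reference, the loss this criterion implements: with predicted flow u, ground-truth flow v, and N = nElement()/2 pixels, the forward pass computes EPE(u, v) = (1/N) * sum over pixels p of sqrt((u_x(p) - v_x(p))^2 + (u_y(p) - v_y(p))^2), and updateGradInput returns its derivative (u - v) / (N * (per-pixel EPE + eps)), where eps = 1e-12 guards the division at pixels where u and v coincide.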
51 changes: 51 additions & 0 deletions EPECriterionUnitTest.lua
@@ -0,0 +1,51 @@
require 'torch'
require 'nn' -- provides nn.Criterion, the parent class of nn.EPECriterion
local totem = require 'totem'
local autograd = require 'autograd'
local util = require 'autograd.util'
local gradcheck = require 'autograd.gradcheck' {randomizeInput = true}
local gradcheckConstant = require 'autograd.gradcheck' {randomizeInput = false}
local tester = totem.Tester()
local stringx = require 'pl.stringx'

include('EPECriterion.lua')

local eps = 1e-12

local function epe(input, target)
-- TODO: Assertion for 4D tensor and appropriate flow fields
assert( input:nElement() == target:nElement(),
"input and target size mismatch")

local npixels = torch.nElement(input)/2 -- 2 channel flow fields

local buffer = torch.pow(torch.add(input, -1, target), 2)
local output = torch.sqrt(torch.sum(buffer,2)) -- per-pixel EPE: sqrt of the sum over the two flow channels
output = torch.sum(output)

output = output / npixels

return output
end

local epeCriterion = nn.EPECriterion()
local autoepeCriterion = autograd.nn.AutoCriterion('AutoEPE')(epe)

for i=1,10 do
local input = torch.rand(4,2,32,32)
local target = torch.rand(4,2,32,32)

local loss = epeCriterion:forward(input, target)
local autoloss = autoepeCriterion:forward(input, target)
local grads = epeCriterion:backward(input, target)
local autograds = autoepeCriterion:backward(input, target)

assert(torch.abs(loss - autoloss) < 1e-6, "Test Failed, Check Loss Function" )
assert((grads - autograds):abs():max() < 1e-6, "Test Failed, Check Gradient Function" )

print("Test " ..i .." Passed!")

end
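The file requires autograd.gradcheck but never calls it; the loop above only compares the nn module against the autograd-wrapped reference. A plain finite-difference spot check could complement it. A minimal sketch follows; fdSpotCheck and its step size and tolerance are illustrative assumptions, not part of this commit:

-- Hypothetical finite-difference spot check: perturb a few random entries
-- of `input` and compare numeric slopes against the analytic gradient.
local function fdSpotCheck(crit, input, target, h, trials)
   crit:forward(input, target)
   local grads = crit:backward(input, target):clone() -- clone: later forwards must not touch it
   local flat = input:view(-1)
   local gflat = grads:view(-1)
   for _ = 1, trials do
      local j = torch.random(1, flat:nElement())
      local orig = flat[j]
      flat[j] = orig + h
      local lp = crit:forward(input, target)
      flat[j] = orig - h
      local lm = crit:forward(input, target)
      flat[j] = orig
      local numeric = (lp - lm) / (2 * h)
      assert(math.abs(numeric - gflat[j]) < 1e-6,
         string.format("finite-difference check failed at index %d", j))
   end
end
fdSpotCheck(nn.EPECriterion(), torch.rand(4,2,32,32), torch.rand(4,2,32,32), 1e-4, 5)
print("Finite-difference spot check passed!")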


58 changes: 58 additions & 0 deletions data.lua
@@ -0,0 +1,58 @@
--
-- Copyright (c) 2014, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree. An additional grant
-- of patent rights can be found in the PATENTS file in the same directory.
--
local ffi = require 'ffi'
local Threads = require 'threads'
Threads.serialization('threads.sharedserialize')

-- This script contains the logic to create K threads for parallel data-loading.
-- For the data-loading details, look at donkey.lua
-------------------------------------------------------------------------------
do -- start K datathreads (donkeys)
if opt.nDonkeys > 0 then
local options = opt -- make an upvalue to serialize over to donkey threads
donkeys = Threads(
opt.nDonkeys,
function()
require 'torch'
end,
function(idx)
opt = options -- pass to all donkeys via upvalue
tid = idx
local seed = opt.manualSeed + idx
torch.manualSeed(seed)
print(string.format('Starting donkey with id: %d seed: %d', tid, seed))
paths.dofile('donkey.lua')
end
);
else -- single-threaded data loading; useful for debugging
paths.dofile('donkey.lua')
donkeys = {}
function donkeys:addjob(f1, f2) f2(f1()) end
function donkeys:synchronize() end
end
end

--[[
nClasses = nil
classes = nil
donkeys:addjob(function() return trainLoader.classes end, function(c) classes = c end)
donkeys:synchronize()
nClasses = #classes
assert(nClasses, "Failed to get nClasses")
assert(nClasses == opt.nClasses,
"nClasses is reported different in the data loader, and in the commandline options")
print('nClasses: ', nClasses)
torch.save(paths.concat(opt.save, 'classes.t7'), classes)
--]]
nTest = 0
donkeys:addjob(function() return testLoader:size() end, function(c) nTest = c end)
donkeys:synchronize()
assert(nTest > 0, "Failed to get nTest")
print('nTest: ', nTest)
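For context, a minimal usage sketch of the donkey pool (not part of this commit): addjob(producer, consumer) runs the producer on a donkey thread and the consumer on the main thread, mirroring the nTest query above. trainLoader, opt.epochSize, opt.batchSize and trainBatch are assumed names from the companion donkey/train scripts:

-- Hypothetical training-loop usage (assumed names, illustrative only):
for i = 1, opt.epochSize do
   donkeys:addjob(
      function() return trainLoader:sample(opt.batchSize) end, -- producer (donkey thread)
      function(inputs, flowTargets) trainBatch(inputs, flowTargets) end -- consumer (main thread)
   )
end
donkeys:synchronize()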
160 changes: 160 additions & 0 deletions dataset.lua
@@ -0,0 +1,160 @@
require 'torch'
torch.setdefaulttensortype('torch.FloatTensor')
local ffi = require 'ffi'
local class = require('pl.class')
local dir = require 'pl.dir'
local tablex = require 'pl.tablex'
local argcheck = require 'argcheck'
local flowX = require 'flowExtensions'
require 'sys'
require 'xlua'
require 'image'

local dataset = torch.class('dataLoader')

local initcheck = argcheck{
pack=true,
help=[[
A dataset class for loading images and dense outputs such as optical flow
or segmentations in large datasets. Tested only on Linux (as it uses
command-line Linux utilities to scale up).
]],
{name="inputSize",
type="table",
help="the size of the input images"},

{name="flowSize",
type="table",
help="the size of the network output"},

{name="samplingMode",
type="string",
help="Sampling mode: random | balanced ",
default = "balanced"},

{name="verbose",
type="boolean",
help="Verbose mode during initialization",
default = false},

{name="samples",
type="table",
help="samples of training or testing images",
opt = true},

{name="sampleHookTrain",
type="function",
help="applied to sample during training(ex: for lighting jitter). "
.. "It takes the image path as input",
opt = true},

{name="sampleHookTest",
type="function",
help="applied to sample during testing",
opt = true},
}

function dataset:__init(...)

-- argcheck
local args = initcheck(...)
print(args)
for k,v in pairs(args) do self[k] = v end

if not self.sampleHookTrain then self.sampleHookTrain = self.defaultSampleHook end
if not self.sampleHookTest then self.sampleHookTest = self.defaultSampleHook end

local function tableFind(t, o) for k,v in pairs(t) do if v == o then return k end end end

self.numSamples = #self.samples
assert(self.numSamples > 0, "Could not find any sample in the given input paths")

if self.verbose then print(self.numSamples .. ' samples found.') end
end

-- size(): returns the number of samples (the class/list arguments are accepted but unused)
function dataset:size(class, list)
return self.numSamples
end


-- Converts a table of images (and the corresponding flow fields) into batch tensors.
local function tableToOutput(self, imgTable, flowTable)
local images, flows
local quantity = #imgTable

assert(imgTable[1]:size()[1] == self.inputSize[1])
assert(flowTable[1]:size()[1] == self.flowSize[1])

images = torch.Tensor(quantity,
self.inputSize[1], self.inputSize[2], self.inputSize[3])
flows = torch.Tensor(quantity,
self.flowSize[1], self.flowSize[2], self.flowSize[3])
for i=1,quantity do
images[i]:copy(imgTable[i])
flows[i]:copy(flowTable[i])
end
return images, flows
end

-- Builds a multi-scale pyramid of flow targets (1/8, 1/4, 1/2 and full resolution).
local function getFlowTable(flows)
local flowTab = { flowX.scaleBatch(flows, 0.125),
flowX.scaleBatch(flows, 0.25),
flowX.scaleBatch(flows, 0.5),
flows}
return flowTab
end

-- Sampler: draws `quantity` random samples from the training set.
function dataset:sample(quantity)
assert(quantity)
local imgTable = {}
local flowTable = {}
for i=1,quantity do
local id = torch.random(1, self.numSamples)
local img, flow = self:sampleHookTrain(id) -- the hook returns one image/flow pair
table.insert(imgTable, img)
table.insert(flowTable, flow)
end
local images, flows = tableToOutput(self, imgTable, flowTable)
local flowTab = getFlowTable(flows)
return images, flowTab
end

-- get(i1, i2): loads samples i1..i2 sequentially (used for evaluation).
function dataset:get(i1, i2)
local quantity = i2 - i1 + 1
assert(quantity > 0)
local imgTable = {}
local flowTable = {}
for i=1,quantity do
-- load the sample
local img, flow = self:sampleHookTest(i1+i-1)
table.insert(imgTable, img)
table.insert(flowTable, flow)
end
local images, flows = tableToOutput(self, imgTable, flowTable)
local flowTab = getFlowTable(flows)
return images, flowTab
end


return dataset
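A construction sketch for this class (hypothetical: the real sizes, sample table and hooks live in donkey.lua, which data.lua loads; channel counts, trainSamples and loadSample below are illustrative assumptions):

-- Hypothetical instantiation, not part of this commit.
paths.dofile('dataset.lua') -- registers the global class 'dataLoader'
local loader = dataLoader{
   inputSize = {6, 256, 256}, -- e.g. two RGB frames stacked along channels (assumption)
   flowSize = {2, 256, 256}, -- 2-channel optical flow at full resolution
   samples = trainSamples, -- assumed table with one record per sample
   sampleHookTrain = function(self, id)
      return loadSample(self.samples[id]) -- assumed: returns image and flow tensors
   end,
}
local images, flowTab = loader:sample(8)
-- images: 8 x 6 x 256 x 256; flowTab: flow targets at 1/8, 1/4, 1/2 and full resolution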
