
Commit

updated render spiral, classification code, added scene info script
andymiller committed Mar 31, 2012
1 parent 4e03a06 commit ffaabbf
Showing 9 changed files with 245 additions and 71 deletions.
9 changes: 8 additions & 1 deletion build.py
@@ -23,6 +23,7 @@
parser.add_option("-v", "--variance", action="store", type="float", dest="var", default="-1.0", help="Specify fixed mog3 variance, otherwise learn it")
parser.add_option("-d", "--downSamp", action="store", type="float", dest="downSamp", default="1.0", help="Specify if images/cams should be downsampled before updating")
parser.add_option("-n", "--numSkip", action="store", type="int", dest="skip", default=1, help="Specify how many images to use in each pass (1=every, 2=every other...)")
parser.add_option("-a", "--render", action="store_true", dest="render", default=False, help="Render images from update viewpoints")
(options, args) = parser.parse_args()
print options
print args
@@ -80,7 +81,7 @@
for p in range(0,NUMPASSES):

#update using every image once
frames = range(0,len(imgs)-1)[::options.skip];
frames = range(0,len(imgs))[::options.skip];
random.shuffle(frames);
for idx, i in enumerate(frames):
print "Pass: ", p, ", Iteration ", idx, " of ", len(frames)
@@ -96,9 +97,15 @@
remove_from_db([img, pcam])
img = dimg
pcam = dcam
ni,nj = dni, dnj

#update call
scene.update(pcam, img, True, mask, "", options.var);

if options.render:
rimg = scene.render(pcam, ni, nj)
save_image(rimg, "update_%i.tiff"%(idx))

#refine
if idx%REFINE_INTERVAL==0 and REFINE_ON:
scene.refine();
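
For context, the new -a/--render option makes each update iteration render the scene from the current update camera and write the result to update_%i.tiff, which gives a quick visual check on convergence. A hypothetical invocation (positional arguments omitted, since how build.py consumes args is not shown in this hunk) might look like:

python build.py --render --numSkip 2 --downSamp 0.5 --variance 0.06
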
99 changes: 69 additions & 30 deletions classifier/boxm2_classify.py
@@ -3,6 +3,7 @@
import random
from sklearn import svm, datasets
from optparse import OptionParser
from PIL import Image
import classify_image as ci
import transImage as ti

@@ -22,8 +23,8 @@ def load_flat_file(self,fname):
irPixels = []
for line in f:
l = line.split()
if not(l[0] == "noclass" or l[0] == "trees"): continue
if l[0]=="noclass" and random.random() > .01: continue
#if not(l[0] == "noclass" or l[0] == "trees" or l[0]== "water"): continue
if l[0]=="noclass" and random.random() > .5: continue

#initialize class int
datClass = l[0]
@@ -41,10 +42,17 @@ def load_flat_file(self,fname):
self.target = np.array(self.target)
eoPixels = np.array(eoPixels)
irPixels = np.array(irPixels)
self.eoPixels = eoPixels
self.irPixels = irPixels
print "EO File pixels shape: ", eoPixels.shape
self.intToClass = dict((v,k) for k, v in self.classMap.iteritems())

#transform IR dataset to just take brightness (total intensity) and ratios
self.data = ti.features(eoPixels, irPixels)
print self.data.shape
self.reducer = ti.LDAFeatures()
self.data = self.reducer.features(eoPixels, irPixels, self.target)

#print info
print "Data shape: ", self.data.shape
for c,v in self.classMap.iteritems():
print c, ":", np.sum(self.target==v), "items in training set"

@@ -67,45 +75,76 @@ def load_flat_file(self,fname):

# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
svc = svm.SVC(kernel='rbf').fit(X, Y)

rbf_svc = svm.SVC(kernel='rbf', gamma=0.7).fit(X, Y)
eoImg = "/home/acm/Dropbox/cvg/MatClass/Annotations/eoImgs/exp_000.png"
irImg = "/home/acm/Dropbox/cvg/MatClass/Annotations/irImgs/exp_000.png"
#ci.classify_pixels(eoImg, irImg, svc, imgs);

rbf_svc = svm.SVC(kernel='rbf', gamma=0.7).fit(X, Y)
poly_svc = svm.SVC(kernel='poly', degree=3).fit(X, Y)
lin_svc = svm.LinearSVC().fit(X, Y)
#data, pixelZ = ci.classify_pixels(eoImg, irImg, imgs.reducer, rbf_svc, imgs);
#print "Pixel xmin, xmax, ", data[:,0].min(), data[:,0].max()
#print " ymin, ymax, ", data[:,1].min(), data[:,1].max()


#DEBUG#####
eo = Image.open(eoImg)
ir = Image.open(irImg)
eoPix = np.float32(eo) / 255.0
irPix = np.float32(ir) / 255.0
eoDat = eoPix.reshape( (eoPix.shape[0]*eoPix.shape[1], eoPix.shape[2]) )
eoDat = eoDat[:,:3]
irDat = irPix.reshape( (irPix.shape[0]*irPix.shape[1], 1) )
#pl.plot(imgs.eoPixels[:,0], imgs.eoPixels[:,1], "o", c="red", label="File")
#pl.plot(eoDat[:,0], eoDat[:,1], "x", c="blue", label="File")
#pl.legend()
#pl.show()

#poly_svc = svm.SVC(kernel='poly', degree=3).fit(X, Y)
lin_svc = svm.LinearSVC().fit(X, Y)
print "SVM Learned"

# create a mesh to plot in
h = .002 # step size in the mesh
h = 50 # number of steps in mesh
x_min, x_max = X[:, 0].min() - .002, X[:, 0].max() + .002
y_min, y_max = X[:, 1].min() - .002, X[:, 1].max() + .002
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))

#incorporate pixel data
#x_min = min(x_min, data[:,0].min())
#x_max = max(x_max, data[:,0].max())
#y_min = min(y_min, data[:,1].min())
#y_max = max(y_max, data[:,1].max())
xx, yy = np.meshgrid(np.arange(x_min, x_max, (x_max-x_min)/h),
np.arange(y_min, y_max, (y_max-y_min)/h))
print xx.shape

# title for the plots
models = [ rbf_svc ]
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel',
'LinearSVC (linear kernel)']
pl.set_cmap(pl.cm.Paired)
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
pl.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.set_cmap(pl.cm.Paired)
pl.contourf(xx, yy, Z)
pl.axis('off')

# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y)

pl.title(titles[i])
for i, clf in enumerate( models ):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
pl.subplot(1, len(models), i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.set_cmap(pl.cm.Paired)
pl.contourf(xx, yy, Z)
#pl.axis('off')

# Plot also the training points
colors = ["red", "green", "blue", "yellow", "black"]
for c,i in imgs.classMap.iteritems():
x = X[Y==i]
pl.plot(x[:,0], x[:,1], "o", c=colors[i], label=c)

#plot image points
#pl.plot(data[:,0], data[:,1], "o", c="orange", label="Pixels")

#legend and title
pl.legend()
pl.title(titles[i])

pl.show()

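
The plotting loop above follows the usual scikit-learn decision-region recipe: evaluate the fitted classifier over a 2-D grid spanning the reduced feature space, reshape the predictions back onto the grid, and draw them with contourf underneath the training points. A minimal self-contained sketch of that pattern on synthetic data (not the EO/IR pixel features used here):

import numpy as np
import pylab as pl
from sklearn import svm

# two synthetic 2-D classes standing in for the reduced pixel features
X = np.vstack((np.random.randn(50, 2), np.random.randn(50, 2) + 3.0))
Y = np.array([0] * 50 + [1] * 50)
clf = svm.SVC(kernel='rbf', gamma=0.7).fit(X, Y)

# predict over a grid covering the data, then reshape back to the grid
xx, yy = np.meshgrid(np.linspace(X[:, 0].min() - 1, X[:, 0].max() + 1, 200),
                     np.linspace(X[:, 1].min() - 1, X[:, 1].max() + 1, 200))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

pl.contourf(xx, yy, Z)
pl.scatter(X[:, 0], X[:, 1], c=Y)
pl.show()
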
19 changes: 13 additions & 6 deletions classifier/classify_image.py
@@ -5,25 +5,32 @@
from PIL import Image
import transImage as ti

def classify_pixels(eoName, irName, model, dataset):
def classify_pixels(eoName, irName, reducer, model, dataset=None):
eo = Image.open(eoName)
ir = Image.open(irName)
eoPix = np.float32(eo) / 255.0
irPix = np.float32(ir) / 255.0

eoDat = eoPix.reshape( (eoPix.shape[0]*eoPix.shape[1], eoPix.shape[2]) )
eoDat = eoDat[:,:3]
irDat = irPix.reshape( (irPix.shape[0]*irPix.shape[1], 1) )
data = ti.features(eoDat, irDat)

#reduce data
data = reducer.features(eoDat, irDat)
Z = np.array(model.predict(data))

#print out classes
print "Num pixels classified: "
for name,val in dataset.classMap.iteritems():
print name, np.sum(Z==val)
if dataset:
print "Num pixels classified: "
for name,val in dataset.classMap.iteritems():
print name, np.sum(Z==val)

#shape
Z = Z.reshape(irPix.shape)
print Z.shape
print "Image shape: ", Z.shape

#try saving it out
newImg = Image.fromarray(Z)
newImg.save("test.tiff")

return data, Z
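
classify_pixels now takes the fitted feature reducer along with the model, and dataset is optional (it is only used for the per-class pixel counts). A rough end-to-end sketch, with synthetic training pixels standing in for the annotation file and placeholder image paths:

import numpy as np
from sklearn import svm
import transImage as ti
import classify_image as ci

# stand-in training pixels: N x 3 EO (rgb), N x 1 IR, and integer class labels
eoTrain = np.random.rand(300, 3)
irTrain = np.random.rand(300, 1)
labels = [i % 3 for i in range(300)]

reducer = ti.LDAFeatures()
feats = reducer.features(eoTrain, irTrain, labels)   # first call fits the LDA
model = svm.SVC(kernel='rbf', gamma=0.7).fit(feats, labels)

# placeholder EO/IR image pair; omitting dataset just skips the per-class printout
data, Z = ci.classify_pixels("eo_example.png", "ir_example.png", reducer, model)
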
35 changes: 35 additions & 0 deletions classifier/lda_example.py
@@ -0,0 +1,35 @@
import pylab as pl

from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA

iris = datasets.load_iris()

X = iris.data
y = iris.target
target_names = iris.target_names

pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)

lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)

# Percentage of variance explained for each component
print 'explained variance ratio (first two components):', \
pca.explained_variance_ratio_

pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
pl.legend()
pl.title('PCA of IRIS dataset')

pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
pl.legend()
pl.title('LDA of IRIS dataset')

pl.show()
Binary file modified classifier/test.tiff
Binary file not shown.
64 changes: 46 additions & 18 deletions classifier/transImage.py
@@ -1,26 +1,54 @@
import numpy as np
import pylab as pl
from sklearn.decomposition import PCA
from sklearn.lda import LDA


class LDAFeatures:

def __init__(self):
self.lda = None

def features(self, eoPixels, irPixels, gt=None):
#grab feature stack
fullFeatures = naive_features(eoPixels, irPixels)

#if the LDA from ground truth exists already, transform new features
if gt is None and self.lda is not None:
return self.lda.transform(fullFeatures)
assert gt is not None

#otherwise, train LDA
self.lda = LDA(n_components=2).fit(fullFeatures,gt)
return self.lda.transform(fullFeatures)

class PCAFeatures:
def features(self, eoPixels, irPixels):
fullFeatures = naive_features(eoPixels, irPixels)
self.pca = PCA(n_components=2).fit(fullFeatures)
return self.pca.transform(fullFeatures)


def naive_features(eoPixels, irPixels):
"""Stacks a bunch of ratios/differences into a
high dimensional feature vector
"""
#create sqr differences features for each channel
allPix = np.column_stack( (eoPixels, irPixels) )
intensity = np.sum(eoPixels[:,:3], axis=1) + irPixels[:,0] #per-pixel total intensity
diffs = []
for i in range(4):
for j in range(4):
if i==j: continue
diff = (allPix[:,i] - allPix[:,j]) / intensity
diffs.append(diff)

def features(eoPixels, irPixels):
#gbDiff = green_blue_difference(eoPixels, irPixels)
irDiff = ir_difference(eoPixels, irPixels)
rRatio = pixelRatio(eoPixels, irPixels, "red")
gRatio = pixelRatio(eoPixels, irPixels, "green")
#bRatio = pixelRatio(eoPixels, irPixels, "blue")
#return np.column_stack( (gbDiff, irDiff, rRatio, bRatio, gRatio) )
return np.column_stack( (gRatio, rRatio) )

def ir_difference(eoPixels, irPixels):
irdiff = irPixels[:,0] - eoPixels[:,2]
intensity = np.sum(eoPixels[:,:3]) + irPixels[:,0]
return irdiff/intensity

def green_blue_difference(eoPixels, irPixels):
"""Assuming eo and ir pixes are passed in, returns array of green blue diff"""
assert eoPixels.shape[0] == irPixels.shape[0]
gbdiff = eoPixels[:,1] - eoPixels[:,2]
intensity = np.sum(eoPixels[:,:3]) + irPixels[:,0]
return gbdiff / intensity
bRatio = pixelRatio(eoPixels, irPixels, "blue")
iRatio = pixelRatio(eoPixels, irPixels, "ir")
return np.column_stack( [rRatio, bRatio, gRatio, iRatio]+diffs )


def pixelRatio(eoPixels, irPixels, pixel_type="red"):
"""Green / (Green + Red + Blue + IR)"""
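
For reference, naive_features stacks the four band ratios with the twelve intensity-normalized pairwise channel differences, giving a 16-column feature matrix, and LDAFeatures then projects that stack down to two dimensions, fitting the LDA on the first call (which must supply labels) and reusing the fitted projection on later calls. A small sketch with synthetic pixels showing the expected shapes (assuming transImage.py is importable and pixelRatio returns one ratio per pixel):

import numpy as np
import transImage as ti

# synthetic stand-in pixels: N x 3 EO (rgb) and N x 1 IR, with integer labels
eo = np.random.rand(200, 3)
ir = np.random.rand(200, 1)
labels = [i % 3 for i in range(200)]

raw = ti.naive_features(eo, ir)
print "naive feature stack:", raw.shape        # expected (200, 16): 4 ratios + 12 diffs

reducer = ti.LDAFeatures()
reduced = reducer.features(eo, ir, labels)     # first call fits the LDA to the labels
print "LDA-reduced features:", reduced.shape   # expected (200, 2)

# later calls without labels reuse the already-fitted projection
more = reducer.features(np.random.rand(10, 3), np.random.rand(10, 1))
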
2 changes: 1 addition & 1 deletion paint.py
@@ -57,7 +57,7 @@
for p in range(0,NUMPASSES):

#update using every image once
frames = range(0,len(imgs)-1)[::options.skip];
frames = range(0,len(imgs))[::options.skip];
random.shuffle(frames);
for idx, i in enumerate(frames):
print "Pass: ", p, ", Iteration ", idx, " of ", len(frames)
