updated render spiral, classification code, added scene info script

commit ffaabbf6995c9f52f731ef9f5944f4ecfd3b8506 1 parent 4e03a06
@andymiller authored
9 build.py
@@ -23,6 +23,7 @@
parser.add_option("-v", "--variance", action="store", type="float", dest="var", default="-1.0", help="Specify fixed mog3 variance, otherwise learn it")
parser.add_option("-d", "--downSamp", action="store", type="float", dest="downSamp", default="1.0", help="Specify if images/cams should be downsampled before updating")
parser.add_option("-n", "--numSkip", action="store", type="int", dest="skip", default=1, help="Specify how many images to use in each pass (1=every, 2=every other...)")
+parser.add_option("-a", "--render", action="store_true", dest="render", default=False, help="Render images from update viewpoints")
(options, args) = parser.parse_args()
print options
print args
@@ -80,7 +81,7 @@
for p in range(0,NUMPASSES):
#update using every image once
- frames = range(0,len(imgs)-1)[::options.skip];
+ frames = range(0,len(imgs))[::options.skip];
random.shuffle(frames);
for idx, i in enumerate(frames):
print "Pass: ", p, ", Iteration ", idx, " of ", len(frames)
@@ -96,9 +97,15 @@
remove_from_db([img, pcam])
img = dimg
pcam = dcam
+ ni,nj = dni, dnj
#update call
scene.update(pcam, img, True, mask, "", options.var);
+
+ if options.render:
+ rimg = scene.render(pcam, ni, nj)
+ save_image(rimg, "update_%i.tiff"%(idx))
+
#refine
if idx%REFINE_INTERVAL==0 and REFINE_ON:
scene.refine();
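
The new -a/--render flag makes each pass self-documenting: after every scene.update call the model is re-rendered from the same camera and written to disk. A minimal sketch of a run that exercises it (the scene name is hypothetical):

# hypothetical scene name; uses every other image and renders each update view
python build.py --scene downtown --numSkip 2 --render
# frames appear as update_0.tiff, update_1.tiff, ... in the working directory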
99 classifier/boxm2_classify.py
@@ -3,6 +3,7 @@
import random
from sklearn import svm, datasets
from optparse import OptionParser
+from PIL import Image
import classify_image as ci
import transImage as ti
@@ -22,8 +23,8 @@ def load_flat_file(self,fname):
irPixels = []
for line in f:
l = line.split()
- if not(l[0] == "noclass" or l[0] == "trees"): continue
- if l[0]=="noclass" and random.random() > .01: continue
+ #if not(l[0] == "noclass" or l[0] == "trees" or l[0]== "water"): continue
+ if l[0]=="noclass" and random.random() > .5: continue
#initialize class int
datClass = l[0]
@@ -41,10 +42,17 @@ def load_flat_file(self,fname):
self.target = np.array(self.target)
eoPixels = np.array(eoPixels)
irPixels = np.array(irPixels)
+ self.eoPixels = eoPixels
+ self.irPixels = irPixels
+ print "EO File pixels shape: ", eoPixels.shape
+ self.intToClass = dict((v,k) for k, v in self.classMap.iteritems())
#transform IR dataset to just take brightness (total intensity) and ratios
- self.data = ti.features(eoPixels, irPixels)
- print self.data.shape
+ self.reducer = ti.LDAFeatures()
+ self.data = self.reducer.features(eoPixels, irPixels, self.target)
+
+ #print info
+ print "Data shape: ", self.data.shape
for c,v in self.classMap.iteritems():
print c, ":", np.sum(self.target==v), "items in training set"
@@ -67,45 +75,76 @@ def load_flat_file(self,fname):
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
- svc = svm.SVC(kernel='rbf').fit(X, Y)
-
+ rbf_svc = svm.SVC(kernel='rbf', gamma=0.7).fit(X, Y)
eoImg = "/home/acm/Dropbox/cvg/MatClass/Annotations/eoImgs/exp_000.png"
irImg = "/home/acm/Dropbox/cvg/MatClass/Annotations/irImgs/exp_000.png"
- #ci.classify_pixels(eoImg, irImg, svc, imgs);
-
- rbf_svc = svm.SVC(kernel='rbf', gamma=0.7).fit(X, Y)
- poly_svc = svm.SVC(kernel='poly', degree=3).fit(X, Y)
- lin_svc = svm.LinearSVC().fit(X, Y)
+ #data, pixelZ = ci.classify_pixels(eoImg, irImg, imgs.reducer, rbf_svc, imgs);
+ #print "Pixel xmin, xmax, ", data[:,0].min(), data[:,0].max()
+ #print " ymin, ymax, ", data[:,1].min(), data[:,1].max()
+
+
+ #DEBUG#####
+ eo = Image.open(eoImg)
+ ir = Image.open(irImg)
+ eoPix = np.float32(eo) / 255.0
+ irPix = np.float32(ir) / 255.0
+ eoDat = eoPix.reshape( (eoPix.shape[0]*eoPix.shape[1], eoPix.shape[2]) )
+ eoDat = eoDat[:,:3]
+ irDat = irPix.reshape( (irPix.shape[0]*irPix.shape[1], 1) )
+ #pl.plot(imgs.eoPixels[:,0], imgs.eoPixels[:,1], "o", c="red", label="File")
+ #pl.plot(eoDat[:,0], eoDat[:,1], "x", c="blue", label="File")
+ #pl.legend()
+ #pl.show()
+
+ #poly_svc = svm.SVC(kernel='poly', degree=3).fit(X, Y)
+ lin_svc = svm.LinearSVC().fit(X, Y)
+ print "SVM Learned"
# create a mesh to plot in
- h = .002 # step size in the mesh
+ h = 50 # number of mesh steps per axis
x_min, x_max = X[:, 0].min() - .002, X[:, 0].max() + .002
y_min, y_max = X[:, 1].min() - .002, X[:, 1].max() + .002
- xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
- np.arange(y_min, y_max, h))
+
+ #incorporate pixel data
+ #x_min = min(x_min, data[:,0].min())
+ #x_max = max(x_max, data[:,0].max())
+ #y_min = min(y_min, data[:,1].min())
+ #y_max = max(y_max, data[:,1].max())
+ xx, yy = np.meshgrid(np.arange(x_min, x_max, (x_max-x_min)/h),
+ np.arange(y_min, y_max, (y_max-y_min)/h))
+ print xx.shape
# title for the plots
+ models = [ rbf_svc ]
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel',
'LinearSVC (linear kernel)']
pl.set_cmap(pl.cm.Paired)
- for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
- # Plot the decision boundary. For that, we will asign a color to each
- # point in the mesh [x_min, m_max]x[y_min, y_max].
- pl.subplot(2, 2, i + 1)
- Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
-
- # Put the result into a color plot
- Z = Z.reshape(xx.shape)
- pl.set_cmap(pl.cm.Paired)
- pl.contourf(xx, yy, Z)
- pl.axis('off')
-
- # Plot also the training points
- pl.scatter(X[:, 0], X[:, 1], c=Y)
-
- pl.title(titles[i])
+ for i, clf in enumerate( models ):
+ # Plot the decision boundary. For that, we will assign a color to each
+ # point in the mesh [x_min, x_max]x[y_min, y_max].
+ pl.subplot(1, len(models), i + 1)
+ Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
+
+ # Put the result into a color plot
+ Z = Z.reshape(xx.shape)
+ pl.set_cmap(pl.cm.Paired)
+ pl.contourf(xx, yy, Z)
+ #pl.axis('off')
+
+ # Plot also the training points
+ colors = ["red", "green", "blue", "yellow", "black"]
+ for c,i in imgs.classMap.iteritems():
+ x = X[Y==i]
+ pl.plot(x[:,0], x[:,1], "o", c=colors[i], label=c)
+
+ #plot image points
+ #pl.plot(data[:,0], data[:,1], "o", c="orange", label="Pixels")
+
+ #legend and title
+ pl.legend()
+ pl.title(titles[i])
pl.show()
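
For reference, the meshgrid/contourf recipe the plotting loop is built on looks like this in isolation; the toy data and labels below are made up, only the sklearn/pylab calls mirror the script:

import numpy as np
import pylab as pl
from sklearn import svm

# toy 2-D data with two made-up classes
X = np.random.randn(200, 2)
Y = (X[:, 0] + X[:, 1] > 0).astype(int)
clf = svm.SVC(kernel='rbf', gamma=0.7).fit(X, Y)

steps = 50  # number of mesh steps per axis, like h above
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
xx, yy = np.meshgrid(np.arange(x_min, x_max, (x_max - x_min) / steps),
                     np.arange(y_min, y_max, (y_max - y_min) / steps))

# classify every mesh point, then draw filled decision regions
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
pl.contourf(xx, yy, Z)
pl.scatter(X[:, 0], X[:, 1], c=Y)
pl.show()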
19 classifier/classify_image.py
@@ -5,25 +5,32 @@
from PIL import Image
import transImage as ti
-def classify_pixels(eoName, irName, model, dataset):
+def classify_pixels(eoName, irName, reducer, model, dataset=None):
eo = Image.open(eoName)
ir = Image.open(irName)
eoPix = np.float32(eo) / 255.0
irPix = np.float32(ir) / 255.0
eoDat = eoPix.reshape( (eoPix.shape[0]*eoPix.shape[1], eoPix.shape[2]) )
+ eoDat = eoDat[:,:3]
irDat = irPix.reshape( (irPix.shape[0]*irPix.shape[1], 1) )
- data = ti.features(eoDat, irDat)
+
+ #reduce data
+ data = reducer.features(eoDat, irDat)
Z = np.array(model.predict(data))
#print out classes
- print "Num pixels classified: "
- for name,val in dataset.classMap.iteritems():
- print name, np.sum(Z==val)
+ if dataset:
+ print "Num pixels classified: "
+ for name,val in dataset.classMap.iteritems():
+ print name, np.sum(Z==val)
+ #shape
Z = Z.reshape(irPix.shape)
- print Z.shape
+ print "Image shape: ", Z.shape
#try saving it out
newImg = Image.fromarray(Z)
newImg.save("test.tiff")
+
+ return data, Z
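
classify_pixels now threads the fitted reducer through, so test pixels get the same LDA projection the training pixels were fit with. A hedged usage sketch (file names are placeholders; imgs and rbf_svc come from boxm2_classify.py above):

# imgs.reducer was fit on the training pixels; rbf_svc is the trained SVM
data, Z = classify_pixels("eo.png", "ir.png", imgs.reducer, rbf_svc, dataset=imgs)
# data: per-pixel 2-D LDA features; Z: per-pixel class image (also saved as test.tiff)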
35 classifier/lda_example.py
@@ -0,0 +1,35 @@
+import pylab as pl
+
+from sklearn import datasets
+from sklearn.decomposition import PCA
+from sklearn.lda import LDA
+
+iris = datasets.load_iris()
+
+X = iris.data
+y = iris.target
+target_names = iris.target_names
+
+pca = PCA(n_components=2)
+X_r = pca.fit(X).transform(X)
+
+lda = LDA(n_components=2)
+X_r2 = lda.fit(X, y).transform(X)
+
+# Percentage of variance explained for each component
+print 'explained variance ratio (first two components):', \
+ pca.explained_variance_ratio_
+
+pl.figure()
+for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
+ pl.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
+pl.legend()
+pl.title('PCA of IRIS dataset')
+
+pl.figure()
+for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
+ pl.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
+pl.legend()
+pl.title('LDA of IRIS dataset')
+
+pl.show()
BIN  classifier/test.tiff
Binary file not shown
64 classifier/transImage.py
@@ -1,26 +1,54 @@
import numpy as np
import pylab as pl
+from sklearn.decomposition import PCA
+from sklearn.lda import LDA
+
+
+class LDAFeatures:
+
+ def __init__(self):
+ self.lda = None
+
+ def features(self, eoPixels, irPixels, gt=None):
+ #grab feature stack
+ fullFeatures = naive_features(eoPixels, irPixels)
+
+ #if the LDA from ground truth exists already, transform new features
+ if gt is None and self.lda is not None:
+ return self.lda.transform(fullFeatures)
+ assert gt is not None
+
+ #otherwise, train LDA
+ self.lda = LDA(n_components=2).fit(fullFeatures,gt)
+ return self.lda.transform(fullFeatures)
+
+class PCAFeatures:
+ def features(self, eoPixels, irPixels):
+ fullFeatures = naive_features(eoPixels, irPixels)
+ self.pca = PCA(n_components=2).fit(fullFeatures)
+ return self.pca.transform(fullFeatures)
+
+
+def naive_features(eoPixels, irPixels):
+ """Stacks a bunch of ratios/differences into a
+ high dimensional feature vector
+ """
+ #create sqr differences features for each channel
+ allPix = np.column_stack( (eoPixels, irPixels) )
+ intensity = np.sum(eoPixels[:,:3], axis=1) + irPixels[:,0] #total per-pixel intensity
+ diffs = []
+ for i in range(4):
+ for j in range(4):
+ if i==j: continue
+ diff = (allPix[:,i] - allPix[:,j]) / intensity
+ diffs.append(diff)
-def features(eoPixels, irPixels):
- #gbDiff = green_blue_difference(eoPixels, irPixels)
- irDiff = ir_difference(eoPixels, irPixels)
rRatio = pixelRatio(eoPixels, irPixels, "red")
gRatio = pixelRatio(eoPixels, irPixels, "green")
- #bRatio = pixelRatio(eoPixels, irPixels, "blue")
- #return np.column_stack( (gbDiff, irDiff, rRatio, bRatio, gRatio) )
- return np.column_stack( (gRatio, rRatio) )
-
-def ir_difference(eoPixels, irPixels):
- irdiff = irPixels[:,0] - eoPixels[:,2]
- intensity = np.sum(eoPixels[:,:3]) + irPixels[:,0]
- return irdiff/intensity
-
-def green_blue_difference(eoPixels, irPixels):
- """Assuming eo and ir pixes are passed in, returns array of green blue diff"""
- assert eoPixels.shape[0] == irPixels.shape[0]
- gbdiff = eoPixels[:,1] - eoPixels[:,2]
- intensity = np.sum(eoPixels[:,:3]) + irPixels[:,0]
- return gbdiff / intensity
+ bRatio = pixelRatio(eoPixels, irPixels, "blue")
+ iRatio = pixelRatio(eoPixels, irPixels, "ir")
+ return np.column_stack( [rRatio, bRatio, gRatio, iRatio]+diffs )
+
def pixelRatio(eoPixels, irPixels, pixel_type="red"):
"""Green / (Green + Red + Blue + IR)"""
2  paint.py
@@ -57,7 +57,7 @@
for p in range(0,NUMPASSES):
#update using every image once
- frames = range(0,len(imgs)-1)[::options.skip];
+ frames = range(0,len(imgs))[::options.skip];
random.shuffle(frames);
for idx, i in enumerate(frames):
print "Pass: ", p, ", Iteration ", idx, " of ", len(frames)
61 render_spiral.py
@@ -6,6 +6,16 @@
from optparse import OptionParser
+def pointsFromFile(fname):
+ f = open(fname, 'r')
+ numPts = int(f.readline())
+ print numPts
+ pts = []
+ for line in f:
+ pt = [float(x) for x in line.strip().split()]
+ pts.append(pt)
+ return pts
+
def render_save(cam):
"""method for rendering/saving/incrementing globalIdx"""
global globalIdx
@@ -86,8 +96,9 @@ def lookSmooth(point0, point1, camCenter, numImgs, dCamCenter=(0,0,0)):
parser.add_option("-x", "--xmlfile", action="store", type="string", dest="xml", default="model/uscene.xml", help="scene.xml file name (model/uscene.xml, model_fixed/scene.xml, rscene.xml)")
parser.add_option("-g", "--gpu", action="store", type="string", dest="gpu", default="gpu1", help="specify gpu (gpu0, gpu1, etc)")
parser.add_option("-m", "--maxFrames", action="store", type="int", dest="maxFrames", default=500, help="max number of frames to render")
- parser.add_option("-r", "--radius", action="store", type="float", dest="radius", default=10.0, help="starting cam radius")
+ parser.add_option("-r", "--radius", action="store", type="float", dest="radius", default=-1.0, help="starting cam radius")
parser.add_option("-i", "--incline", action="store", type="string", dest="incline", default="38:45", help="incline change throughout spiral in degrees (ex 38:45)")
+ parser.add_option("-p", "--points", action="store", type="string", dest="pointsFile", default="", help="Include points file as center points of spirals (no file defaults to center of model)")
(options, args) = parser.parse_args()
print options
print args
@@ -116,7 +127,10 @@ def lookSmooth(point0, point1, camCenter, numImgs, dCamCenter=(0,0,0)):
#init trajectory, look at center point - this can drift
startInc, endInc = [int(x) for x in options.incline.split(":")]
- radius = max(options.radius, 1.4*(sceneMax[0]-sceneMin[0]));
+ if options.radius > 0.0:
+ radius = options.radius
+ else:
+ radius = 1.4*(sceneMax[0]-sceneMin[0])
modCenter = numpy.add(sceneMax, sceneMin)/2.0
#Generate camera params - fLenght, ppoint
@@ -127,8 +141,15 @@ def lookSmooth(point0, point1, camCenter, numImgs, dCamCenter=(0,0,0)):
####Render the various maneuvers###########
###########################################
# Initialize list of look points to spiral around
- pts = [ modCenter ]
- #pts.append( (.2308, -.295, .0279) )
+ pts = []
+ if options.pointsFile != "":
+ pts = pointsFromFile(options.pointsFile)
+ else:
+ pts = [ modCenter ]
+ print "Rotating about points: ", pts
+
+ #pts.append( (-.1, .3, -0.009446) ) #pts.append( (.2308, -.295, .0279) )
+ #pts.append( (-.5, -.5, 0.0) )
#pts.append( (-.106, 0.297, .021) )
globalIdx = 0
@@ -136,35 +157,45 @@ def lookSmooth(point0, point1, camCenter, numImgs, dCamCenter=(0,0,0)):
currR = radius
currInc = startInc
currAz = -90.0
- dr = 0.0 #- (radius/1.45) / options.maxFrames
+ dr = - (radius/1.45) / options.maxFrames
dAz = 360.0 / options.maxFrames
dInc = (endInc-startInc)/options.maxFrames
-
dcam = (dAz, dInc, dr)
+
+ #iterate over points
for i in range(len(pts)):
#render first spiral
center = renderSpiral(pts[i], (currAz, currInc, currR), dcam, options.maxFrames)
if i < len(pts)-1 :
#drift to next center
+ print '======DRIFTING======='
cartCenter = drift(pts[0], pts[1], center, options.maxFrames/4)
sphereCenter = cart2sphere( cartCenter, pts[1] )
- #sphereCenter = (sphereCenter[0],-sphereCenter[1],sphereCenter[2])
+ sphereCenter = (sphereCenter[0],-sphereCenter[1],sphereCenter[2])
+
+ #numImg = options.maxFrames
+ #sphereCenter = renderSpiral(pts[0], (currAz, currInc, currR), dcam, options.maxFrames)
+ ##sphereCenter = (sphereCenter[0],-sphereCenter[1],sphereCenter[2])
- #render next spiral
- #numImg = options.maxFrames/2
- #dcam = (360/numImg, 0, 0)
- #center = renderSpiral(pts[1], sphereCenter,dcam,numImg)
- #
##drift to next center
+ #cartCenter = drift(pts[0], pts[1], sphereCenter, numImg/4)
+ #sphereCenter = cart2sphere(cartCenter, pts[1])
+
+ ##render next spiral
+ #numImg = options.maxFrames/2
+ #dcam = (360.0 / numImg, 0, 0)
+ #center = renderSpiral(pts[1], sphereCenter, dcam,numImg)
+
+ #drift to next center
#cartCenter = drift(pts[1], pts[2], center, options.maxFrames/4)
#sphereCenter = cart2sphere( cartCenter, pts[2] )
- ##sphereCenter = (sphereCenter[0],-sphereCenter[1],sphereCenter[2])
+ #sphereCenter = (sphereCenter[0],-sphereCenter[1],sphereCenter[2])
- ##render next spiral
+ #render next spiral
#numImg = options.maxFrames/2
#dcam = (360/numImg, 0, 0)
#center = renderSpiral(pts[2], sphereCenter, dcam, numImg)
#mencoder "mf://*.png" -mf fps=18 -o demo.avi -ovc lavc -lavcopts vcodec=msmpeg4v2:vbitrate=24000000
-
+
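
pointsFromFile expects a point count on the first line, then one whitespace-separated x y z triple per line. A hypothetical points file and invocation:

# spiral_points.txt (hypothetical):
#   2
#   0.0 0.0 0.0
#   -0.5 -0.5 0.0
python render_spiral.py --scene downtown --points spiral_points.txt --radius 2.0
# spirals around each point in turn, drifting between them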
27 scene_info.py
@@ -0,0 +1,27 @@
+from boxm2_scene_adaptor import *;
+from bbas_adaptor import *;
+from vil_adaptor import *;
+from vpgl_adaptor import *;
+import numpy, random, os, sys, math, scene_registry;
+from optparse import OptionParser
+
+if __name__ == "__main__":
+ # handle inputs
+ parser = OptionParser()
+ parser.add_option("-s", "--scene", action="store", type="string", dest="scene", help="specify scene name")
+ parser.add_option("-x", "--xmlfile", action="store", type="string", dest="xml", default="model/uscene.xml", help="scene.xml file name (model/uscene.xml, model_fixed/scene.xml, rscene.xml)")
+ (options, args) = parser.parse_args()
+ print options
+ print args
+
+ scene_root = scene_registry.scene_root( options.scene ); #
+ scene_path = scene_root + "/" + options.xml;
+ if not os.path.exists(scene_path):
+ print "Cannot find file: ", scene_path
+ sys.exit(-1)
+
+ #should initialize a GPU
+ scene = boxm2_scene_adaptor(scene_path);
+ (sceneMin, sceneMax) = scene.bounding_box();
+ print "Scene bounding box: ", sceneMin, " to ", sceneMax
+
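
A typical invocation of the new script, assuming the scene name is registered in scene_registry (the name below is hypothetical):

python scene_info.py --scene downtown --xml model/uscene.xml
# prints the parsed options, then the scene's bounding box corners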