Replaced Tabs with Spaces. :) The classifiers now return a list of data, where the first item in the list is the predicted label and the second item is the generic classifier output. You can use this output to perform thresholding, for example, so you can reject predictions made by a PredictableModel.
1 parent b8f8bf8 commit 47ef10bc8740182fb588661593f6f6f95e17f107 @bytefish committed Oct 20, 2012
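To illustrate the new convention from the commit message, here is a minimal sketch of thresholding a prediction. It assumes a DataSet has already been loaded as in fisherfaces_example.py below; the query_image and the threshold value of 10.0 are illustrative only and would need tuning:

    from facerec.feature import Fisherfaces
    from facerec.classifier import NearestNeighbor
    from facerec.distance import EuclideanDistance
    from facerec.model import PredictableModel

    model = PredictableModel(Fisherfaces(), NearestNeighbor(dist_metric=EuclideanDistance(), k=1))
    model.compute(dataSet.data, dataSet.labels)
    # predict() now returns [predicted_label, generic_classifier_output]:
    prediction = model.predict(query_image)
    predicted_label = prediction[0]
    sorted_y, sorted_distances = prediction[1]
    # reject the prediction if even the nearest neighbor is too far away:
    if sorted_distances[0] > 10.0:
        predicted_label = -1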
7 py/apps/scripts/fisherfaces_example.py
@@ -6,6 +6,7 @@
from facerec.feature import Fisherfaces
from facerec.distance import EuclideanDistance, CosineDistance
from facerec.classifier import NearestNeighbor
+from facerec.classifier import SVM
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import subplot
@@ -25,7 +26,7 @@
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# load a dataset (e.g. AT&T Facedatabase)
-dataSet = DataSet("/home/philipp/facerec/data/at")
+dataSet = DataSet("/home/philipp/facerec/data/yalefaces_recognition")
# define Fisherfaces as feature extraction method
feature = Fisherfaces()
# define a 1-NN classifier with Euclidean Distance
@@ -38,8 +39,8 @@
# images (note: eigenvectors are stored by column!)
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
- e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
- E.append(minmax_normalize(e,0,255, dtype=np.uint8))
+ e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
+ E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.pdf")
# perform a 10-fold cross validation
4 py/apps/scripts/simple_example.py
@@ -118,8 +118,8 @@ def read_images(path, sz=None):
# images (note: eigenvectors are stored by column!)
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
- e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
- E.append(minmax_normalize(e,0,255, dtype=np.uint8))
+ e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
+ E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
# Perform a 10-fold cross validation
98 py/apps/videofacerec/createsamples.py
@@ -31,56 +31,56 @@
'''
class App(object):
- def __init__(self, video_src, dst_dir, subject_name, face_sz=(130,130), cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"):
- self.dst_dir = dst_dir
- self.subject_name = subject_name
- self.face_sz = face_sz
- self.cam = create_capture(video_src)
- self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
- self.stored = 0
+ def __init__(self, video_src, dst_dir, subject_name, face_sz=(130,130), cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"):
+ self.dst_dir = dst_dir
+ self.subject_name = subject_name
+ self.face_sz = face_sz
+ self.cam = create_capture(video_src)
+ self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
+ self.stored = 0
- def saveImage(self, src):
- out_fn = "%s_%d.png" % (self.subject_name, self.stored)
- out_fn = os.path.join(self.dst_dir,out_fn)
- cv2.imwrite(out_fn, src)
- self.stored = self.stored + 1
-
- def run(self):
- while True:
- ret, frame = self.cam.read()
- # resize the frame to half the original size
- img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
- imgout = img.copy()
- faces = []
- for i,r in enumerate(self.detector.detect(img)):
- x0,y0,x1,y1 = r
- # get face, convert to grayscale & resize to face_sz
- face = img[y0:y1, x0:x1].copy()
- face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
- face = cv2.resize(face, self.face_sz, interpolation = cv2.INTER_CUBIC)
- # draw a rectangle to show the detection
- cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),1)
- # and append to currently detected faces
- faces.append(face)
- cv2.imshow("detections", imgout)
- # wait for a key press
- ch = cv2.waitKey(10)
- # store the currently detected faces
- if (ch == ord('s')) and (len(faces) > 0):
- for face in faces:
- self.saveImage(face)
- if ch == 27 or ch == ord('q'):
- break
+ def saveImage(self, src):
+ out_fn = "%s_%d.png" % (self.subject_name, self.stored)
+ out_fn = os.path.join(self.dst_dir,out_fn)
+ cv2.imwrite(out_fn, src)
+ self.stored = self.stored + 1
+
+ def run(self):
+ while True:
+ ret, frame = self.cam.read()
+ # resize the frame to half the original size
+ img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
+ imgout = img.copy()
+ faces = []
+ for i,r in enumerate(self.detector.detect(img)):
+ x0,y0,x1,y1 = r
+ # get face, convert to grayscale & resize to face_sz
+ face = img[y0:y1, x0:x1].copy()
+ face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
+ face = cv2.resize(face, self.face_sz, interpolation = cv2.INTER_CUBIC)
+ # draw a rectangle to show the detection
+ cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),1)
+ # and append to currently detected faces
+ faces.append(face)
+ cv2.imshow("detections", imgout)
+ # wait for a key press
+ ch = cv2.waitKey(10)
+ # store the currently detected faces
+ if (ch == ord('s')) and (len(faces) > 0):
+ for face in faces:
+ self.saveImage(face)
+ if ch == 27 or ch == ord('q'):
+ break
if __name__ == '__main__':
- import sys
- print help_message
- if len(sys.argv) < 4:
- sys.exit()
- # get params
- video_src = sys.argv[1]
- dataset_fn = sys.argv[2]
- subject_name = sys.argv[3]
- # start facerec app
- App(video_src, dataset_fn, subject_name).run()
+ import sys
+ print help_message
+ if len(sys.argv) < 4:
+ sys.exit()
+ # get params
+ video_src = sys.argv[1]
+ dataset_fn = sys.argv[2]
+ subject_name = sys.argv[3]
+ # start facerec app
+ App(video_src, dataset_fn, subject_name).run()
92 py/apps/videofacerec/videofacerec.py
@@ -37,51 +37,51 @@
'''
class App(object):
- def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"):
- self.face_sz = face_sz
- self.cam = create_capture(video_src)
- ret, self.frame = self.cam.read()
- self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
- # define feature extraction chain & classifier
- feature = ChainOperator(TanTriggsPreprocessing(), LBP())
- classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
- # build the predictable model
- self.predictor = PredictableModel(feature, classifier)
- # read the data & compute the predictor
- self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
- self.predictor.compute(self.dataSet.data,self.dataSet.labels)
-
- def run(self):
- while True:
- ret, frame = self.cam.read()
- # resize the frame to half the original size
- img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
- imgout = img.copy()
- for i,r in enumerate(self.detector.detect(img)):
- x0,y0,x1,y1 = r
- # get face, convert to grayscale & resize to face_sz
- face = img[y0:y1, x0:x1]
- face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
- face = cv2.resize(face, self.face_sz, interpolation = cv2.INTER_CUBIC)
- # get a prediction
- prediction = self.predictor.predict(face)
- # draw the face area
- cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)
- # draw the predicted name (folder name...)
- draw_str(imgout, (x0-20,y0-20), self.dataSet.names[prediction])
- cv2.imshow('videofacerec', imgout)
- # get pressed key
- ch = cv2.waitKey(10)
- if ch == 27:
- break
+ def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"):
+ self.face_sz = face_sz
+ self.cam = create_capture(video_src)
+ ret, self.frame = self.cam.read()
+ self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
+ # define feature extraction chain & classifier
+ feature = ChainOperator(TanTriggsPreprocessing(), LBP())
+ classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
+ # build the predictable model
+ self.predictor = PredictableModel(feature, classifier)
+ # read the data & compute the predictor
+ self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
+ self.predictor.compute(self.dataSet.data,self.dataSet.labels)
+
+ def run(self):
+ while True:
+ ret, frame = self.cam.read()
+ # resize the frame to half the original size
+ img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
+ imgout = img.copy()
+ for i,r in enumerate(self.detector.detect(img)):
+ x0,y0,x1,y1 = r
+ # get face, convert to grayscale & resize to face_sz
+ face = img[y0:y1, x0:x1]
+ face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
+ face = cv2.resize(face, self.face_sz, interpolation = cv2.INTER_CUBIC)
+ # get a prediction
+ prediction = self.predictor.predict(face)[0]
+ # draw the face area
+ cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)
+ # draw the predicted name (folder name...)
+ draw_str(imgout, (x0-20,y0-20), self.dataSet.names[prediction])
+ cv2.imshow('videofacerec', imgout)
+ # get pressed key
+ ch = cv2.waitKey(10)
+ if ch == 27:
+ break
if __name__ == '__main__':
- import sys
- print help_message
- if len(sys.argv) < 3:
- sys.exit()
- # get params
- video_src = sys.argv[1]
- dataset_fn = sys.argv[2]
- # start facerec app
- App(video_src, dataset_fn).run()
+ import sys
+ print help_message
+ if len(sys.argv) < 3:
+ sys.exit()
+ # get params
+ video_src = sys.argv[1]
+ dataset_fn = sys.argv[2]
+ # start facerec app
+ App(video_src, dataset_fn).run()
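With predict() returning a list, the video loop above now takes the label with [0]. A hypothetical extension inside run() would use the second element to skip drawing a name for faces the model does not know; distance_threshold is an assumed tuning value, not part of this commit:

    prediction = self.predictor.predict(face)
    predicted_label = prediction[0]
    sorted_y, sorted_distances = prediction[1]
    # only label faces whose nearest neighbor is close enough:
    if sorted_distances[0] < distance_threshold:
        draw_str(imgout, (x0-20,y0-20), self.dataSet.names[predicted_label])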
223 py/facerec/classifier.py
@@ -5,100 +5,167 @@
import operator as op
class AbstractClassifier(object):
- def compute(self,X,y):
- raise NotImplementedError("Every AbstractClassifier must implement the compute method.")
-
- def predict(self,X):
- raise NotImplementedError("Every AbstractClassifier must implement the predict method.")
+ def compute(self,X,y):
+ raise NotImplementedError("Every AbstractClassifier must implement the compute method.")
+
+ def predict(self,X):
+ raise NotImplementedError("Every AbstractClassifier must implement the predict method.")
class NearestNeighbor(AbstractClassifier):
- """
- Implements a k-Nearest Neighbor Model for a generic distance metric.
- """
- def __init__(self, dist_metric=EuclideanDistance(), k=1):
- AbstractClassifier.__init__(self)
- self.k = k
- self.dist_metric = dist_metric
+ """
+ Implements a k-Nearest Neighbor Model with a generic distance metric.
+ """
+ def __init__(self, dist_metric=EuclideanDistance(), k=1):
+ AbstractClassifier.__init__(self)
+ self.k = k
+ self.dist_metric = dist_metric
- def compute(self, X, y):
- self.X = X
- self.y = y
-
- def predict(self, q):
- distances = []
- for xi in self.X:
- xi = xi.reshape(-1,1)
- d = self.dist_metric(xi, q)
- distances.append(d)
- if len(distances) > len(self.y):
- raise Exception("More distances than classes. Is your distance metric correct?")
- idx = np.argsort(np.array(distances))
- sorted_y = self.y[idx]
- sorted_y = sorted_y[0:self.k]
- hist = dict((key,val) for key, val in enumerate(np.bincount(sorted_y)) if val)
- return max(hist.iteritems(), key=op.itemgetter(1))[0]
-
- def __repr__(self):
- return "NearestNeighbor (k=%s, dist_metric=%s)" % (self.k, repr(self.dist_metric))
+ def compute(self, X, y):
+ self.X = X
+ self.y = y
+
+ def predict(self, q):
+ """
+ Predicts the label for a given query q, using the k nearest neighbors.
+
+ Args:
+
+ q: The given query sample, which is an array.
+
+ Returns:
+
+ A list with the classifier output. In this framework it is
+ assumed that the predicted class is always returned as the
+ first element. In addition, this classifier returns the labels
+ and distances of the k nearest neighbors.
+
+ Example:
+
+ [ 0,
+ [
+ [ 0, 0, 1 ],
+ [ 10.132, 10.341, 13.314 ]
+ ]
+ ]
+
+ So if you want to perform a thresholding operation, you could
+ pick the distances in the second array of the generic classifier
+ output.
+
+ """
+ distances = []
+ for xi in self.X:
+ xi = xi.reshape(-1,1)
+ d = self.dist_metric(xi, q)
+ distances.append(d)
+ if len(distances) > len(self.y):
+ raise Exception("More distances than classes. Is your distance metric correct?")
+ distances = np.asarray(distances)
+ # Get the indices in an ascending sort order:
+ idx = np.argsort(distances)
+ # Sort the labels and distances accordingly:
+ sorted_y = self.y[idx]
+ sorted_distances = distances[idx]
+ # Take only the k first items:
+ sorted_y = sorted_y[0:self.k]
+ sorted_distances = sorted_distances[0:self.k]
+ # Make a histogram of them:
+ hist = dict((key,val) for key, val in enumerate(np.bincount(sorted_y)) if val)
+ # And get the bin with the maximum frequency:
+ predicted_label = max(hist.iteritems(), key=op.itemgetter(1))[0]
+ # A classifier should output a list with the predicted label as the
+ # first item, followed by generic classifier data. The k-Nearest
+ # Neighbor classifier outputs the labels and distances of the first
+ # k neighbors, so for a 1-NN threshold you would test the first
+ # entry of the sorted distances.
+ return [predicted_label, [sorted_y, sorted_distances]]
+
+ def __repr__(self):
+ return "NearestNeighbor (k=%s, dist_metric=%s)" % (self.k, repr(self.dist_metric))
# libsvm
try:
- from svmutil import *
+ from svmutil import *
except ImportError:
- logger = logging.getLogger("facerec.classifier.SVM")
- logger.debug("Import Error: libsvm bindings not available.")
+ logger = logging.getLogger("facerec.classifier.SVM")
+ logger.debug("Import Error: libsvm bindings not available.")
except:
- logger = logging.getLogger("facerec.classifier.SVM")
- logger.debug("Import Error: libsvm bindings not available.")
+ logger = logging.getLogger("facerec.classifier.SVM")
+ logger.debug("Import Error: libsvm bindings not available.")
import sys
from StringIO import StringIO
bkp_stdout=sys.stdout
class SVM(AbstractClassifier):
- """
- This class is just a simple wrapper to use libsvm in the
- CrossValidation module. If you don't use this framework
- use the validation methods coming with LibSVM, they are
- much easier to access (simply pass the correct class
- labels in svm_predict and you are done...).
+ """
+ This class is just a simple wrapper to use libsvm in the
+ CrossValidation module. If you don't use this framework
+ use the validation methods coming with LibSVM, they are
+ much easier to access (simply pass the correct class
+ labels in svm_predict and you are done...).
- The grid search method in this class is somewhat similar
- to libsvm grid.py, as it performs a parameter search over
- a logarithmic scale. Again if you don't use this framework,
- use the libsvm tools as they are much easier to access.
+ The grid search method in this class is somewhat similar
+ to libsvm grid.py, as it performs a parameter search over
+ a logarithmic scale. Again if you don't use this framework,
+ use the libsvm tools as they are much easier to access.
- Please keep in mind to normalize your input data, as expected
- for the model. There's no way to assume a generic normalization
- step.
- """
+ Please keep in mind to normalize your input data, as expected
+ for the model. There's no way to assume a generic normalization
+ step.
+ """
- def __init__(self, param=None):
- AbstractClassifier.__init__(self)
- self.logger = logging.getLogger("facerec.classifier.SVM")
- self.param = param
- self.svm = svm_model()
- if self.param is None:
- self.param = svm_parameter("-q")
-
- def compute(self, X, y):
- self.logger.debug("SVM TRAINING (C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (self.param.C, self.param.gamma, self.param.p, self.param.nu, self.param.coef0, self.param.degree))
- # turn data into a row vector (needed for libsvm)
- X = asRowMatrix(X)
- y = np.asarray(y)
- problem = svm_problem(y, X.tolist())
- self.svm = svm_train(problem, self.param)
- self.y = y
-
- def predict(self, X):
- X = np.asarray(X).reshape(1,-1)
- sys.stdout=StringIO()
- p_lbl, p_acc, p_val = svm_predict([0], X.tolist(), self.svm)
- sys.stdout=bkp_stdout
- return int(p_lbl[0])
-
- def __repr__(self):
- return "Support Vector Machine (kernel_type=%s, C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (KERNEL_TYPE[self.param.kernel_type], self.param.C, self.param.gamma, self.param.p, self.param.nu, self.param.coef0, self.param.degree)
+ def __init__(self, param=None):
+ AbstractClassifier.__init__(self)
+ self.logger = logging.getLogger("facerec.classifier.SVM")
+ self.param = param
+ self.svm = svm_model()
+ if self.param is None:
+ self.param = svm_parameter("-q")
+
+ def compute(self, X, y):
+ self.logger.debug("SVM TRAINING (C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (self.param.C, self.param.gamma, self.param.p, self.param.nu, self.param.coef0, self.param.degree))
+ # turn data into a row vector (needed for libsvm)
+ X = asRowMatrix(X)
+ y = np.asarray(y)
+ problem = svm_problem(y, X.tolist())
+ self.svm = svm_train(problem, self.param)
+ self.y = y
+
+ def predict(self, X):
+ """
+
+ Args:
+
+ X: The query image, which is an array.
+
+ Returns:
+
+ A list with the classifier output. In this framework it is
+ assumed that the predicted class is always returned as the
+ first element. In addition, this class returns the libsvm output
+ for p_labels, p_acc and p_vals. The libsvm help states:
+
+ p_labels: a list of predicted labels
+ p_acc: a tuple including accuracy (for classification), mean-squared
+ error, and squared correlation coefficient (for regression).
+ p_vals: a list of decision values or probability estimates (if '-b 1'
+ is specified). If k is the number of classes, for decision values,
+ each element includes results of predicting k(k-1)/2 binary-class
+ SVMs. For probabilities, each element contains k values indicating
+ the probability that the testing instance is in each class.
+ Note that the order of classes here is the same as 'model.label'
+ field in the model structure.
+ """
+ X = np.asarray(X).reshape(1,-1)
+ sys.stdout=StringIO()
+ p_lbl, p_acc, p_val = svm_predict([0], X.tolist(), self.svm)
+ sys.stdout=bkp_stdout
+ predicted_label = int(p_lbl[0])
+ return [predicted_label, [p_lbl, p_acc, p_val]]
+
+ def __repr__(self):
+ return "Support Vector Machine (kernel_type=%s, C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (KERNEL_TYPE[self.param.kernel_type], self.param.C, self.param.gamma, self.param.p, self.param.nu, self.param.coef0, self.param.degree)
100 py/facerec/dataset.py
@@ -5,55 +5,55 @@
import csv
class DataSet(object):
- def __init__(self, filename=None, sz=None):
- self.labels = []
- self.groups = []
- self.names = {}
- self.data = []
- self.sz = sz
- if filename is not None:
- self.load(filename)
+ def __init__(self, filename=None, sz=None):
+ self.labels = []
+ self.groups = []
+ self.names = {}
+ self.data = []
+ self.sz = sz
+ if filename is not None:
+ self.load(filename)
- def shuffle(self):
- idx = np.argsort([random.random() for i in xrange(len(self.labels))])
- self.data = [self.data[i] for i in idx]
- self.labels = self.labels[idx]
- if len(self.groups) == len(self.labels):
- self.groups = self.groups[idx]
+ def shuffle(self):
+ idx = np.argsort([random.random() for i in xrange(len(self.labels))])
+ self.data = [self.data[i] for i in idx]
+ self.labels = self.labels[idx]
+ if len(self.groups) == len(self.labels):
+ self.groups = self.groups[idx]
- def load(self, path):
- c = 0
- for dirname, dirnames, filenames in os.walk(path):
- for subdirname in dirnames:
- subject_path = os.path.join(dirname, subdirname)
- for filename in os.listdir(subject_path):
- try:
- im = Image.open(os.path.join(subject_path, filename))
- im = im.convert("L")
- # resize to given size (if given)
- if (self.sz is not None) and isinstance(self.sz, tuple) and (len(self.sz) == 2):
- im = im.resize(self.sz, Image.ANTIALIAS)
- self.data.append(np.asarray(im, dtype=np.uint8))
- self.labels.append(c)
- except IOError:
- pass
- self.names[c] = subdirname
- c = c+1
- self.labels = np.array(self.labels, dtype=np.int)
-
- def readFromCSV(self, filename):
- # <filename>;<classId>;<groupId>
- data = [ [str(line[0]), int(line[1]),int(line[2])] for line in csv.reader(open(filename, 'rb'), delimiter=";")]
- self.labels = np.array([item[1] for item in data])
- self.groups = np.array([item[2] for item in data])
- print self.labels
- print self.groups
- for item in data:
- im_filename = item[0]
- print im_filename
- im = Image.open(os.path.join(im_filename))
- im = im.convert("L")
- # resize to given size (if given)
- if (self.sz is not None) and isinstance(self.sz, tuple) and (len(self.sz) == 2):
- im = im.resize(self.sz, Image.ANTIALIAS)
- self.data.append(np.asarray(im, dtype=np.uint8))
+ def load(self, path):
+ c = 0
+ for dirname, dirnames, filenames in os.walk(path):
+ for subdirname in dirnames:
+ subject_path = os.path.join(dirname, subdirname)
+ for filename in os.listdir(subject_path):
+ try:
+ im = Image.open(os.path.join(subject_path, filename))
+ im = im.convert("L")
+ # resize to given size (if given)
+ if (self.sz is not None) and isinstance(self.sz, tuple) and (len(self.sz) == 2):
+ im = im.resize(self.sz, Image.ANTIALIAS)
+ self.data.append(np.asarray(im, dtype=np.uint8))
+ self.labels.append(c)
+ except IOError:
+ pass
+ self.names[c] = subdirname
+ c = c+1
+ self.labels = np.array(self.labels, dtype=np.int)
+
+ def readFromCSV(self, filename):
+ # <filename>;<classId>;<groupId>
+ data = [ [str(line[0]), int(line[1]),int(line[2])] for line in csv.reader(open(filename, 'rb'), delimiter=";")]
+ self.labels = np.array([item[1] for item in data])
+ self.groups = np.array([item[2] for item in data])
+ print self.labels
+ print self.groups
+ for item in data:
+ im_filename = item[0]
+ print im_filename
+ im = Image.open(os.path.join(im_filename))
+ im = im.convert("L")
+ # resize to given size (if given)
+ if (self.sz is not None) and isinstance(self.sz, tuple) and (len(self.sz) == 2):
+ im = im.resize(self.sz, Image.ANTIALIAS)
+ self.data.append(np.asarray(im, dtype=np.uint8))
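load() assigns one integer label per subdirectory it finds, so the on-disk layout drives the labels. A minimal sketch, with an illustrative path and folder names:

    from facerec.dataset import DataSet
    # expects one folder per subject, e.g. /path/to/dataset/subject1/*.png
    dataSet = DataSet("/path/to/dataset", sz=(130,130))
    print dataSet.names    # e.g. {0: 'subject1', 1: 'subject2'}
    print len(dataSet.data), dataSet.labels[0:5]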
230 py/facerec/distance.py
@@ -3,134 +3,134 @@
class AbstractDistance(object):
- def __init__(self, name):
- self._name = name
-
- def __call__(self,p,q):
- raise NotImplementedError("Every AbstractDistance must implement the __call__ method.")
-
- @property
- def name(self):
- return self._name
-
- def __repr__(self):
- return self._name
-
+ def __init__(self, name):
+ self._name = name
+
+ def __call__(self,p,q):
+ raise NotImplementedError("Every AbstractDistance must implement the __call__ method.")
+
+ @property
+ def name(self):
+ return self._name
+
+ def __repr__(self):
+ return self._name
+
class EuclideanDistance(AbstractDistance):
- def __init__(self):
- AbstractDistance.__init__(self,"EuclideanDistance")
+ def __init__(self):
+ AbstractDistance.__init__(self,"EuclideanDistance")
- def __call__(self, p, q):
- p = np.asarray(p).flatten()
- q = np.asarray(q).flatten()
- return np.sqrt(np.sum(np.power((p-q),2)))
+ def __call__(self, p, q):
+ p = np.asarray(p).flatten()
+ q = np.asarray(q).flatten()
+ return np.sqrt(np.sum(np.power((p-q),2)))
class CosineDistance(AbstractDistance):
- """
- Negated Mahalanobis Cosine Distance.
-
- Literature:
- "Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
- """
- def __init__(self):
- AbstractDistance.__init__(self,"CosineDistance")
-
- def __call__(self, p, q):
- p = np.asarray(p).flatten()
- q = np.asarray(q).flatten()
- return -np.dot(p.T,q) / (np.sqrt(np.dot(p,p.T)*np.dot(q,q.T)))
+ """
+ Negated Mahalanobis Cosine Distance.
+
+ Literature:
+ "Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
+ """
+ def __init__(self):
+ AbstractDistance.__init__(self,"CosineDistance")
+
+ def __call__(self, p, q):
+ p = np.asarray(p).flatten()
+ q = np.asarray(q).flatten()
+ return -np.dot(p.T,q) / (np.sqrt(np.dot(p,p.T)*np.dot(q,q.T)))
class NormalizedCorrelation(AbstractDistance):
- """
- Calculates the NormalizedCorrelation Coefficient for two vectors.
-
- Literature:
- "Multi-scale Local Binary Pattern Histogram for Face Recognition". PhD (2008). Chi Ho Chan, University Of Surrey.
- """
- def __init__(self):
- AbstractDistance.__init__(self,"NormalizedCorrelation")
-
- def __call__(self, p, q):
- p = np.asarray(p).flatten()
- q = np.asarray(q).flatten()
- pmu = p.mean()
- qmu = q.mean()
- pm = p - pmu
- qm = q - qmu
- return 1.0 - (np.dot(pm, qm) / (np.sqrt(np.dot(pm, pm)) * np.sqrt(np.dot(qm, qm))))
-
+ """
+ Calculates the NormalizedCorrelation Coefficient for two vectors.
+
+ Literature:
+ "Multi-scale Local Binary Pattern Histogram for Face Recognition". PhD (2008). Chi Ho Chan, University Of Surrey.
+ """
+ def __init__(self):
+ AbstractDistance.__init__(self,"NormalizedCorrelation")
+
+ def __call__(self, p, q):
+ p = np.asarray(p).flatten()
+ q = np.asarray(q).flatten()
+ pmu = p.mean()
+ qmu = q.mean()
+ pm = p - pmu
+ qm = q - qmu
+ return 1.0 - (np.dot(pm, qm) / (np.sqrt(np.dot(pm, pm)) * np.sqrt(np.dot(qm, qm))))
+
class ChiSquareDistance(AbstractDistance):
- """
- Chi-Square Distance, computed bin-wise between two vectors (e.g. histograms).
- """
- def __init__(self):
- AbstractDistance.__init__(self,"ChiSquareDistance")
-
- def __call__(self, p, q):
- p = np.asarray(p).flatten()
- q = np.asarray(q).flatten()
- bin_dists = (p-q)**2 / (p+q+np.finfo('float').eps)
- return np.sum(bin_dists)
+ """
+ Chi-Square Distance, computed bin-wise between two vectors (e.g. histograms).
+ """
+ def __init__(self):
+ AbstractDistance.__init__(self,"ChiSquareDistance")
+
+ def __call__(self, p, q):
+ p = np.asarray(p).flatten()
+ q = np.asarray(q).flatten()
+ bin_dists = (p-q)**2 / (p+q+np.finfo('float').eps)
+ return np.sum(bin_dists)
class HistogramIntersection(AbstractDistance):
- def __init__(self):
- AbstractDistance.__init__(self,"HistogramIntersection")
+ def __init__(self):
+ AbstractDistance.__init__(self,"HistogramIntersection")
- def __call__(self, p, q):
- p = np.asarray(p).flatten()
- q = np.asarray(q).flatten()
- return np.sum(np.minimum(p,q))
+ def __call__(self, p, q):
+ p = np.asarray(p).flatten()
+ q = np.asarray(q).flatten()
+ return np.sum(np.minimum(p,q))
class BinRatioDistance(AbstractDistance):
- """
- Calculates the Bin Ratio Dissimilarity.
-
- Literature:
- "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
- """
- def __init__(self):
- AbstractDistance.__init__(self,"BinRatioDistance")
-
- def __call__(self, p, q):
- p = np.asarray(p).flatten()
- q = np.asarray(q).flatten()
- a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
- b = ((p-q)**2 + 2*a*(p*q))/((p+q)**2+np.finfo('float').eps)
- return np.abs(np.sum(b))
+ """
+ Calculates the Bin Ratio Dissimilarity.
+
+ Literature:
+ "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
+ """
+ def __init__(self):
+ AbstractDistance.__init__(self,"BinRatioDistance")
+
+ def __call__(self, p, q):
+ p = np.asarray(p).flatten()
+ q = np.asarray(q).flatten()
+ a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
+ b = ((p-q)**2 + 2*a*(p*q))/((p+q)**2+np.finfo('float').eps)
+ return np.abs(np.sum(b))
class L1BinRatioDistance(AbstractDistance):
- """
- Calculates the L1-Bin Ratio Dissimilarity.
-
- Literature:
- "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
- """
- def __init__(self):
- AbstractDistance.__init__(self,"L1-BinRatioDistance")
-
- def __call__(self, p, q):
- p = np.asarray(p, dtype=np.float).flatten()
- q = np.asarray(q, dtype=np.float).flatten()
- a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
- b = ((p-q)**2 + 2*a*(p*q)) * abs(p-q) / ((p+q)**2+np.finfo('float').eps)
- return np.abs(np.sum(b))
+ """
+ Calculates the L1-Bin Ratio Dissimilarity.
+
+ Literature:
+ "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
+ """
+ def __init__(self):
+ AbstractDistance.__init__(self,"L1-BinRatioDistance")
+
+ def __call__(self, p, q):
+ p = np.asarray(p, dtype=np.float).flatten()
+ q = np.asarray(q, dtype=np.float).flatten()
+ a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
+ b = ((p-q)**2 + 2*a*(p*q)) * abs(p-q) / ((p+q)**2+np.finfo('float').eps)
+ return np.abs(np.sum(b))
class ChiSquareBRD(AbstractDistance):
- """
- Calculates the ChiSquare-Bin Ratio Dissimilarity.
-
- Literature:
- "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
- """
- def __init__(self):
- AbstractDistance.__init__(self,"ChiSquare-BinRatioDistance")
-
- def __call__(self, p, q):
- p = np.asarray(p, dtype=np.float).flatten()
- q = np.asarray(q, dtype=np.float).flatten()
- a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
- b = ((p-q)**2 + 2*a*(p*q)) * (p-q)**2 / ((p+q)**3+np.finfo('float').eps)
- return np.abs(np.sum(b))
+ """
+ Calculates the ChiSquare-Bin Ratio Dissimilarity.
+
+ Literature:
+ "Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
+ """
+ def __init__(self):
+ AbstractDistance.__init__(self,"ChiSquare-BinRatioDistance")
+
+ def __call__(self, p, q):
+ p = np.asarray(p, dtype=np.float).flatten()
+ q = np.asarray(q, dtype=np.float).flatten()
+ a = np.abs(1-np.dot(p,q.T)) # NumPy needs np.dot instead of * for reducing to tensor
+ b = ((p-q)**2 + 2*a*(p*q)) * (p-q)**2 / ((p+q)**3+np.finfo('float').eps)
+ return np.abs(np.sum(b))
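All of the metrics above are callables taking two vectors. A quick sketch comparing two of them on toy histograms (the values are illustrative):

    import numpy as np
    from facerec.distance import EuclideanDistance, ChiSquareDistance

    p = np.array([0.25, 0.25, 0.50])
    q = np.array([0.20, 0.30, 0.50])
    print EuclideanDistance()(p, q)   # sqrt(0.05**2 + 0.05**2) ~ 0.0707
    print ChiSquareDistance()(p, q)   # 0.05**2/0.45 + 0.05**2/0.55 ~ 0.0101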
466 py/facerec/feature.py
@@ -2,264 +2,264 @@
class AbstractFeature(object):
- def compute(self,X,y):
- raise NotImplementedError("Every AbstractFeature must implement the compute method.")
-
- def extract(self,X,y):
- raise NotImplementedError("Every AbstractFeature must implement the extract method.")
-
- def save(self):
- raise NotImplementedError("Not implemented yet (TODO).")
-
- def load(self):
- raise NotImplementedError("Not implemented yet (TODO).")
-
- def __repr__(self):
- return "AbstractFeature"
+ def compute(self,X,y):
+ raise NotImplementedError("Every AbstractFeature must implement the compute method.")
+
+ def extract(self,X):
+ raise NotImplementedError("Every AbstractFeature must implement the extract method.")
+
+ def save(self):
+ raise NotImplementedError("Not implemented yet (TODO).")
+
+ def load(self):
+ raise NotImplementedError("Not implemented yet (TODO).")
+
+ def __repr__(self):
+ return "AbstractFeature"
class Identity(AbstractFeature):
- """
- Simplest AbstractFeature you could imagine. It only forwards the data and does not operate on it,
- probably useful for learning a Support Vector Machine on raw data for example!
- """
- def __init__(self):
- AbstractFeature.__init__(self)
-
- def compute(self,X,y):
- return X
-
- def extract(self,X):
- return X
-
- def __repr__(self):
- return "Identity"
+ """
+ Simplest AbstractFeature you could imagine. It only forwards the data and does not operate on it,
+ probably useful for learning a Support Vector Machine on raw data for example!
+ """
+ def __init__(self):
+ AbstractFeature.__init__(self)
+
+ def compute(self,X,y):
+ return X
+
+ def extract(self,X):
+ return X
+
+ def __repr__(self):
+ return "Identity"
from facerec.util import asColumnMatrix
from facerec.operators import ChainOperator, CombineOperator
-
+
class PCA(AbstractFeature):
- def __init__(self, num_components=0):
- AbstractFeature.__init__(self)
- self._num_components = num_components
-
- def compute(self,X,y):
- # build the column matrix
- XC = asColumnMatrix(X)
- y = np.asarray(y)
- # set a valid number of components
- if self._num_components <= 0 or (self._num_components > XC.shape[1]-1):
- self._num_components = XC.shape[1]-1
- # center dataset
- self._mean = XC.mean(axis=1).reshape(-1,1)
- XC = XC - self._mean
- # perform an economy size decomposition (may still allocate too much memory for computation)
- self._eigenvectors, self._eigenvalues, variances = np.linalg.svd(XC, full_matrices=False)
- # sort eigenvectors by eigenvalues in descending order
- idx = np.argsort(-self._eigenvalues)
- self._eigenvalues, self._eigenvectors = self._eigenvalues[idx], self._eigenvectors[:,idx]
- # use only num_components
- self._eigenvectors = self._eigenvectors[0:,0:self._num_components].copy()
- self._eigenvalues = self._eigenvalues[0:self._num_components].copy()
- # finally turn singular values into eigenvalues
- self._eigenvalues = np.power(self._eigenvalues,2) / XC.shape[1]
- # get the features from the given data
- features = []
- for x in X:
- xp = self.project(x.reshape(-1,1))
- features.append(xp)
- return features
-
- def extract(self,X):
- X = np.asarray(X).reshape(-1,1)
- return self.project(X)
-
- def project(self, X):
- X = X - self._mean
- return np.dot(self._eigenvectors.T, X)
+ def __init__(self, num_components=0):
+ AbstractFeature.__init__(self)
+ self._num_components = num_components
+
+ def compute(self,X,y):
+ # build the column matrix
+ XC = asColumnMatrix(X)
+ y = np.asarray(y)
+ # set a valid number of components
+ if self._num_components <= 0 or (self._num_components > XC.shape[1]-1):
+ self._num_components = XC.shape[1]-1
+ # center dataset
+ self._mean = XC.mean(axis=1).reshape(-1,1)
+ XC = XC - self._mean
+ # perform an economy size decomposition (may still allocate too much memory for computation)
+ self._eigenvectors, self._eigenvalues, variances = np.linalg.svd(XC, full_matrices=False)
+ # sort eigenvectors by eigenvalues in descending order
+ idx = np.argsort(-self._eigenvalues)
+ self._eigenvalues, self._eigenvectors = self._eigenvalues[idx], self._eigenvectors[:,idx]
+ # use only num_components
+ self._eigenvectors = self._eigenvectors[0:,0:self._num_components].copy()
+ self._eigenvalues = self._eigenvalues[0:self._num_components].copy()
+ # finally turn singular values into eigenvalues
+ self._eigenvalues = np.power(self._eigenvalues,2) / XC.shape[1]
+ # get the features from the given data
+ features = []
+ for x in X:
+ xp = self.project(x.reshape(-1,1))
+ features.append(xp)
+ return features
+
+ def extract(self,X):
+ X = np.asarray(X).reshape(-1,1)
+ return self.project(X)
+
+ def project(self, X):
+ X = X - self._mean
+ return np.dot(self._eigenvectors.T, X)
- def reconstruct(self, X):
- X = np.dot(self._eigenvectors, X)
- return X + self._mean
+ def reconstruct(self, X):
+ X = np.dot(self._eigenvectors, X)
+ return X + self._mean
- @property
- def num_components(self):
- return self._num_components
+ @property
+ def num_components(self):
+ return self._num_components
- @property
- def eigenvalues(self):
- return self._eigenvalues
-
- @property
- def eigenvectors(self):
- return self._eigenvectors
+ @property
+ def eigenvalues(self):
+ return self._eigenvalues
+
+ @property
+ def eigenvectors(self):
+ return self._eigenvectors
- @property
- def mean(self):
- return self._mean
-
- def __repr__(self):
- return "PCA (num_components=%d)" % (self._num_components)
-
+ @property
+ def mean(self):
+ return self._mean
+
+ def __repr__(self):
+ return "PCA (num_components=%d)" % (self._num_components)
+
class LDA(AbstractFeature):
- def __init__(self, num_components=0):
- AbstractFeature.__init__(self)
- self._num_components = num_components
+ def __init__(self, num_components=0):
+ AbstractFeature.__init__(self)
+ self._num_components = num_components
- def compute(self, X, y):
- # build the column matrix
- XC = asColumnMatrix(X)
- y = np.asarray(y)
- # calculate dimensions
- d = XC.shape[0]
- c = len(np.unique(y))
- # set a valid number of components
- if self._num_components <= 0:
- self._num_components = c-1
- elif self._num_components > (c-1):
- self._num_components = c-1
- # calculate total mean
- meanTotal = XC.mean(axis=1).reshape(-1,1)
- # calculate the within and between scatter matrices
- Sw = np.zeros((d, d), dtype=np.float32)
- Sb = np.zeros((d, d), dtype=np.float32)
- for i in range(0,c):
- Xi = XC[:,np.where(y==i)[0]]
- meanClass = np.mean(Xi, axis = 1).reshape(-1,1)
- Sw = Sw + np.dot((Xi-meanClass), (Xi-meanClass).T)
- Sb = Sb + Xi.shape[1] * np.dot((meanClass - meanTotal), (meanClass - meanTotal).T)
- # solve eigenvalue problem for a general matrix
- self._eigenvalues, self._eigenvectors = np.linalg.eig(np.linalg.inv(Sw)*Sb)
- # sort eigenvectors by their eigenvalue in descending order
- idx = np.argsort(-self._eigenvalues.real)
- self._eigenvalues, self._eigenvectors = self._eigenvalues[idx], self._eigenvectors[:,idx]
- # only store (c-1) non-zero eigenvalues
- self._eigenvalues = np.array(self._eigenvalues[0:self._num_components].real, dtype=np.float32, copy=True)
- self._eigenvectors = np.matrix(self._eigenvectors[0:,0:self._num_components].real, dtype=np.float32, copy=True)
- # get the features from the given data
- features = []
- for x in X:
- xp = self.project(x.reshape(-1,1))
- features.append(xp)
- return features
-
- def project(self, X):
- return np.dot(self._eigenvectors.T, X)
+ def compute(self, X, y):
+ # build the column matrix
+ XC = asColumnMatrix(X)
+ y = np.asarray(y)
+ # calculate dimensions
+ d = XC.shape[0]
+ c = len(np.unique(y))
+ # set a valid number of components
+ if self._num_components <= 0:
+ self._num_components = c-1
+ elif self._num_components > (c-1):
+ self._num_components = c-1
+ # calculate total mean
+ meanTotal = XC.mean(axis=1).reshape(-1,1)
+ # calculate the within and between scatter matrices
+ Sw = np.zeros((d, d), dtype=np.float32)
+ Sb = np.zeros((d, d), dtype=np.float32)
+ for i in range(0,c):
+ Xi = XC[:,np.where(y==i)[0]]
+ meanClass = np.mean(Xi, axis = 1).reshape(-1,1)
+ Sw = Sw + np.dot((Xi-meanClass), (Xi-meanClass).T)
+ Sb = Sb + Xi.shape[1] * np.dot((meanClass - meanTotal), (meanClass - meanTotal).T)
+ # solve eigenvalue problem for a general matrix
+ self._eigenvalues, self._eigenvectors = np.linalg.eig(np.linalg.inv(Sw)*Sb)
+ # sort eigenvectors by their eigenvalue in descending order
+ idx = np.argsort(-self._eigenvalues.real)
+ self._eigenvalues, self._eigenvectors = self._eigenvalues[idx], self._eigenvectors[:,idx]
+ # only store (c-1) non-zero eigenvalues
+ self._eigenvalues = np.array(self._eigenvalues[0:self._num_components].real, dtype=np.float32, copy=True)
+ self._eigenvectors = np.matrix(self._eigenvectors[0:,0:self._num_components].real, dtype=np.float32, copy=True)
+ # get the features from the given data
+ features = []
+ for x in X:
+ xp = self.project(x.reshape(-1,1))
+ features.append(xp)
+ return features
+
+ def project(self, X):
+ return np.dot(self._eigenvectors.T, X)
- def reconstruct(self, X):
- return np.dot(self._eigenvectors, X)
+ def reconstruct(self, X):
+ return np.dot(self._eigenvectors, X)
- @property
- def num_components(self):
- return self._num_components
+ @property
+ def num_components(self):
+ return self._num_components
- @property
- def eigenvectors(self):
- return self._eigenvectors
-
- @property
- def eigenvalues(self):
- return self._eigenvalues
-
- def __repr__(self):
- return "LDA (num_components=%d)" % (self._num_components)
-
+ @property
+ def eigenvectors(self):
+ return self._eigenvectors
+
+ @property
+ def eigenvalues(self):
+ return self._eigenvalues
+
+ def __repr__(self):
+ return "LDA (num_components=%d)" % (self._num_components)
+
class Fisherfaces(AbstractFeature):
- def __init__(self, num_components=0):
- AbstractFeature.__init__(self)
- self._num_components = num_components
-
- def compute(self, X, y):
- # turn into numpy representation
- Xc = asColumnMatrix(X)
- y = np.asarray(y)
- # gather some statistics about the dataset
- n = len(y)
- c = len(np.unique(y))
- # define features to be extracted
- pca = PCA(num_components = (n-c))
- lda = LDA(num_components = self._num_components)
- # fisherfaces are a chained feature of PCA followed by LDA
- model = ChainOperator(pca,lda)
- # computing the chained model then calculates both decompositions
- model.compute(X,y)
- # store eigenvalues and number of components used
- self._eigenvalues = lda.eigenvalues
- self._num_components = lda.num_components
- # compute the new eigenspace as pca.eigenvectors*lda.eigenvectors
- self._eigenvectors = np.dot(pca.eigenvectors,lda.eigenvectors)
- # finally compute the features (these are the Fisherfaces)
- features = []
- for x in X:
- xp = self.project(x.reshape(-1,1))
- features.append(xp)
- return features
+ def __init__(self, num_components=0):
+ AbstractFeature.__init__(self)
+ self._num_components = num_components
+
+ def compute(self, X, y):
+ # turn into numpy representation
+ Xc = asColumnMatrix(X)
+ y = np.asarray(y)
+ # gather some statistics about the dataset
+ n = len(y)
+ c = len(np.unique(y))
+ # define features to be extracted
+ pca = PCA(num_components = (n-c))
+ lda = LDA(num_components = self._num_components)
+ # fisherfaces are a chained feature of PCA followed by LDA
+ model = ChainOperator(pca,lda)
+ # computing the chained model then calculates both decompositions
+ model.compute(X,y)
+ # store eigenvalues and number of components used
+ self._eigenvalues = lda.eigenvalues
+ self._num_components = lda.num_components
+ # compute the new eigenspace as pca.eigenvectors*lda.eigenvectors
+ self._eigenvectors = np.dot(pca.eigenvectors,lda.eigenvectors)
+ # finally compute the features (these are the Fisherfaces)
+ features = []
+ for x in X:
+ xp = self.project(x.reshape(-1,1))
+ features.append(xp)
+ return features
- def extract(self,X):
- X = np.asarray(X).reshape(-1,1)
- return self.project(X)
+ def extract(self,X):
+ X = np.asarray(X).reshape(-1,1)
+ return self.project(X)
- def project(self, X):
- return np.dot(self._eigenvectors.T, X)
-
- def reconstruct(self, X):
- return np.dot(self._eigenvectors, X)
+ def project(self, X):
+ return np.dot(self._eigenvectors.T, X)
+
+ def reconstruct(self, X):
+ return np.dot(self._eigenvectors, X)
- @property
- def num_components(self):
- return self._num_components
-
- @property
- def eigenvalues(self):
- return self._eigenvalues
-
- @property
- def eigenvectors(self):
- return self._eigenvectors
+ @property
+ def num_components(self):
+ return self._num_components
+
+ @property
+ def eigenvalues(self):
+ return self._eigenvalues
+
+ @property
+ def eigenvectors(self):
+ return self._eigenvectors
- def __repr__(self):
- return "Fisherfaces (num_components=%s)" % (self.num_components)
+ def __repr__(self):
+ return "Fisherfaces (num_components=%s)" % (self.num_components)
from facerec.lbp import LBPOperator, ExtendedLBP
class LBP(AbstractFeature):
- def __init__(self, lbp_operator=ExtendedLBP(), sz = (8,8)):
- AbstractFeature.__init__(self)
- if not isinstance(lbp_operator, LBPOperator):
- raise TypeError("Only an operator of type facerec.lbp.LBPOperator is a valid lbp_operator.")
- self.lbp_operator = lbp_operator
- self.sz = sz
-
- def compute(self,X,y):
- features = []
- for x in X:
- x = np.asarray(x)
- h = self.spatially_enhanced_histogram(x)
- features.append(h)
- return features
-
- def extract(self,X):
- X = np.asarray(X)
- return self.spatially_enhanced_histogram(X)
+ def __init__(self, lbp_operator=ExtendedLBP(), sz = (8,8)):
+ AbstractFeature.__init__(self)
+ if not isinstance(lbp_operator, LBPOperator):
+ raise TypeError("Only an operator of type facerec.lbp.LBPOperator is a valid lbp_operator.")
+ self.lbp_operator = lbp_operator
+ self.sz = sz
+
+ def compute(self,X,y):
+ features = []
+ for x in X:
+ x = np.asarray(x)
+ h = self.spatially_enhanced_histogram(x)
+ features.append(h)
+ return features
+
+ def extract(self,X):
+ X = np.asarray(X)
+ return self.spatially_enhanced_histogram(X)
- def spatially_enhanced_histogram(self, X):
- # calculate the LBP image
- L = self.lbp_operator(X)
- # calculate the grid geometry
- lbp_height, lbp_width = L.shape
- grid_rows, grid_cols = self.sz
- py = int(np.floor(lbp_height/grid_rows))
- px = int(np.floor(lbp_width/grid_cols))
- E = []
- for row in range(0,grid_rows):
- for col in range(0,grid_cols):
- C = L[row*py:(row+1)*py,col*px:(col+1)*px]
- H = np.histogram(C, bins=2**self.lbp_operator.neighbors, range=(0, 2**self.lbp_operator.neighbors), normed=True)[0]
- # probably useful to apply a mapping?
- E.extend(H)
- return np.asarray(E)
-
- def __repr__(self):
- return "Local Binary Pattern (operator=%s, grid=%s)" % (repr(self.lbp_operator), str(self.sz))
+ def spatially_enhanced_histogram(self, X):
+ # calculate the LBP image
+ L = self.lbp_operator(X)
+ # calculate the grid geometry
+ lbp_height, lbp_width = L.shape
+ grid_rows, grid_cols = self.sz
+ py = int(np.floor(lbp_height/grid_rows))
+ px = int(np.floor(lbp_width/grid_cols))
+ E = []
+ for row in range(0,grid_rows):
+ for col in range(0,grid_cols):
+ C = L[row*py:(row+1)*py,col*px:(col+1)*px]
+ H = np.histogram(C, bins=2**self.lbp_operator.neighbors, range=(0, 2**self.lbp_operator.neighbors), normed=True)[0]
+ # probably useful to apply a mapping?
+ E.extend(H)
+ return np.asarray(E)
+
+ def __repr__(self):
+ return "Local Binary Pattern (operator=%s, grid=%s)" % (repr(self.lbp_operator), str(self.sz))
178 py/facerec/lbp.py
@@ -1,98 +1,98 @@
import numpy as np
class LBPOperator(object):
- def __init__(self, neighbors):
- self._neighbors = neighbors
+ def __init__(self, neighbors):
+ self._neighbors = neighbors
- def __call__(self,X):
- raise NotImplementedError("Every LBPOperator must implement the __call__ method.")
-
- @property
- def neighbors(self):
- return self._neighbors
-
- def __repr__(self):
- return "LBPOperator (neighbors=%s)" % (self._neighbors)
+ def __call__(self,X):
+ raise NotImplementedError("Every LBPOperator must implement the __call__ method.")
+
+ @property
+ def neighbors(self):
+ return self._neighbors
+
+ def __repr__(self):
+ return "LBPOperator (neighbors=%s)" % (self._neighbors)
class OriginalLBP(LBPOperator):
- def __init__(self):
- LBPOperator.__init__(self, neighbors=8)
-
- def __call__(self,X):
- X = np.asarray(X)
- X = (1<<7) * (X[0:-2,0:-2] >= X[1:-1,1:-1]) \
- + (1<<6) * (X[0:-2,1:-1] >= X[1:-1,1:-1]) \
- + (1<<5) * (X[0:-2,2:] >= X[1:-1,1:-1]) \
- + (1<<4) * (X[1:-1,2:] >= X[1:-1,1:-1]) \
- + (1<<3) * (X[2:,2:] >= X[1:-1,1:-1]) \
- + (1<<2) * (X[2:,1:-1] >= X[1:-1,1:-1]) \
- + (1<<1) * (X[2:,:-2] >= X[1:-1,1:-1]) \
- + (1<<0) * (X[1:-1,:-2] >= X[1:-1,1:-1])
- return X
-
- def __repr__(self):
- return "OriginalLBP (neighbors=%s)" % (self._neighbors)
+ def __init__(self):
+ LBPOperator.__init__(self, neighbors=8)
+
+ def __call__(self,X):
+ X = np.asarray(X)
+ X = (1<<7) * (X[0:-2,0:-2] >= X[1:-1,1:-1]) \
+ + (1<<6) * (X[0:-2,1:-1] >= X[1:-1,1:-1]) \
+ + (1<<5) * (X[0:-2,2:] >= X[1:-1,1:-1]) \
+ + (1<<4) * (X[1:-1,2:] >= X[1:-1,1:-1]) \
+ + (1<<3) * (X[2:,2:] >= X[1:-1,1:-1]) \
+ + (1<<2) * (X[2:,1:-1] >= X[1:-1,1:-1]) \
+ + (1<<1) * (X[2:,:-2] >= X[1:-1,1:-1]) \
+ + (1<<0) * (X[1:-1,:-2] >= X[1:-1,1:-1])
+ return X
+
+ def __repr__(self):
+ return "OriginalLBP (neighbors=%s)" % (self._neighbors)
class ExtendedLBP(LBPOperator):
- def __init__(self, radius=1, neighbors=8):
- LBPOperator.__init__(self, neighbors=neighbors)
- self._radius = radius
-
- def __call__(self,X):
- X = np.asanyarray(X)
- ysize, xsize = X.shape
- # define circle
- angles = 2*np.pi/self._neighbors
- theta = np.arange(0,2*np.pi,angles)
- # calculate sample points on circle with radius
- sample_points = np.array([-np.sin(theta), np.cos(theta)]).T
- sample_points *= self._radius
- # find boundaries of the sample points
- miny=min(sample_points[:,0])
- maxy=max(sample_points[:,0])
- minx=min(sample_points[:,1])
- maxx=max(sample_points[:,1])
- # calculate block size, each LBP code is computed within a block of size bsizey*bsizex
- blocksizey = np.ceil(max(maxy,0)) - np.floor(min(miny,0)) + 1
- blocksizex = np.ceil(max(maxx,0)) - np.floor(min(minx,0)) + 1
- # coordinates of origin (0,0) in the block
- origy = 0 - np.floor(min(miny,0))
- origx = 0 - np.floor(min(minx,0))
- # calculate output image size
- dx = xsize - blocksizex + 1
- dy = ysize - blocksizey + 1
- # get center points
- C = np.asarray(X[origy:origy+dy,origx:origx+dx], dtype=np.uint8)
- result = np.zeros((dy,dx), dtype=np.uint32)
- for i,p in enumerate(sample_points):
- # get coordinate in the block
- y,x = p + (origy, origx)
- # Calculate floors, ceils and rounds for the x and y.
- fx = np.floor(x)
- fy = np.floor(y)
- cx = np.ceil(x)
- cy = np.ceil(y)
- # calculate fractional part
- ty = y - fy
- tx = x - fx
- # calculate interpolation weights
- w1 = (1 - tx) * (1 - ty)
- w2 = tx * (1 - ty)
- w3 = (1 - tx) * ty
- w4 = tx * ty
- # calculate interpolated image
- N = w1*X[fy:fy+dy,fx:fx+dx]
- N += w2*X[fy:fy+dy,cx:cx+dx]
- N += w3*X[cy:cy+dy,fx:fx+dx]
- N += w4*X[cy:cy+dy,cx:cx+dx]
- # update LBP codes
- D = N >= C
- result += (1<<i)*D
- return result
+ def __init__(self, radius=1, neighbors=8):
+ LBPOperator.__init__(self, neighbors=neighbors)
+ self._radius = radius
+
+ def __call__(self,X):
+ X = np.asanyarray(X)
+ ysize, xsize = X.shape
+ # define circle
+ angles = 2*np.pi/self._neighbors
+ theta = np.arange(0,2*np.pi,angles)
+ # calculate sample points on circle with radius
+ sample_points = np.array([-np.sin(theta), np.cos(theta)]).T
+ sample_points *= self._radius
+ # find boundaries of the sample points
+ miny=min(sample_points[:,0])
+ maxy=max(sample_points[:,0])
+ minx=min(sample_points[:,1])
+ maxx=max(sample_points[:,1])
+ # calculate block size, each LBP code is computed within a block of size bsizey*bsizex
+ blocksizey = np.ceil(max(maxy,0)) - np.floor(min(miny,0)) + 1
+ blocksizex = np.ceil(max(maxx,0)) - np.floor(min(minx,0)) + 1
+ # coordinates of origin (0,0) in the block
+ origy = 0 - np.floor(min(miny,0))
+ origx = 0 - np.floor(min(minx,0))
+ # calculate output image size
+ dx = xsize - blocksizex + 1
+ dy = ysize - blocksizey + 1
+ # get center points
+ C = np.asarray(X[origy:origy+dy,origx:origx+dx], dtype=np.uint8)
+ result = np.zeros((dy,dx), dtype=np.uint32)
+ for i,p in enumerate(sample_points):
+ # get coordinate in the block
+ y,x = p + (origy, origx)
+ # Calculate floors, ceils and rounds for the x and y.
+ fx = np.floor(x)
+ fy = np.floor(y)
+ cx = np.ceil(x)
+ cy = np.ceil(y)
+ # calculate fractional part
+ ty = y - fy
+ tx = x - fx
+ # calculate interpolation weights
+ w1 = (1 - tx) * (1 - ty)
+ w2 = tx * (1 - ty)
+ w3 = (1 - tx) * ty
+ w4 = tx * ty
+ # calculate interpolated image
+ N = w1*X[fy:fy+dy,fx:fx+dx]
+ N += w2*X[fy:fy+dy,cx:cx+dx]
+ N += w3*X[cy:cy+dy,fx:fx+dx]
+ N += w4*X[cy:cy+dy,cx:cx+dx]
+ # update LBP codes
+ D = N >= C
+ result += (1<<i)*D
+ return result
- @property
- def radius(self):
- return self._radius
-
- def __repr__(self):
- return "ExtendedLBP (neighbors=%s, radius=%s)" % (self._neighbors, self._radius)
+ @property
+ def radius(self):
+ return self._radius
+
+ def __repr__(self):
+ return "ExtendedLBP (neighbors=%s, radius=%s)" % (self._neighbors, self._radius)
40 py/facerec/model.py
@@ -2,24 +2,24 @@
from facerec.classifier import AbstractClassifier
class PredictableModel(object):
- def __init__(self, feature, classifier):
- if not isinstance(feature, AbstractFeature):
- raise TypeError("feature must be of type AbstractFeature!")
- if not isinstance(classifier, AbstractClassifier):
- raise TypeError("classifier must be of type AbstractClassifier!")
-
- self.feature = feature
- self.classifier = classifier
-
- def compute(self, X, y):
- features = self.feature.compute(X,y)
- self.classifier.compute(features,y)
+ def __init__(self, feature, classifier):
+ if not isinstance(feature, AbstractFeature):
+ raise TypeError("feature must be of type AbstractFeature!")
+ if not isinstance(classifier, AbstractClassifier):
+ raise TypeError("classifier must be of type AbstractClassifier!")
+
+ self.feature = feature
+ self.classifier = classifier
+
+ def compute(self, X, y):
+ features = self.feature.compute(X,y)
+ self.classifier.compute(features,y)
- def predict(self, X):
- q = self.feature.extract(X)
- return self.classifier.predict(q)
-
- def __repr__(self):
- feature_repr = repr(self.feature)
- classifier_repr = repr(self.classifier)
- return "PredictableModel (feature=%s, classifier=%s)" % (feature_repr, classifier_repr)
+ def predict(self, X):
+ q = self.feature.extract(X)
+ return self.classifier.predict(q)
+
+ def __repr__(self):
+ feature_repr = repr(self.feature)
+ classifier_repr = repr(self.classifier)
+ return "PredictableModel (feature=%s, classifier=%s)" % (feature_repr, classifier_repr)
38 py/facerec/normalization.py
@@ -1,24 +1,24 @@
import numpy as np
def minmax(X, low, high, minX=None, maxX=None, dtype=np.float):
- X = np.asarray(X)
- if minX is None:
- minX = np.min(X)
- if maxX is None:
- maxX = np.max(X)
- # normalize to [0...1].
- X = X - float(minX)
- X = X / float((maxX - minX))
- # scale to [low...high].
- X = X * (high-low)
- X = X + low
- return np.asarray(X,dtype=dtype)
+ X = np.asarray(X)
+ if minX is None:
+ minX = np.min(X)
+ if maxX is None:
+ maxX = np.max(X)
+ # normalize to [0...1].
+ X = X - float(minX)
+ X = X / float((maxX - minX))
+ # scale to [low...high].
+ X = X * (high-low)
+ X = X + low
+ return np.asarray(X,dtype=dtype)
def zscore(X, mean=None, std=None):
- X = np.asarray(X)
- if mean is None:
- mean = X.mean()
- if std is None:
- std = X.std()
- X = (X-mean)/std
- return X
+ X = np.asarray(X)
+ if mean is None:
+ mean = X.mean()
+ if std is None:
+ std = X.std()
+ X = (X-mean)/std
+ return X
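A small sketch of both helpers on a toy vector (the input values are illustrative):

    import numpy as np
    from facerec.normalization import minmax, zscore

    x = np.array([0., 5., 10.])
    print minmax(x, 0, 255, dtype=np.uint8)   # [  0 127 255]
    print zscore(x)                           # ~ [-1.22, 0.0, 1.22]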
210 py/facerec/operators.py
@@ -2,113 +2,113 @@
import numpy as np
class FeatureOperator(AbstractFeature):
- """
- A FeatureOperator operates on two feature models.
-
- Args:
- model1 [AbstractFeature]
- model2 [AbstractFeature]
- """
- def __init__(self,model1,model2):
- if (not isinstance(model1,AbstractFeature)) or (not isinstance(model2,AbstractFeature)):
- raise Exception("A FeatureOperator only works on classes implementing an AbstractFeature!")
- self.model1 = model1
- self.model2 = model2
-
- def __repr__(self):
- return "FeatureOperator(" + repr(self.model1) + "," + repr(self.model2) + ")"
-
+ """
+ A FeatureOperator operates on two feature models.
+
+ Args:
+ model1 [AbstractFeature]
+ model2 [AbstractFeature]
+ """
+ def __init__(self,model1,model2):
+ if (not isinstance(model1,AbstractFeature)) or (not isinstance(model2,AbstractFeature)):
+ raise Exception("A FeatureOperator only works on classes implementing an AbstractFeature!")
+ self.model1 = model1
+ self.model2 = model2
+
+ def __repr__(self):
+ return "FeatureOperator(" + repr(self.model1) + "," + repr(self.model2) + ")"
+
class ChainOperator(FeatureOperator):
- """
- The ChainOperator chains two feature extraction modules:
- model2.compute(model1.compute(X,y),y)
- Where X can be generic input data.
-
- Args:
- model1 [AbstractFeature]
- model2 [AbstractFeature]
- """
- def __init__(self,model1,model2):
- FeatureOperator.__init__(self,model1,model2)
-
- def compute(self,X,y):
- X = self.model1.compute(X,y)
- return self.model2.compute(X,y)
-
- def extract(self,X):
- X = self.model1.extract(X)
- return self.model2.extract(X)
-
- def __repr__(self):
- return "ChainOperator(" + repr(self.model1) + "," + repr(self.model2) + ")"
-
+ """
+ The ChainOperator chains two feature extraction modules:
+ model2.compute(model1.compute(X,y),y)
+ Where X can be generic input data.
+
+ Args:
+ model1 [AbstractFeature]
+ model2 [AbstractFeature]
+ """
+ def __init__(self,model1,model2):
+ FeatureOperator.__init__(self,model1,model2)
+
+ def compute(self,X,y):
+ X = self.model1.compute(X,y)
+ return self.model2.compute(X,y)
+
+ def extract(self,X):
+ X = self.model1.extract(X)
+ return self.model2.extract(X)
+
+ def __repr__(self):
+ return "ChainOperator(" + repr(self.model1) + "," + repr(self.model2) + ")"
+
class CombineOperator(FeatureOperator):
- """
- The CombineOperator combines the output of two feature extraction modules as:
- (model1.compute(X,y),model2.compute(X,y))
- , where the output of each feature is a [1xN] or [Nx1] feature vector.
-
-
- Args:
- model1 [AbstractFeature]
- model2 [AbstractFeature]
-
- """
- def __init__(self,model1,model2):
- FeatureOperator.__init__(self, model1, model2)
-
- def compute(self,X,y):
- A = self.model1.compute(X,y)
- B = self.model2.compute(X,y)
- C = []
- for i in range(0, len(A)):
- ai = np.asarray(A[i]).reshape(1,-1)
- bi = np.asarray(B[i]).reshape(1,-1)
- C.append(np.hstack((ai,bi)))
- return C
-
- def extract(self,X):
- ai = self.model1.extract(X)
- bi = self.model2.extract(X)
- ai = np.asarray(ai).reshape(1,-1)
- bi = np.asarray(bi).reshape(1,-1)
- return np.hstack((ai,bi))
+ """
+ The CombineOperator combines the output of two feature extraction modules as:
+ (model1.compute(X,y), model2.compute(X,y)),
+ where the output of each feature is a [1xN] or [Nx1] feature vector.
+
+
+ Args:
+ model1 [AbstractFeature]
+ model2 [AbstractFeature]
+
+ """
+ def __init__(self,model1,model2):
+ FeatureOperator.__init__(self, model1, model2)
+
+ def compute(self,X,y):
+ A = self.model1.compute(X,y)
+ B = self.model2.compute(X,y)
+ C = []
+ for i in range(0, len(A)):
+ ai = np.asarray(A[i]).reshape(1,-1)
+ bi = np.asarray(B[i]).reshape(1,-1)
+ C.append(np.hstack((ai,bi)))
+ return C
+
+ def extract(self,X):
+ ai = self.model1.extract(X)
+ bi = self.model2.extract(X)
+ ai = np.asarray(ai).reshape(1,-1)
+ bi = np.asarray(bi).reshape(1,-1)
+ return np.hstack((ai,bi))
- def __repr__(self):
- return "CombineOperator(" + repr(self.model1) + "," + repr(self.model2) + ")"
-
+ def __repr__(self):
+ return "CombineOperator(" + repr(self.model1) + "," + repr(self.model2) + ")"
+
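A hedged sketch of the row-vector concatenation this operator performs; the data is made up, and any two AbstractFeature implementations would do:

import numpy as np
from facerec.operators import CombineOperator
from facerec.preprocessing import MinMaxNormalizePreprocessing, ZScoreNormalizePreprocessing

X = [np.random.rand(8, 8) for i in range(3)]
y = [0, 1, 2]

# each operand's output is reshaped to a row vector and hstacked, so every
# sample becomes one [1 x (N1+N2)] feature vector
combined = CombineOperator(MinMaxNormalizePreprocessing(), ZScoreNormalizePreprocessing())
features = combined.compute(X, y)
print(features[0].shape)   # (1, 128) for 8x8 inputs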
class CombineOperatorND(FeatureOperator):
- """
- The CombineOperator combines the output of two multidimensional feature extraction modules.
- (model1.compute(X,y),model2.compute(X,y))
-
- Args:
- model1 [AbstractFeature]
- model2 [AbstractFeature]
- hstack [bool] stacks data horizontally if True and vertically if False
-
- """
- def __init__(self,model1,model2, hstack=True):
- FeatureOperator.__init__(self, model1, model2)
- self._hstack = hstack
-
- def compute(self,X,y):
- A = self.model1.compute(X,y)
- B = self.model2.compute(X,y)
- C = []
- for i in range(0, len(A)):
- if self._hstack:
- C.append(np.hstack((A[i],B[i])))
- else:
- C.append(np.vstack((A[i],B[i])))
- return C
-
- def extract(self,X):
- ai = self.model1.extract(X)
- bi = self.model2.extract(X)
- if self._hstack:
- return np.hstack((ai,bi))
- return np.vstack((ai,bi))
+ """
+ The CombineOperatorND combines the output of two multidimensional feature extraction modules:
+ (model1.compute(X,y), model2.compute(X,y))
+
+ Args:
+ model1 [AbstractFeature]
+ model2 [AbstractFeature]
+ hstack [bool] stacks data horizontally if True and vertically if False
+
+ """
+ def __init__(self,model1,model2, hstack=True):
+ FeatureOperator.__init__(self, model1, model2)
+ self._hstack = hstack
+
+ def compute(self,X,y):
+ A = self.model1.compute(X,y)
+ B = self.model2.compute(X,y)
+ C = []
+ for i in range(0, len(A)):
+ if self._hstack:
+ C.append(np.hstack((A[i],B[i])))
+ else:
+ C.append(np.vstack((A[i],B[i])))
+ return C
+
+ def extract(self,X):
+ ai = self.model1.extract(X)
+ bi = self.model2.extract(X)
+ if self._hstack:
+ return np.hstack((ai,bi))
+ return np.vstack((ai,bi))
- def __repr__(self):
- return "CombineOperatorND(" + repr(self.model1) + "," + repr(self.model2) + ", hstack=" + str(self._hstack) + ")"
+ def __repr__(self):
+ return "CombineOperatorND(" + repr(self.model1) + "," + repr(self.model2) + ", hstack=" + str(self._hstack) + ")"
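The difference from CombineOperator is that the outputs keep their dimensionality instead of being flattened to row vectors; a sketch with made-up data:

import numpy as np
from facerec.operators import CombineOperatorND
from facerec.preprocessing import HistogramEqualization, TanTriggsPreprocessing

X = [np.random.randint(0, 256, (32, 32)).astype(np.uint8) for i in range(2)]
y = [0, 1]

# hstack=False stacks the two 32x32 results vertically into one 64x32 array
op = CombineOperatorND(HistogramEqualization(), TanTriggsPreprocessing(), hstack=False)
print(op.compute(X, y)[0].shape)   # (64, 32)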
188 py/facerec/preprocessing.py
@@ -2,114 +2,114 @@
from facerec.feature import AbstractFeature
from facerec.util import asColumnMatrix
from scipy import ndimage
-
+
class HistogramEqualization(AbstractFeature):
- def __init__(self, num_bins=256):
- AbstractFeature.__init__(self)
- self._num_bins = num_bins
-
- def compute(self,X,y):
- Xp = []
- for xi in X:
- Xp.append(self.extract(xi))
- return Xp
-
- def extract(self,X):
- h, b = np.histogram(X.flatten(), self._num_bins, normed=True)
- cdf = h.cumsum()
- cdf = 255 * cdf / cdf[-1]
- return np.interp(X.flatten(), b[:-1], cdf).reshape(X.shape)
-
- def __repr__(self):
- return "HistogramEqualization (num_bins=%s)" % (self._num_bins)
-
+ def __init__(self, num_bins=256):
+ AbstractFeature.__init__(self)
+ self._num_bins = num_bins
+
+ def compute(self,X,y):
+ Xp = []
+ for xi in X:
+ Xp.append(self.extract(xi))
+ return Xp
+
+ def extract(self,X):
+ h, b = np.histogram(X.flatten(), self._num_bins, normed=True)
+ cdf = h.cumsum()
+ cdf = 255 * cdf / cdf[-1]
+ return np.interp(X.flatten(), b[:-1], cdf).reshape(X.shape)
+
+ def __repr__(self):
+ return "HistogramEqualization (num_bins=%s)" % (self._num_bins)
+
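A small sketch of the class above on a made-up low-contrast image. One caveat: np.histogram's normed argument has since been deprecated in NumPy (density=True is the modern spelling), so newer environments may need that adjustment:

import numpy as np
from facerec.preprocessing import HistogramEqualization

# a low-contrast test image: intensities bunched between 100 and 120
img = np.random.randint(100, 121, (64, 64)).astype(np.uint8)
he = HistogramEqualization(num_bins=256)
out = he.extract(img)
print(out.min())   # near 0 after the CDF remap
print(out.max())   # near 255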
class TanTriggsPreprocessing(AbstractFeature):
- def __init__(self, alpha = 0.1, tau = 10.0, gamma = 0.2, sigma0 = 1.0, sigma1 = 2.0):
- AbstractFeature.__init__(self)
- self._alpha = float(alpha)
- self._tau = float(tau)
- self._gamma = float(gamma)
- self._sigma0 = float(sigma0)
- self._sigma1 = float(sigma1)
-
- def compute(self,X,y):
- Xp = []
- for xi in X:
- Xp.append(self.extract(xi))
- return Xp
+ def __init__(self, alpha = 0.1, tau = 10.0, gamma = 0.2, sigma0 = 1.0, sigma1 = 2.0):
+ AbstractFeature.__init__(self)
+ self._alpha = float(alpha)
+ self._tau = float(tau)
+ self._gamma = float(gamma)
+ self._sigma0 = float(sigma0)
+ self._sigma1 = float(sigma1)
+
+ def compute(self,X,y):
+ Xp = []
+ for xi in X:
+ Xp.append(self.extract(xi))
+ return Xp
- def extract(self,X):
- X = np.array(X, dtype=np.float32)
- X = np.power(X,self._gamma)
- X = np.asarray(ndimage.gaussian_filter(X,self._sigma1) - ndimage.gaussian_filter(X,self._sigma0))
- X = X / np.power(np.mean(np.power(np.abs(X),self._alpha)), 1.0/self._alpha)
- X = X / np.power(np.mean(np.power(np.minimum(np.abs(X),self._tau),self._alpha)), 1.0/self._alpha)
- X = self._tau*np.tanh(X/self._tau)
- return X
+ def extract(self,X):
+ X = np.array(X, dtype=np.float32)
+ X = np.power(X,self._gamma)
+ X = np.asarray(ndimage.gaussian_filter(X,self._sigma1) - ndimage.gaussian_filter(X,self._sigma0))
+ X = X / np.power(np.mean(np.power(np.abs(X),self._alpha)), 1.0/self._alpha)
+ X = X / np.power(np.mean(np.power(np.minimum(np.abs(X),self._tau),self._alpha)), 1.0/self._alpha)
+ X = self._tau*np.tanh(X/self._tau)
+ return X
- def __repr__(self):
- return "TanTriggsPreprocessing (alpha=%.3f,tau=%.3f,gamma=%.3f,sigma0=%.3f,sigma1=%.3f)" % (self._alpha,self._tau,self._gamma,self._sigma0,self._sigma1)
+ def __repr__(self):
+ return "TanTriggsPreprocessing (alpha=%.3f,tau=%.3f,gamma=%.3f,sigma0=%.3f,sigma1=%.3f)" % (self._alpha,self._tau,self._gamma,self._sigma0,self._sigma1)
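A sketch of the Tan-Triggs chain above (gamma correction, difference of Gaussians, two-stage contrast equalization, tanh compression) on made-up data:

import numpy as np
from facerec.preprocessing import TanTriggsPreprocessing

img = np.random.randint(0, 256, (64, 64)).astype(np.uint8)
tt = TanTriggsPreprocessing(alpha=0.1, tau=10.0, gamma=0.2, sigma0=1.0, sigma1=2.0)
out = tt.extract(img)
# the final tau*tanh(X/tau) stage bounds the output to (-tau, tau)
print(out.min() > -10.0 and out.max() < 10.0)   # True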
from facerec.lbp import ExtendedLBP
class LBPPreprocessing(AbstractFeature):
- def __init__(self, lbp_operator = ExtendedLBP(radius=1, neighbors=8)):
- AbstractFeature.__init__(self)
- self._lbp_operator = lbp_operator
-
- def compute(self,X,y):
- Xp = []
- for xi in X:
- Xp.append(self.extract(xi))
- return Xp
+ def __init__(self, lbp_operator = ExtendedLBP(radius=1, neighbors=8)):
+ AbstractFeature.__init__(self)
+ self._lbp_operator = lbp_operator
+
+ def compute(self,X,y):
+ Xp = []
+ for xi in X:
+ Xp.append(self.extract(xi))
+ return Xp
- def extract(self,X):
- return self._lbp_operator(X)
+ def extract(self,X):
+ return self._lbp_operator(X)
- def __repr__(self):
- return "LBPPreprocessing (lbp_operator=%s)" % (repr(self._lbp_operator))
+ def __repr__(self):
+ return "LBPPreprocessing (lbp_operator=%s)" % (repr(self._lbp_operator))
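A sketch of plugging in the ExtendedLBP operator imported above; the output layout depends on the operator's implementation in facerec.lbp, so treat the result's shape as implementation-defined:

import numpy as np
from facerec.lbp import ExtendedLBP
from facerec.preprocessing import LBPPreprocessing

img = np.random.randint(0, 256, (64, 64)).astype(np.uint8)
# each pixel is replaced by its circular LBP code (radius 1, 8 neighbors)
lbp = LBPPreprocessing(lbp_operator=ExtendedLBP(radius=1, neighbors=8))
codes = lbp.extract(img)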
from facerec.normalization import zscore, minmax
class MinMaxNormalizePreprocessing(AbstractFeature):
- def __init__(self, low=0, high=1):
- AbstractFeature.__init__(self)
- self._low = low
- self._high = high
-
- def compute(self,X,y):
- Xp = []
- XC = asColumnMatrix(X)
- self._min = np.min(XC)
- self._max = np.max(XC)
- for xi in X:
- Xp.append(self.extract(xi))
- return Xp
-
- def extract(self,X):
- return minmax(X, self._low, self._high, self._min, self._max)
-
- def __repr__(self):
- return "MinMaxNormalizePreprocessing (low=%s, high=%s)" % (self._low, self._high)
-
+ def __init__(self, low=0, high=1):
+ AbstractFeature.__init__(self)
+ self._low = low
+ self._high = high
+
+ def compute(self,X,y):
+ Xp = []
+ XC = asColumnMatrix(X)
+ self._min = np.min(XC)
+ self._max = np.max(XC)
+ for xi in X:
+ Xp.append(self.extract(xi))
+ return Xp
+
+ def extract(self,X):
+ return minmax(X, self._low, self._high, self._min, self._max)
+
+ def __repr__(self):
+ return "MinMaxNormalizePreprocessing (low=%s, high=%s)" % (self._low, self._high)
+
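Worth spelling out: compute() fits the global min/max over the whole training set (via asColumnMatrix) and extract() reuses those fitted values, so compute() must run first. A sketch with made-up data:

import numpy as np
from facerec.preprocessing import MinMaxNormalizePreprocessing

X = [np.random.rand(16, 16) * 100 for i in range(5)]
y = range(5)

mm = MinMaxNormalizePreprocessing(low=0, high=1)
mm.compute(X, y)        # fits self._min / self._max on the training data
out = mm.extract(X[0])  # rescales with the training min/max, not the sample's own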
class ZScoreNormalizePreprocessing(AbstractFeature):
- def __init__(self):
- AbstractFeature.__init__(self)
- self._mean = 0.0
- self._std = 1.0
-
- def compute(self,X,y):
- XC = asColumnMatrix(X)
- self._mean = XC.mean()
- self._std = XC.std()
- Xp = []
- for xi in X:
- Xp.append(self.extract(xi))
- return Xp
-
- def extract(self,X):
- return zscore(X,self._mean, self._std)
+ def __init__(self):
+ AbstractFeature.__init__(self)
+ self._mean = 0.0
+ self._std = 1.0
+
+ def compute(self,X,y):
+ XC = asColumnMatrix(X)
+ self._mean = XC.mean()
+ self._std = XC.std()
+ Xp = []
+ for xi in X:
+ Xp.append(self.extract(xi))
+ return Xp
+
+ def extract(self,X):
+ return zscore(X,self._mean, self._std)
- def __repr__(self):
- return "ZScoreNormalizePreprocessing (mean=%s, std=%s)" % (self._mean, self._std)
+ def __repr__(self):
+ return "ZScoreNormalizePreprocessing (mean=%s, std=%s)" % (self._mean, self._std)
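And the analogous sketch for the z-score variant; unlike the min/max class it has safe defaults (mean 0.0, std 1.0), so extract() works even before compute() has been called:

import numpy as np
from facerec.preprocessing import ZScoreNormalizePreprocessing

X = [np.random.rand(16, 16) * 50 + 10 for i in range(4)]
y = [0, 0, 1, 1]

zs = ZScoreNormalizePreprocessing()
Xp = zs.compute(X, y)   # fits mean/std across the whole training set
print(zs)               # repr reports the fitted mean and std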
114 py/facerec/svm.py
@@ -8,66 +8,66 @@
def range_f(begin, end, step):
- seq = []
- while True: