
Commit

new architecture, clean old models, new model with high accuracy on 5 classes
MrEliptik committed Jan 21, 2019
1 parent 4acee67 commit e4ff769
Showing 8 changed files with 56 additions and 56 deletions.
10 changes: 6 additions & 4 deletions buildPosesDataset.py
@@ -9,8 +9,10 @@ def read_data(req_poses):
     count_im = 0
     count_classes = 0
     poses = os.listdir('Poses/')
-    for pose in poses:
-        if pose in req_poses:
+    if(req_poses[0]=='all'):
+        req_poses = poses.copy()
+    for pose in poses:
+        if pose in req_poses:
             print(">> Working on pose : " + pose)
             subdirs = os.listdir('Poses/' + pose + '/')
             count_classes += 1
@@ -41,13 +43,13 @@ def read_data(req_poses):
                 # Read image
                 im = cv2.imread(path)
                 im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
-                im = im.astype(dtype="float32")
+                im = im.astype(dtype="float64")
                 im = np.reshape(im, (28, 28, 1))
                 x[count_im][:][:][:] = im
                 y[count_im] = count_classes
                 count_im += 1
         count_classes += 1
-        x = x/255
+    x = x/255

    return x, y

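For reference, a minimal usage sketch of the updated loader, assuming load_data wraps the read_data shown above (that is how cnn/cnn.py calls it below). Passing "all" now expands to every directory under Poses/, and images come back as float64 arrays scaled to [0, 1]:

import numpy as np
import buildPosesDataset as dataset

# "all" now expands to every pose directory found under Poses/
x_train, y_train, x_test, y_test = dataset.load_data(poses=["all"])

print(x_train.shape)        # e.g. (N, 28, 28, 1), float64 values in [0, 1]
print(np.unique(y_train))   # one integer label per pose class

# An explicit list of poses still works as before
x_tr, y_tr, x_te, y_te = dataset.load_data(poses=["Palm", "Startrek"])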
98 changes: 49 additions & 49 deletions cnn/cnn.py
@@ -6,21 +6,24 @@
 from keras import backend as K
 from keras.layers.normalization import BatchNormalization
 import matplotlib.pyplot as plt
+import numpy as np
 
 import os,sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 import buildPosesDataset as dataset
 
 def train():
-    batch_size = 64
-    num_classes = 2
-    epochs = 5
+    batch_size = 128
+    epochs = 10
+    learning_rate = 0.01
 
     # input image dimensions
     img_rows, img_cols = 28, 28
 
     # the data, shuffled and split between train and test sets
-    x_train, y_train, x_test, y_test = dataset.load_data(poses=["Palm", "Startrek"])
+    x_train, y_train, x_test, y_test = dataset.load_data(poses=["all"])
+
+    num_classes = len(np.unique(y_test))
 
     if K.image_data_format() == 'channels_first':
         x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
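The hunk is collapsed right after the channels_first branch. The usual Keras backend check that this code appears to follow (an assumption based on the visible lines and the later use of input_shape) completes like this:

if K.image_data_format() == 'channels_first':
    # channels come first: (1, 28, 28)
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    # channels come last: (28, 28, 1)
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)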
@@ -40,46 +43,31 @@ def train():
     y_test = keras.utils.to_categorical(y_test, num_classes)
 
-    ####### Model structure #######
+    #model building
     model = Sequential()
 
-    model.add(Conv2D(64, (3, 3), input_shape=(28,28,1)))
-    model.add(Activation('relu'))
-    model.add(Dropout(0.1))
-    BatchNormalization(axis=-1)
-    model.add(Conv2D(64, (3, 3)))
-    model.add(Activation('relu'))
-    model.add(Dropout(0.1))
-    BatchNormalization(axis=-1)
-    model.add(Conv2D(64, (3, 3)))
-    model.add(Activation('relu'))
-    model.add(Dropout(0.1))
-    model.add(MaxPooling2D(pool_size=(2,2)))
-
-    BatchNormalization(axis=-1)
-    model.add(Conv2D(128,(3, 3)))
-    model.add(Activation('relu'))
-    model.add(Dropout(0.1))
-    BatchNormalization(axis=-1)
-    model.add(Conv2D(128, (3, 3)))
-    model.add(Activation('relu'))
-    model.add(Dropout(0.1))
-    model.add(MaxPooling2D(pool_size=(2,2)))
-
+    #convolutional layer with rectified linear unit activation
+    model.add(Conv2D(32, kernel_size=(3, 3),
+                     activation='relu',
+                     input_shape=input_shape))
+    # 32 convolution filters used each of size 3x3
+    # again
+    model.add(Conv2D(64, (3, 3), activation='relu'))
+    # 64 convolution filters used each of size 3x3
+    # choose the best features via pooling
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+    # randomly turn neurons on and off to improve convergence
+    model.add(Dropout(0.25))
+    # flatten since too many dimensions, we only want a classification output
     model.add(Flatten())
-
-    # Fully connected layer
-    BatchNormalization()
-    model.add(Dense(1024))
-    model.add(Activation('relu'))
+    # fully connected to get all relevant data
+    model.add(Dense(128, activation='relu'))
+    # one more dropout for convergence' sake :)
     model.add(Dropout(0.5))
-    BatchNormalization()
-
-    # Classification layer
-    model.add(Dense(num_classes))
-    model.add(Activation('softmax'))
-
+    # output a softmax to squash the matrix into output probabilities
+    model.add(Dense(num_classes, activation='softmax'))
+    # categorical ce since we have multiple classes (10)
     model.compile(loss=keras.losses.categorical_crossentropy,
-                  optimizer=keras.optimizers.Adam(),
+                  optimizer=keras.optimizers.Adam(lr=learning_rate),
                   metrics=['accuracy'])
 
     ####### TRAINING #######
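Pulling the added lines above together, the post-commit model reads as follows. This is a consolidated sketch for readability: the imports are assumed from the collapsed top of the file, build_model is a hypothetical wrapper (the commit builds the model inline in train()), and input_shape/num_classes come from the surrounding code.

import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

def build_model(input_shape, num_classes, learning_rate=0.01):
    # Architecture added in this commit: two conv layers, max pooling,
    # one hidden dense layer, and a softmax classifier.
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(lr=learning_rate),
                  metrics=['accuracy'])
    return model

With the dataset loaded as poses=["all"], something like build_model((28, 28, 1), num_classes=5) would give the 5-class network referenced in the commit message.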
@@ -93,15 +81,27 @@
 
     print('Test loss:', score[0])
     print('Test accuracy:', score[1])
-    model.save("cnn/models/hand_poses_2poses_" + str(epochs) + ".h5")
-
-    # summarize history for loss
-    plt.plot(hist.history["loss"])
-    plt.plot(hist.history["val_loss"])
-    plt.title("model loss")
-    plt.ylabel("loss")
-    plt.xlabel("epoch")
-    plt.legend(["train", "val"], loc="upper left")
+    model.save("cnn/models/hand_poses_" + str(epochs) + ".h5")
+
+    # plotting the metrics
+    fig = plt.figure()
+    plt.subplot(2,1,1)
+    plt.plot(hist.history['acc'])
+    plt.plot(hist.history['val_acc'])
+    plt.title('model accuracy')
+    plt.ylabel('accuracy')
+    plt.xlabel('epoch')
+    plt.legend(['train', 'test'], loc='lower right')
+
+    plt.subplot(2,1,2)
+    plt.plot(hist.history['loss'])
+    plt.plot(hist.history['val_loss'])
+    plt.title('model loss')
+    plt.ylabel('loss')
+    plt.xlabel('epoch')
+    plt.legend(['train', 'test'], loc='upper right')
+
+    plt.tight_layout()
     plt.show()
 
 if __name__ == "__main__":
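The fit call itself is collapsed in this hunk. A typical Keras 2.x training block that would produce the hist and score objects used above is sketched here; this is an assumption, not the commit's exact code:

# Train with the held-out set as validation so that hist.history contains
# 'acc', 'val_acc', 'loss' and 'val_loss' for the plots above
hist = model.fit(x_train, y_train,
                 batch_size=batch_size,
                 epochs=epochs,
                 verbose=1,
                 validation_data=(x_test, y_test))

# Final evaluation: score[0] is the loss, score[1] the accuracy
score = model.evaluate(x_test, y_test, verbose=0)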
Binary file added cnn/models/hand_poses_10.h5
Binary file removed cnn/models/hand_poses_25_.h5
Binary file removed cnn/models/hand_poses_2poses_14.h5
Binary file removed cnn/models/hand_poses_2poses_5.h5
Binary file removed cnn/models/hand_poses_80_.h5
4 changes: 1 addition & 3 deletions utils/pose_classification_utils.py
@@ -29,9 +29,8 @@ def classify(model, im):
 from keras.models import load_model
 
 print(">> loading keras model for pose classification")
-model = load_model('cnn/models/hand_poses_2poses_14.h5')
+model = load_model('cnn/models/hand_poses_10.h5')
 
-'''
 # Fist
 print('<< FIST >>')
 im = cv2.imread("Poses/Fist/Fist_1/Fist_1_1302.png")
@@ -46,7 +45,6 @@ def classify(model, im):
 print('<< FOUR >>')
 im = cv2.imread("Poses/Four/Four_1/Four_1_867.png")
 print(classify(model, im))
-'''
 
 # Startrek
 print('<< Startrek >>')
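The body of classify() sits above the visible hunk. A helper consistent with the 28x28 grayscale pipeline in buildPosesDataset.py might look like the following; this is a hypothetical sketch, and the repo's actual preprocessing may differ:

import cv2
import numpy as np

def classify(model, im):
    # Hypothetical sketch: shrink to the 28x28 grayscale float input the CNN was trained on
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (28, 28))
    gray = gray.astype("float64") / 255
    batch = np.reshape(gray, (1, 28, 28, 1))
    probs = model.predict(batch)[0]
    return np.argmax(probs), probs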
