NN_classification.py
import numpy as np
from mnist import load_mnist
import matplotlib.pyplot as plt
from scipy.special import expit
from pylab import *
from random import randint
import time
import getopt
import sys
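# A small fully connected neural network for classifying MNIST digits:
# sigmoid activations, squared-error loss, and per-sample gradient descent.
# Training and test images are loaded through the local `mnist` helper module.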
class NeuralNetwork(object):
    def __init__(self, layers, learning_parameter):
        # One weight matrix per layer transition, initialised from a standard normal.
        self.weights = [np.random.randn(y, x) for x, y in zip(layers[:-1], layers[1:])]
        self.learning_parameter = learning_parameter
        self.layers = layers
        self.errors = []

    def activation_function(self, z, deriv=False):
        # Logistic sigmoid written via tanh; deriv=True returns its derivative.
        if deriv:
            return .25 * (1 - np.tanh(.5 * z)**2)
        return .5 * (1 + np.tanh(.5 * z))

    def feed_forward(self, X):
        y = np.array(X)
        for w in self.weights:
            y = self.activation_function(np.dot(w, y))
        return y
    def back_propagate(self, X, y, i):
        # Forward pass: store each pre-activation (zs) and activation.
        activation = X
        activations = [X]
        zs = []
        for weight in self.weights:
            z = np.dot(weight, activation)
            zs.append(z)
            activation = self.activation_function(z)
            activations.append(activation)
        # Record the squared error of this sample for reporting.
        error = activations[-1] - y
        squared_error = 0.0
        for e in error:
            squared_error += e**2.0
        squared_error /= 2.0
        self.errors.append(squared_error[0])
        # Output-layer delta first, then propagate deltas backwards through the layers.
        deltas = [None] * len(self.weights)
        deltas[-1] = error * self.activation_function(zs[-1], deriv=True)
        for i in xrange(2, len(self.layers)):
            delta = np.dot(self.weights[-i+1].T, deltas[-i+1]) * self.activation_function(zs[-i], deriv=True)
            deltas[-i] = delta
        # Gradient descent step on every weight matrix.
        for i in range(0, len(activations)-1):
            dEdW = np.dot(deltas[i], activations[i].T)
            self.weights[i] = self.weights[i] - self.learning_parameter*dEdW
def train(NN):
    print "Reading training data..."
    images_training, training_labels = load_mnist('training')
    # Flatten each 28x28 image into a 784x1 column vector scaled to [0, 1].
    training_inputs = [np.reshape(x, (784, 1))/255.0 for x in images_training]
    print "Training NN..."
    for i in range(0, len(images_training)):
        image = training_inputs[i]
        desired_output = get_desired_output(training_labels[i][0])
        NN.back_propagate(image, desired_output, i)
    print "Squared Error after feeding forward first training sample: ", NN.errors[0]
    print "Squared Error after feeding forward last training sample: ", NN.errors[len(images_training)-1]
def test(NN, is_single):
    wrong_count = 0
    correct_count = 0
    print "Reading test data..."
    images_testing, testing_labels = load_mnist('testing')
    testing_inputs = [np.reshape(x, (784, 1))/255.0 for x in images_testing]
    if is_single:
        # Classify one randomly chosen test image and display it.
        index = randint(0, len(images_testing) - 1)
        image = testing_inputs[index]
        label = testing_labels[index][0]
        output = NN.feed_forward(image)
        prediction = output.argmax()
        print "I predict:", prediction
        imshow(images_testing[index], cmap=cm.gray)
        show()
        return
    print "Running tests"
    for i in range(0, len(testing_inputs)):
        image = testing_inputs[i]
        label = testing_labels[i][0]
        output = NN.feed_forward(image)
        prediction = output.argmax()
        if prediction == label:
            correct_count += 1
        else:
            wrong_count += 1
    accuracy = float(correct_count) / float(wrong_count + correct_count)
    print "Accuracy:", accuracy*100
# Default configuration, overridable from the command line.
layers = [784, 100, 10]
learning_parameter = 0.2
is_random = False

def get_desired_output(j):
    # One-hot encode digit j as a 10x1 column vector.
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
def handle_arguments(argv):
    optlist, args = getopt.getopt(argv, '', ['layers=', 'learning_parameter=', 'r'])
    global layers
    global learning_parameter
    global is_random
    for opt in optlist:
        if '--layers' in opt:
            # Replace the default hidden layer with the sizes given on the command line.
            del layers[1]
            layer_sizes = [int(l) for l in opt[1].split(',')]
            layers[1:1] = layer_sizes
        elif '--learning_parameter' in opt:
            learning_parameter = float(opt[1])
        elif '--r' in opt:
            is_random = True
        else:
            usage()

def usage():
    print "--layers <hidden,layer,sizes,separated,by,commas,no,spaces> --learning_parameter <parameter> --r"
handle_arguments(sys.argv[1:])
start_time = time.time()
NN = NeuralNetwork(layers,learning_parameter)
train(NN)
test(NN,is_random)
print("--- Time took: %s seconds ---" % (time.time() - start_time))