-
Notifications
You must be signed in to change notification settings - Fork 0
/
2.implementation.py
128 lines (92 loc) · 3.74 KB
/
2.implementation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# -*- coding: utf-8 -*-
"""Untitled17.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bTeTiW5gUuGwzACLJz40AHJXQjdCgyPg
"""
from keras.datasets import mnist

# MNIST ships pre-split: 60k training and 10k test 28x28 grayscale digits.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print('X_train shape', X_train.shape, 'X_test shape', X_test.shape)

import matplotlib.pyplot as plt
import random

# Preview 8 random training digits in a 2x4 grid.
plt.figure(figsize=(12, 5))
for i in range(8):
    # BUG FIX: random.randint(a, b) is inclusive of b, so the original
    # randint(0, len(X_train)) could yield len(X_train) and raise
    # IndexError; randrange excludes the upper bound.
    ind = random.randrange(len(X_train))
    plt.subplot(240 + 1 + i)
    plt.imshow(X_train[ind])
from keras.utils.np_utils import to_categorical
def preprocess_data(X_train, y_train, X_test, y_test):
    """Reshape, scale, and one-hot encode the MNIST arrays.

    Parameters
    ----------
    X_train, X_test : uint8 arrays of shape (n, 28, 28)
    y_train, y_test : integer label arrays of shape (n,)

    Returns
    -------
    (X_train, y_train, X_test, y_test) where images are float32 in
    [0, 1] with a trailing channel axis and labels are one-hot encoded.
    """
    # Add the single grayscale channel axis Conv2D expects: (n, 28, 28, 1).
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    # Scale pixel intensities from [0, 255] to [0, 1].
    X_train = X_train / 255.0
    # BUG FIX: the original assigned the scaled test set to `X_test_norm`
    # but returned the unscaled `X_test`, so validation and evaluation ran
    # on raw 0-255 pixels while training used 0-1 inputs.
    X_test = X_test / 255.0
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    return X_train, y_train, X_test, y_test
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from tensorflow.keras.optimizers import RMSprop
from keras.metrics import categorical_crossentropy
from tensorflow.keras.optimizers import SGD
def LeNet():
    """Build and compile a LeNet-style CNN for 28x28x1 MNIST digits.

    Returns
    -------
    A compiled Keras Sequential model (softmax over the 10 digit classes,
    SGD optimizer, categorical cross-entropy loss, accuracy metric).
    """
    model = Sequential()
    # C1: 6 feature maps; 'same' padding keeps the 28x28 spatial size.
    model.add(Conv2D(filters=6, kernel_size=(5, 5), padding='same',
                     activation='relu', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # C3: 16 feature maps with valid padding, as in the classic LeNet-5.
    model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(120, activation='relu'))
    # 10-way softmax output, one unit per digit class.
    model.add(Dense(10, activation='softmax'))
    # FIX: the `lr` keyword was deprecated and later removed from Keras
    # optimizers; `learning_rate` is the supported name.
    opt = SGD(learning_rate=0.01)
    # Use the canonical string loss identifier rather than the function
    # imported from keras.metrics (same loss, clearer intent).
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
# Instantiate and compile the network, then print a layer-by-layer summary.
LeNet_model = LeNet()
LeNet_model.summary()
def summary_history(history):
    """Plot training vs. validation accuracy curves from a Keras fit history."""
    plt.figure(figsize=(10, 6))
    # (history key, line color, legend label) for each curve to draw.
    curves = (('accuracy', 'green', 'train'),
              ('val_accuracy', 'red', 'val'))
    for key, color, label in curves:
        plt.plot(history.history[key], color=color, label=label)
    plt.legend()
    plt.title('Accuracy')
    plt.show()
def train_model(model, X_train, y_train, X_test, y_test, epochs = 50, batch_size = 128):
    """Preprocess the data, fit `model`, report test accuracy, and plot history.

    Parameters
    ----------
    model : compiled Keras model accepting (28, 28, 1) inputs
    X_train, y_train, X_test, y_test : raw MNIST arrays (preprocessed here)
    epochs : number of training epochs (default 50)
    batch_size : minibatch size used for training and validation (default 128)
    """
    # Rescaling all training and testing data
    X_train, y_train, X_test, y_test = preprocess_data(X_train, y_train, X_test, y_test)
    # Fitting the model on the training set
    # NOTE(review): steps_per_epoch / validation_steps are normally meant for
    # generator inputs; with in-memory arrays Keras derives the step count
    # from batch_size, so these look redundant — confirm before removing.
    history = model.fit(X_train, y_train, epochs = epochs, batch_size = batch_size,
                        steps_per_epoch = X_train.shape[0]//batch_size,
                        validation_data = (X_test, y_test),
                        validation_steps = X_test.shape[0]//batch_size, verbose = 1)
    # evaluating the model
    _, acc = model.evaluate(X_test, y_test, verbose = 1)
    # Print test-set accuracy as a percentage.
    print('%.3f' % (acc * 100.0))
    summary_history(history)
# Train the LeNet model end-to-end on MNIST and display its accuracy curves.
train_model(LeNet_model, X_train, y_train, X_test, y_test)
import numpy as np

# Predict labels for the whole test set in one batched call.
# PERF FIX: the original looped over all 10,000 test images, reshaping,
# scaling, and calling model.predict once per image — identical results,
# dramatically slower. One vectorized predict replaces the loop.
X_test_prepared = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
# Each prediction row is a 10-way softmax vector; argmax along axis 1
# recovers the predicted digit class for every image at once.
y_test_pred = np.argmax(LeNet_model.predict(X_test_prepared), axis=1)
from sklearn.metrics import confusion_matrix
import seaborn as sns

# Cross-tabulate true vs. predicted digit classes for the test set.
con_mat = confusion_matrix(y_test, y_test_pred)

# Render the matrix as an annotated heatmap: correct predictions fall on
# the diagonal, misclassifications off it.
plt.figure(figsize=(8, 6))
sns.heatmap(
    con_mat,
    annot=True,
    fmt='.1f',
    cmap='Greens',
    linewidths=0.1,
    linecolor='gray',
)
plt.xlabel('Predicted classes', fontsize=20)
plt.ylabel('True classes', fontsize=20)