-
Notifications
You must be signed in to change notification settings - Fork 1
/
rock_.py
188 lines (157 loc) · 7.37 KB
/
rock_.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
# -*- coding: utf-8 -*-
"""rock .ipynb
Automatically generated by Colaboratory.
"""
import pickle
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN.  Sequential builds the model layer by layer, which
# is the simplest way to define a network in Keras.
classifier = Sequential()

# Step 1 - Convolution: 32 filters of size 3x3 over 64x64 RGB inputs, with
# ReLU (rectified linear) activations.
# NOTE(review): Convolution2D(32, 3, 3) is the legacy Keras 1 calling
# convention; under Keras 2 the equivalent is Conv2D(32, (3, 3)) -- confirm
# against the installed version.
classifier.add(Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation='relu'))

# Step 2 - Pooling: 2x2 max-pooling halves each spatial dimension.
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# A second convolution + pooling stage with the same filter configuration.
classifier.add(Convolution2D(32, 3, 3, activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Step 3 - Flattening: connects the convolutional feature maps to the
# dense layers below.
classifier.add(Flatten())

# Step 4 - Full connection: a 128-unit hidden Dense layer.  The size is
# passed positionally so the call is valid on both Keras 1 (where the
# keyword was 'output_dim', now removed) and Keras 2 (where it is 'units').
classifier.add(Dense(128, activation='relu'))

# Output layer: one sigmoid unit for binary classification, paired with the
# binary_crossentropy loss below.  For a multi-class problem switch to the
# commented softmax / categorical_crossentropy alternatives instead.
classifier.add(Dense(1, activation='sigmoid'))
#classifier.add(Dense(3, activation='softmax'))

# Compiling the CNN.  Adam adapts the learning rate throughout training and
# is a solid default optimizer; accuracy is reported on the validation set.
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
#classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

# Print the layer-by-layer architecture and parameter counts.
classifier.summary()
from keras.preprocessing.image import ImageDataGenerator

# Training images get on-the-fly augmentation (shear, zoom, horizontal
# flip) on top of the 1/255 rescaling; 20% of the Train directory is
# reserved as a validation split.
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True,
                                   validation_split = 0.2)

# Validation and test images must only be rescaled -- augmenting them would
# distort the reported metrics.  val_datagen uses the same split fraction so
# the 'training'/'validation' subsets partition the directory consistently.
val_datagen = ImageDataGenerator(rescale = 1./255, validation_split = 0.2)
test_datagen = ImageDataGenerator(rescale = 1./255)

# subset='training' restricts this generator to the 80% training share.
# Without it the generator also yields the validation images, leaking them
# into training.
training_set = train_datagen.flow_from_directory('F:\\Amrit\\Convolutional_Neural_Networks\\dataset\\Train',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary',  # use 'categorical' for multi-class
                                                 subset = 'training')

test_set = test_datagen.flow_from_directory('F:\\Amrit\\Convolutional_Neural_Networks\\dataset\\Test',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')

# Same directory as the training data, but the 20% validation share and no
# augmentation.
validation_set = val_datagen.flow_from_directory(
    'F:\\Amrit\\Convolutional_Neural_Networks\\dataset\\Train',
    target_size = (64, 64),
    batch_size = 32,
    class_mode = 'binary',
    subset = 'validation')
"""model.fit_generator(
train_generator,
steps_per_epoch = train_generator.samples // batch_size,
validation_data = validation_generator,
validation_steps = validation_generator.samples // batch_size,
epochs = nb_epochs)
"""
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger, TensorBoard
path='F:'+'\\'+'Amrit'+'\\'+'Convolutional_Neural_Networks'+'\\'+'dataset'+'\\'
callbacks = [
ModelCheckpoint(path+"model2.h5"),
ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=4),#if after certain epoc (here 4) val loss does notdecrease the learning rate decrease by o.1
CSVLogger(path+'data.csv'),#visualition
TensorBoard(),#visualition
EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=False)# if continus 10 epoc val loss does not decreasing
#then it will stop the training just to prevent overfitting
]
history=classifier.fit_generator(training_set,
samples_per_epoch =11998//2,
nb_epoch = 30,
validation_data = validation_set,
nb_val_samples = 2398//2,
callbacks=callbacks
)
import numpy as np
from keras.preprocessing import image
import keras
from matplotlib import pyplot as plt

# Keras 2.3+ logs the metric under 'accuracy'; older versions used 'acc'.
# Pick whichever key the installed version actually produced so the plots
# do not crash with a KeyError.
_acc_key = 'acc' if 'acc' in history.history else 'accuracy'

# Accuracy curves for the training and validation sets, one point per epoch.
plt.plot(history.history[_acc_key])
plt.plot(history.history['val_' + _acc_key])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()

# Loss curves for the training and validation sets.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
import numpy as np
from PIL import Image
import PIL

# Pillow >= 2.7 names the high-quality downsampling filter LANCZOS;
# ANTIALIAS is the deprecated alias (removed in Pillow 10), kept as a
# fallback for very old installs.
_RESAMPLE = getattr(Image, 'LANCZOS', None) or Image.ANTIALIAS

_TEST_DIR = 'F:\\Amrit\\Convolutional_Neural_Networks\\dataset\\Test\\'

# Predict a Small/Large label for every test image listed in 123.csv and
# write one "Image_File,Class" row per image to output.csv.  Context
# managers guarantee both files are closed even if the loop raises.
with open('123.csv', 'r') as data, open('output.csv', 'w') as eng:
    eng.write('Image_File,Class')
    eng.write('\n')
    for row in data.readlines()[1:7534]:
        fields = row.split(',')
        name = fields[0]
        try:
            img = Image.open(_TEST_DIR + name)
            # Drop the alpha channel so the array matches the (64, 64, 3)
            # input shape the network was trained on.
            if img.mode == 'RGBA':
                img = img.convert('RGB')
            img = img.resize((64, 64), _RESAMPLE)
            # Add the batch axis and rescale exactly as during training.
            batch = np.reshape(img, (1, 64, 64, 3)) / 255
            result = classifier.predict(batch)
            # Sigmoid output > 0.5 -> class "Small", otherwise "Large".
            label = 'Small' if result[0][0] > 0.5 else 'Large'
            print(result[0][0])
            # Write the row only after a successful prediction, so a failed
            # image can never leave a half-written line in the CSV.
            eng.write(name)
            eng.write(',')
            eng.write(label)
            eng.write('\n')
        except Exception:
            # Best-effort: skip unreadable or mis-shaped images, but report
            # which file was skipped.
            print(name)