# autoencoder.py: a simple convolutional autoencoder that learns to reconstruct 224x224 RGB images read from a folder
from keras.models import Sequential
from keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# Define the model
model = Sequential()
#1st convolution layer
model.add(Conv2D(16, (3, 3), padding='same', input_shape=(224,224,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
#2nd convolution layer
model.add(Conv2D(2, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
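# Bottleneck: after two 2x2 poolings the 224x224x3 input is encoded as a 56x56x2 feature map (224 -> 112 -> 56)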
#-------------------------
#3rd convolution layer
model.add(Conv2D(2, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(UpSampling2D((2, 2)))
#4th convolution layer
model.add(Conv2D(16, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(UpSampling2D((2, 2)))
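# The two 2x2 upsamplings bring the feature map back to 224x224 (56 -> 112 -> 224) before the final 3-channel layer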
#-------------------------
model.add(Conv2D(3, (3, 3), padding='same'))
model.add(Activation('sigmoid'))
model.summary()
# Compile the model
model.compile(optimizer='adadelta', loss='binary_crossentropy')
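# With the sigmoid output and pixel values rescaled to [0, 1], binary cross-entropy acts as a per-pixel reconstruction loss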
# Generate data from the images in a folder
train_dir = 'data/train/'
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1./255., data_format='channels_last')
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='input')
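# class_mode='input' makes the generator yield (x, x) pairs, so the target is the input image itself and the network learns to reconstruct it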
test_datagen = ImageDataGenerator(rescale=1./255., data_format='channels_last')
validation_generator = test_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='input')
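# NOTE: validation reads from the same train_dir as training, so it does not measure reconstruction quality on unseen images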
# Train the model
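# steps_per_epoch and validation_steps of 1000 // batch_size assume roughly 1000 images per pass through the generators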
model.fit_generator(
    train_generator,
    steps_per_epoch=1000 // batch_size,
    epochs=20,
    validation_data=validation_generator,
    validation_steps=1000 // batch_size)
# Test the model: collect batches from the training generator until its
# internal batch_index wraps around (roughly one pass over the data)
data_list = []
batch_index = 0
while batch_index <= train_generator.batch_index:
    data = next(train_generator)   # (images, targets); targets equal the images for class_mode='input'
    data_list.append(data[0])
    batch_index = batch_index + 1
print(data_list[0].shape)
predicted = model.predict(data_list[0])
# Show an original image and its reconstruction side by side
plt.subplot(1, 2, 1)
plt.imshow(data_list[0][0])
plt.subplot(1, 2, 2)
plt.imshow(predicted[0])
plt.show()