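"""AnoGAN anomaly detection on 28x28 grayscale images (Keras).

A DCGAN generator/discriminator pair is trained on normal data only. At test
time, a small trainable mapping into the generator's latent space is fitted
per query image; the anomaly score combines the pixel-space reconstruction
residual with a residual on intermediate discriminator features.
"""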
from keras.models import Sequential, Model
from keras.layers import Input, Reshape, Dense, Dropout, UpSampling2D, Conv2D, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras import initializers
import tensorflow as tf
import numpy as np
from tqdm import tqdm


def generator_model():
    """DCGAN generator: 100-d latent vector -> 28x28x1 image in [-1, 1]."""
    generator = Sequential()
    generator.add(Dense(128 * 7 * 7, input_dim=100,
                        kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(0.2))
    generator.add(Reshape((7, 7, 128)))
    generator.add(UpSampling2D(size=(2, 2)))  # 7x7 -> 14x14
    generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
    generator.add(LeakyReLU(0.2))
    generator.add(UpSampling2D(size=(2, 2)))  # 14x14 -> 28x28
    generator.add(Conv2D(1, kernel_size=(5, 5), padding='same', activation='tanh'))
    generator.compile(loss='binary_crossentropy', optimizer='adam')
    return generator


def discriminator_model():
    """DCGAN discriminator: 28x28x1 image -> probability that the input is real."""
    discriminator = Sequential()
    discriminator.add(Conv2D(64, kernel_size=(5, 5), strides=(2, 2), padding='same',
                             input_shape=(28, 28, 1),
                             kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Conv2D(128, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Flatten())
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer='adam')
    return discriminator


def generator_containing_discriminator(g, d):
    """Stack g and d into the combined model used to train the generator.

    The discriminator is frozen here so that generator updates do not also
    move d's weights.
    """
    d.trainable = False
    ganInput = Input(shape=(100,))
    x = g(ganInput)
    ganOutput = d(x)
    gan = Model(inputs=ganInput, outputs=ganOutput)
    gan.compile(loss='binary_crossentropy', optimizer='adam')
    return gan


def train(BATCH_SIZE, X_train):
    """Standard DCGAN training loop on X_train (images scaled to [-1, 1])."""
    d = discriminator_model()
    print("#### discriminator ######")
    d.summary()
    g = generator_model()
    print("#### generator ######")
    g.summary()
    d_on_g = generator_containing_discriminator(g, d)
    d.trainable = True
    for epoch in tqdm(range(200)):
        for index in range(int(X_train.shape[0] / BATCH_SIZE)):
            # Train the discriminator on a half-real, half-generated batch.
            noise = np.random.uniform(0, 1, size=(BATCH_SIZE, 100))
            image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            generated_images = g.predict(noise, verbose=0)
            X = np.concatenate((image_batch, generated_images))
            y = np.array([1] * BATCH_SIZE + [0] * BATCH_SIZE)
            d_loss = d.train_on_batch(X, y)
            # Train the generator through the frozen discriminator,
            # asking d to label the fakes as real.
            noise = np.random.uniform(0, 1, (BATCH_SIZE, 100))
            d.trainable = False
            g_loss = d_on_g.train_on_batch(noise, np.array([1] * BATCH_SIZE))
            d.trainable = True
        g.save_weights('assets/generator', True)
        d.save_weights('assets/discriminator', True)
    return d, g


def generate(BATCH_SIZE):
    """Sample BATCH_SIZE images from the trained generator."""
    g = generator_model()
    g.load_weights('assets/generator')
    noise = np.random.uniform(0, 1, (BATCH_SIZE, 100))
    generated_images = g.predict(noise)
    return generated_images


def sum_of_residual(y_true, y_pred):
    """AnoGAN residual loss: summed absolute difference between tensors."""
    return tf.reduce_sum(tf.abs(y_true - y_pred))


def feature_extractor():
    """Discriminator truncated at an intermediate layer, used to extract the
    features that drive the discrimination part of the anomaly score."""
    d = discriminator_model()
    d.load_weights('assets/discriminator')
    intermediate_model = Model(inputs=d.layers[0].input, outputs=d.layers[-5].output)
    intermediate_model.compile(loss='binary_crossentropy', optimizer='adam')
    return intermediate_model


def anomaly_detector():
    """Model used for the latent-space search at test time.

    The generator and the feature extractor are both frozen; the only
    trainable weights are in the Dense layer mapping the input onto a latent
    vector z, so fitting this model searches the generator's latent space.
    """
    g = generator_model()
    g.load_weights('assets/generator')
    g.trainable = False
    intermediate_model = feature_extractor()
    intermediate_model.trainable = False
    aInput = Input(shape=(100,))
    gInput = Dense(100)(aInput)  # learnable mapping onto z
    G_out = g(gInput)
    D_out = intermediate_model(G_out)
    model = Model(inputs=aInput, outputs=[G_out, D_out])
    # Weighted sum of the residual loss (on the image) and the discrimination
    # loss (on intermediate discriminator features), as in the AnoGAN paper.
    model.compile(loss=sum_of_residual, loss_weights=[0.9, 0.1], optimizer='adam')
    return model


def compute_anomaly_score(model, x):
    """Fit the latent mapping for a single query image x; return the final
    loss (the anomaly score) together with the reconstruction G(z)."""
    z = np.random.uniform(0, 1, size=(1, 100))
    intermediate_model = feature_extractor()
    d_x = intermediate_model.predict(x)
    # Optimize the latent mapping so G(z) matches x in both pixel space and
    # discriminator-feature space.
    loss = model.fit(z, [x, d_x], epochs=500, verbose=0)
    similar_data, _ = model.predict(z)
    return loss.history['loss'][-1], similar_data
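

# Usage sketch (illustrative, not part of the original file): one way to wire
# the pieces together on MNIST. It assumes images are scaled to [-1, 1] to
# match the generator's tanh output and that an 'assets/' directory exists for
# the weight files; the digit filter, batch size, and test index below are
# arbitrary choices.
if __name__ == '__main__':
    import os
    from keras.datasets import mnist

    os.makedirs('assets', exist_ok=True)
    (X_train, y_train), (X_test, _) = mnist.load_data()

    # Treat a single digit class as the "normal" training data.
    X_train = X_train[y_train == 1]
    X_train = (X_train.astype('float32') - 127.5) / 127.5
    X_train = X_train[:, :, :, None]

    train(64, X_train)

    # Score one query image: a higher score suggests an anomaly.
    x = (X_test[:1].astype('float32') - 127.5) / 127.5
    x = x[:, :, :, None]
    model = anomaly_detector()
    score, reconstruction = compute_anomaly_score(model, x)
    print('anomaly score:', score)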