This repository has been archived by the owner on Dec 26, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
review-with-checkpoint.py
90 lines (66 loc) · 2.75 KB
/
review-with-checkpoint.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import os
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)  # sanity-check which TensorFlow build is active
# Vocabulary cap for the IMDB dataset: keep only the 10,000 most frequent
# words. Also used as the width of the multi-hot encoding below.
NUM_WORDS = 10000
def multi_hot_sequences(sequences, dimension):
    """Multi-hot encode integer index sequences.

    Each input sequence (an iterable of word indices) becomes one row of a
    ``(len(sequences), dimension)`` float array, with a 1.0 at every index
    that appears in the sequence and 0.0 everywhere else.
    """
    encoded = np.zeros((len(sequences), dimension))
    # Each `row` is a view into `encoded`, so fancy-index assignment on the
    # row writes straight into the output array.
    for row, word_ids in zip(encoded, sequences):
        row[word_ids] = 1.0
    return encoded
def plot_history(histories, key='binary_crossentropy'):
    """Plot train vs. validation curves for each (name, History) pair.

    For every entry, the validation metric (``'val_' + key``) is drawn as a
    dashed line and the training metric as a solid line in the same color.
    """
    plt.figure(figsize=(16, 10))
    for label, hist in histories:
        dashed = plt.plot(hist.epoch, hist.history['val_' + key],
                          '--', label=label.title() + ' Val')
        plt.plot(hist.epoch, hist.history[key],
                 color=dashed[0].get_color(),
                 label=label.title() + ' Train')
    plt.xlabel('Epochs')
    plt.ylabel(key.replace('_', ' ').title())
    plt.legend()
    # NOTE: intentionally reads the loop variable after the loop — the x-axis
    # limit comes from the last history plotted (same as the original code).
    plt.xlim([0, max(hist.epoch)])
def create_model():
    """Build and compile the IMDB sentiment classifier.

    Architecture: two 16-unit ReLU dense layers, each followed by 50%
    dropout, then a single sigmoid output for binary classification.
    Input is a multi-hot vector of width NUM_WORDS.

    Returns:
        A compiled ``keras.models.Sequential`` (adam optimizer, binary
        cross-entropy loss, accuracy + binary_crossentropy metrics).
    """
    model = keras.models.Sequential([
        keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(16, activation=tf.nn.relu),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(1, activation=tf.nn.sigmoid),
    ])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', 'binary_crossentropy'])
    # Fixed: removed the stray trailing semicolon (un-Pythonic noise).
    return model
# --- Checkpointing setup ---
# Weights are written to model/cp-0005.ckpt, cp-0010.ckpt, ... — the epoch
# number is zero-padded to 4 digits by the format pattern below.
checkpoint_path = "model/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create checkpoint callback: save weights only (not the full model),
# once every 5 epochs.
# NOTE(review): `period=` is deprecated in newer tf.keras in favor of
# `save_freq=` — confirm against the TF version pinned for this repo.
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1,
                                                 period=5)
# Load IMDB reviews capped at the NUM_WORDS most frequent tokens, then
# multi-hot encode each review into a fixed-width NUM_WORDS vector.
(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)
train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
model = create_model()
# Save the untrained weights as the epoch-0 checkpoint so a baseline
# snapshot exists before any training.
model.save_weights(checkpoint_path.format(epoch=0))
model.summary()
# Train for 50 epochs; cp_callback snapshots the weights every 5 epochs
# into checkpoint_dir while the test split doubles as validation data.
model.fit(train_data, train_labels,
          epochs=50,
          batch_size=512,
          validation_data=(test_data, test_labels),
          verbose=1,
          callbacks=[cp_callback])
# Build a fresh model, evaluate it untrained, then restore the latest
# checkpoint and evaluate again to confirm the saved weights round-trip.
model1 = create_model()
# evaluate() returns [loss, accuracy, binary_crossentropy] in the order set
# by compile(metrics=...); the extra metric is discarded via *_.
loss, acc, *_ = model1.evaluate(test_data, test_labels)
print("Untrained model, acc: {:5.2f}%".format(100 * acc))
latest = tf.train.latest_checkpoint(checkpoint_dir)
# Bug fix: print() is not printf-style — the original
# print("latest checkpoint is :%s", latest) printed the literal "%s" format
# string and the path as two separate values instead of interpolating.
print("latest checkpoint is: {}".format(latest))
model1.load_weights(latest)
loss, acc, *_ = model1.evaluate(test_data, test_labels)
print("loaded model, acc: {:5.2f}%".format(100 * acc))