-
Notifications
You must be signed in to change notification settings - Fork 0
/
9_layer_model_convert+predict.py
72 lines (56 loc) · 2.33 KB
/
9_layer_model_convert+predict.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
# Convert a previously saved "second 9-layer" MNIST model to TFLite and set up
# an interpreter to evaluate it. Written for TensorFlow 1.x: eager execution is
# enabled manually and the converter lives under tf.contrib.lite.
import tensorflow as tf
import numpy as np
tf.enable_eager_execution()
import pathlib
import time
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# Second 9 layers data
# Keep only the first 10k samples of each split to bound evaluation time.
train_images_sec = train_images[:10000]
test_images_sec = test_images[:10000]
# NOTE(review): this float32 cast is redundant — astype('float32') below
# converts again; harmless, kept as-is.
test_images_sec = np.float32(test_images_sec)
train_labels = train_labels[:10000].astype('int64')
test_labels = test_labels[:10000].astype('int64')
# Translation of data
# Reshape to NHWC (N, 28, 28, 1) as expected by a conv-net input tensor.
train_images_sec4D = train_images_sec.reshape(train_images_sec.shape[0], 28, 28, 1).astype('float32')
test_images_sec4D = test_images_sec.reshape(test_images_sec.shape[0], 28, 28, 1).astype('float32')
# Standardize feature data
# Scale uint8 pixel values [0, 255] into [0, 1].
train_images_sec4D_norm = train_images_sec4D / 255
test_images_sec4D_norm = test_images_sec4D /255
# batch(1): the TFLite interpreter below is fed one image at a time.
ds_sec = tf.data.Dataset.from_tensor_slices((test_images_sec4D_norm, test_labels)).batch(1)
# path
saved_models_root = "./saved_models/second"
print("saved_models_root =", saved_models_root)
# Pick the lexicographically last entry — presumably the newest timestamped
# export directory; TODO confirm the export naming sorts chronologically.
saved_models_dir = str(sorted(pathlib.Path(saved_models_root).glob("*"))[-1])
print("saved_models_dir =", saved_models_dir)
# write tflite file
# tf.contrib.lite is the pre-TF-1.13 converter path (tf.lite in later 1.x).
converter = tf.contrib.lite.TFLiteConverter.from_saved_model(saved_models_dir)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path('./mnist_tflite_models/')
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model_sec.tflite"
tflite_model_file.write_bytes(tflite_model)
print('write to .tflite file ok')
# interpret
# Load the flatbuffer back and allocate tensors before any set_tensor/invoke.
interpreter = tf.contrib.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
# Tensor indices for the (single) input and output of the converted model.
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
def eval_model(interpreter, ds):
    """Run every (image, label) pair in ``ds`` through a TFLite interpreter
    and return the top-1 accuracy.

    Args:
        interpreter: a tf.lite Interpreter with ``allocate_tensors()`` already
            called; assumed to have exactly one input and one output tensor.
        ds: iterable of ``(img, label)`` pairs; ``img`` must match the model's
            input shape/dtype (batch of 1 here) and ``label`` must expose a
            ``.numpy()`` method returning the true class.

    Returns:
        float accuracy in [0, 1]; 0.0 for an empty dataset.
    """
    # Fix: derive the tensor indices from the interpreter argument itself
    # instead of silently depending on module-level globals input_index /
    # output_index (same values for this script, but self-contained now).
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    total_seen = 0
    num_correct = 0
    for img, label in ds:
        total_seen += 1
        interpreter.set_tensor(input_index, img)
        interpreter.invoke()
        predictions = interpreter.get_tensor(output_index)
        # predictions has a leading batch dimension of 1; argmax gives top-1.
        predicted_label = np.argmax(predictions[0])
        if predicted_label == label.numpy():
            num_correct += 1
        # Progress report every 500 images.
        if total_seen % 500 == 0:
            print("Accuracy after %i images: %f" % (total_seen, float(num_correct) / float(total_seen)))
    # Guard: the original would raise ZeroDivisionError on an empty dataset.
    if total_seen == 0:
        return 0.0
    return float(num_correct) / float(total_seen)
# eval
# Time the full one-image-per-invoke evaluation pass over the 10k test subset;
# wall-clock time, so it includes the per-image Python loop overhead.
start = time.time()
eval_model(interpreter, ds_sec)
time_elapsed = time.time() - start
print("time_elapsed =", time_elapsed)