-
Notifications
You must be signed in to change notification settings - Fork 0
/
viz2.py
142 lines (117 loc) · 5.19 KB
/
viz2.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import csv
import numpy as np
import json
# Load MNIST via the (deprecated) tf.contrib loader; this loader returns
# integer class labels, which are one-hot encoded on the fly later on.
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
# Python optimisation variables (training hyper-parameters)
learning_rate = 0.00146
epochs = 10  # NOTE(review): declared but the (disabled) training loop runs only 1 epoch
batch_size = 25
# declare the training data placeholders
# input x - for 28 x 28 pixels = 784 - this is the flattened image data that is drawn from
# mnist.train.nextbatch()
x = tf.placeholder(tf.float32, [None, 784])
# dynamically reshape the flat input into NHWC image form: [batch, 28, 28, 1]
x_shaped = tf.reshape(x, [-1, 28, 28, 1])
# now declare the output data placeholder - one-hot over the 10 digit classes
y = tf.placeholder(tf.float32, [None, 10])
def create_new_conv_layer(input_data, num_input_channels, num_filters, filter_shape, pool_shape, name):
    """Build one conv -> ReLU -> max-pool block and return its output tensor.

    Args:
        input_data: 4-D input tensor in NHWC layout.
        num_input_channels: number of channels in ``input_data``.
        num_filters: number of convolution filters (output channels).
        filter_shape: ``[height, width]`` of each convolution filter.
        pool_shape: ``[height, width]`` of the max-pool window; also used as
            the pooling stride, giving non-overlapping pooling.
        name: prefix for the created weight/bias variable names
            (``name + '_W'`` / ``name + '_b'``).

    Returns:
        The pooled activation tensor.
    """
    # filter layout expected by tf.nn.conv2d:
    # [filter_height, filter_width, in_channels, out_channels]
    conv_filt_shape = [filter_shape[0], filter_shape[1],
                       num_input_channels, num_filters]
    # initialise weights and bias for the filter
    weights = tf.Variable(tf.truncated_normal(conv_filt_shape, stddev=0.03),
                          name=name + '_W')
    bias = tf.Variable(tf.truncated_normal([num_filters]), name=name + '_b')
    # stride-1 'SAME' convolution keeps the spatial dimensions unchanged
    out_layer = tf.nn.conv2d(input_data, weights, [1, 1, 1, 1], padding='SAME')
    # add the bias, then apply a ReLU non-linear activation
    out_layer += bias
    out_layer = tf.nn.relu(out_layer)
    # Max pooling: window and stride both come from pool_shape.
    # FIX: the stride was previously hard-coded to [1, 2, 2, 1], silently
    # ignoring pool_shape; deriving it from pool_shape is identical for the
    # existing [2, 2] callers and correct for any other pool size.
    ksize = [1, pool_shape[0], pool_shape[1], 1]
    strides = [1, pool_shape[0], pool_shape[1], 1]
    out_layer = tf.nn.max_pool(out_layer, ksize=ksize, strides=strides,
                               padding='SAME')
    return out_layer
# create the two convolutional layers: 28x28x1 -> 14x14x32 -> 7x7x64
layer1 = create_new_conv_layer(x_shaped, 1, 32, [5, 5], [2, 2], name='layer1')
layer2 = create_new_conv_layer(layer1, 32, 64, [5, 5], [2, 2], name='layer2')
# flatten the 7x7x64 feature maps for the fully-connected layers
flattened = tf.reshape(layer2, [-1, 7 * 7 * 64])
# first dense layer (3136 -> 1000); weights/bias then ReLU activation
wd1 = tf.Variable(tf.truncated_normal(
    [7 * 7 * 64, 1000], stddev=0.03), name='wd1')
bd1 = tf.Variable(tf.truncated_normal([1000], stddev=0.01), name='bd1')
dense_layer1 = tf.matmul(flattened, wd1) + bd1
dense_layer1 = tf.nn.relu(dense_layer1)
# output layer (1000 -> 10); y_ is the softmax prediction, while the raw
# logits (dense_layer2) are kept separate for the loss below
wd2 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.03), name='wd2')
bd2 = tf.Variable(tf.truncated_normal([10], stddev=0.01), name='bd2')
dense_layer2 = tf.matmul(dense_layer1, wd2) + bd2
y_ = tf.nn.softmax(dense_layer2)
# softmax_cross_entropy_with_logits expects unscaled logits, so it is fed
# dense_layer2 rather than y_ (avoids applying softmax twice)
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=dense_layer2, labels=y))
# add an optimiser
optimiser = tf.train.AdamOptimizer(
    learning_rate=learning_rate).minimize(cross_entropy)
# accuracy: fraction of samples where predicted class matches the label
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# setup the initialisation operator (unused when restoring from a checkpoint,
# as the session block below does)
init_op = tf.global_variables_initializer()
# number of classes, used to one-hot encode the integer MNIST labels
depth = 10
# container for the activations dumped to JSON at the end of the script
finalReps = {}
finalReps["epoch_weights"] = []
# saver used below to restore previously trained variables
saver = tf.train.Saver()
# Restore a previously trained model and dump dense-layer activations for the
# first 500 test images to a JSON file (presumably for visualisation — the
# file is named viz2.py per the page header; TODO confirm downstream use).
with tf.Session() as sess:
    print("starting session")
    # NOTE(review): the original training loop is preserved below inside a
    # triple-quoted string, i.e. it is disabled; this run restores a saved
    # checkpoint instead of training from scratch.
    """ sess.run(init_op)
    total_batch = int(len(mnist.train.labels) / batch_size)
    for epoch in range(1):
        # for epoch in range(epochs):
        print("starting epcoh:", (epoch+1))
        avg_cost = 0
        for i in range(total_batch):
            # print("batch ", (i+1))
            if (i % 100 == 0):
                print("batch ", i)
            batch_x, batch_y = mnist.train.next_batch(
                batch_size=batch_size)
            indices = batch_y
            y_matrix = tf.one_hot(indices, depth).eval(session=sess)
            # print("encoding scores")
            _, c = sess.run([optimiser, cross_entropy],
                            feed_dict={x: batch_x, y: y_matrix})
            avg_cost += c / total_batch
        y_test_matrix = tf.one_hot(
            mnist.test.labels[:100], depth).eval(session=sess)
        test_acc = sess.run(accuracy,
                            feed_dict={x: mnist.test.images[:100], y: y_test_matrix})
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost),
              "test accuracy: {: .3f}".format(test_acc)) """
    # load the variable values saved earlier at this checkpoint path
    saver.restore(sess, "/tmp/model.ckpt")
    # save_path = saver.save(sess, "/tmp/model.ckpt")
    # print("Model saved in path: %s" % save_path)
    # weight1 = layer1.eval(session=sess, feed_dict={
    #     x: mnist.test.images[:500]})
    # weight2 = layer2.eval(session=sess, feed_dict={
    #     x: mnist.test.images[:500]})
    # finalReps.append(weight)
    # print("weights at epoch: ", weight1)
    # forward the first 500 test images and capture both dense-layer
    # activations (dl1 is computed but not written out below)
    dl1 = dense_layer1.eval(session=sess, feed_dict={
        x: mnist.test.images[:500]})
    dl2 = dense_layer2.eval(session=sess, feed_dict={
        x: mnist.test.images[:500]})
    # finalReps["epoch_weights"].append(
    #     { # "conv_1": weight1.tolist(),
    #       "dl_1": dl1.tolist(),
    #       "dl_2": dl2.tolist()
    #     })
    # only the final dense-layer logits are serialised
    finalReps["epoch_weights"] = dl2.tolist()
    # {"conv_1": weight1.tolist(), "conv_2": weight2.tolist()})
# NOTE(review): "weigts" looks like a typo in the output filename — kept
# as-is because downstream consumers may rely on this exact name; confirm
# before renaming to "weights_...".
with open("weigts_4_denseonlyappend.json", "w") as f:
    json.dump(finalReps, f)
print("\nTraining complete!")