Commit e0c769d

fixed compatibility issues with Python 3.5 and removed deprecated TensorFlow code

Wolfgang Beyer committed Jan 20, 2017
1 parent 20b464c commit e0c769d
Showing 4 changed files with 16 additions and 13 deletions.
12 changes: 8 additions & 4 deletions data_helpers.py

@@ -1,13 +1,17 @@
 '''Imports CIFAR-10 data.'''
 
 import numpy as np
-import cPickle as pickle
+import pickle
+import sys
 
 def load_CIFAR10_batch(filename):
     '''load data from single CIFAR-10 file'''
 
     with open(filename, 'rb') as f:
-        dict = pickle.load(f)
+        if sys.version_info[0] < 3:
+            dict = pickle.load(f)
+        else:
+            dict = pickle.load(f, encoding='latin1')
         x = dict['data']
         y = dict['labels']
         x = x.astype(float)
@@ -19,7 +23,7 @@ def load_data():
 
     xs = []
     ys = []
-    for i in xrange(1, 6):
+    for i in range(1, 6):
         filename = 'cifar-10-batches-py/data_batch_' + str(i)
         X, Y = load_CIFAR10_batch(filename)
         xs.append(X)
@@ -69,7 +73,7 @@ def reshape_data(data_dict):
 def gen_batch(data, batch_size, num_iter):
     data = np.array(data)
     index = len(data)
-    for i in xrange(num_iter):
+    for i in range(num_iter):
         index += batch_size
         if (index + batch_size > len(data)):
             index = 0
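
Background on this change: the CIFAR-10 batch files were pickled under Python 2, so Python 3's pickle needs an explicit encoding='latin1' to decode the byte strings without corrupting the array data; the version check keeps the loader working on both interpreters. The xrange -> range edits are the Python 3 rename of the same built-in. A minimal standalone sketch of the pickle pattern, assuming the standard cifar-10-batches-py files are on disk (the helper name load_pickle_compat is illustrative):

import pickle
import sys

def load_pickle_compat(filename):
    # 'latin1' maps bytes 0-255 straight through, so the NumPy
    # arrays inside a Python 2 pickle survive the round trip.
    with open(filename, 'rb') as f:
        if sys.version_info[0] < 3:
            return pickle.load(f)
        return pickle.load(f, encoding='latin1')

batch = load_pickle_compat('cifar-10-batches-py/data_batch_1')
print(batch['data'].shape)  # (10000, 3072): 10000 images, 32*32*3 values each
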
8 changes: 4 additions & 4 deletions run_fc_model.py

@@ -70,7 +70,7 @@
 accuracy = two_layer_fc.evaluation(logits, labels_placeholder)
 
 # Operation merging summary data for TensorBoard
-summary = tf.merge_all_summaries()
+summary = tf.summary.merge_all()
 
 # Define saver to save model state at checkpoints
 saver = tf.train.Saver()
@@ -81,12 +81,12 @@
 
 with tf.Session() as sess:
     # Initialize variables and create summary-writer
-    sess.run(tf.initialize_all_variables())
-    summary_writer = tf.train.SummaryWriter(logdir, sess.graph)
+    sess.run(tf.global_variables_initializer())
+    summary_writer = tf.summary.FileWriter(logdir, sess.graph)
 
     # Generate input data batches
     zipped_data = zip(data_sets['images_train'], data_sets['labels_train'])
-    batches = data_helpers.gen_batch(zipped_data, FLAGS.batch_size,
+    batches = data_helpers.gen_batch(list(zipped_data), FLAGS.batch_size,
                                      FLAGS.max_steps)
 
     for i in range(FLAGS.max_steps):
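
The list(zipped_data) wrapper matters because Python 3's zip returns a one-shot iterator rather than a list: gen_batch calls np.array and len on its input, which would exhaust or mishandle a bare iterator. A small sketch of the difference, using toy data with illustrative names:

images = ['img0', 'img1', 'img2']
labels = [0, 1, 0]

pairs = zip(images, labels)
print(list(pairs))  # [('img0', 0), ('img1', 1), ('img2', 0)]
print(list(pairs))  # [] on Python 3: the iterator is already exhausted

# Materializing once keeps the pairs reusable for len(), indexing,
# and repeated passes over the training data:
pairs = list(zip(images, labels))
print(len(pairs))   # 3
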
2 changes: 1 addition & 1 deletion softmax.py

@@ -58,7 +58,7 @@
 
 with tf.Session() as sess:
     # Initialize variables
-    sess.run(tf.initialize_all_variables())
+    sess.run(tf.global_variables_initializer())
 
     # Repeat max_steps times
     for i in range(max_steps):
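
tf.initialize_all_variables was deprecated in TensorFlow 0.12 in favor of tf.global_variables_initializer; both return an op that assigns every global variable its initial value. A minimal sketch of the updated idiom, assuming the TF 1.x graph-and-session API:

import tensorflow as tf

weights = tf.Variable(tf.zeros([784, 10]), name='weights')

with tf.Session() as sess:
    # The init op must run before any variable is read.
    sess.run(tf.global_variables_initializer())
    print(sess.run(weights).shape)  # (784, 10)
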
7 changes: 3 additions & 4 deletions two_layer_fc.py

@@ -28,7 +28,6 @@ def inference(images, image_pixels, hidden_units, classes, reg_constant=0):
     weights = tf.get_variable(
         name='weights',
         shape=[image_pixels, hidden_units],
-        # initializer=tf.truncated_normal_initializer(stddev=1.0 / np.sqrt(float(image_pixels)), seed=1234),
         initializer=tf.truncated_normal_initializer(
             stddev=1.0 / np.sqrt(float(image_pixels))),
         regularizer=tf.contrib.layers.l2_regularizer(reg_constant)
@@ -53,7 +52,7 @@ def inference(images, image_pixels, hidden_units, classes, reg_constant=0):
     logits = tf.matmul(hidden, weights) + biases
 
     # Define summary-operation for 'logits'-variable
-    tf.histogram_summary('logits', logits)
+    tf.summary.histogram('logits', logits)
 
     return logits
 
@@ -80,7 +79,7 @@ def loss(logits, labels):
         tf.GraphKeys.REGULARIZATION_LOSSES))
 
     # Add a scalar summary for the loss
-    tf.scalar_summary('loss', loss)
+    tf.summary.scalar('loss', loss)
 
     return loss
 
@@ -128,6 +127,6 @@ def evaluation(logits, labels):
     accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
 
     # Summary operation for the accuracy
-    tf.scalar_summary('train_accuracy', accuracy)
+    tf.summary.scalar('train_accuracy', accuracy)
 
     return accuracy
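
The remaining renames are the TensorFlow 0.12 consolidation of the summary ops into a single namespace: tf.scalar_summary -> tf.summary.scalar, tf.histogram_summary -> tf.summary.histogram, tf.merge_all_summaries -> tf.summary.merge_all, and tf.train.SummaryWriter -> tf.summary.FileWriter. A minimal end-to-end sketch of the renamed API (the 'logs' directory name is illustrative):

import tensorflow as tf

weights = tf.Variable(tf.random_normal([100]), name='weights')
mean = tf.reduce_mean(weights)

tf.summary.histogram('weights', weights)  # was tf.histogram_summary
tf.summary.scalar('weights_mean', mean)   # was tf.scalar_summary
summary_op = tf.summary.merge_all()       # was tf.merge_all_summaries

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # was tf.train.SummaryWriter
    writer = tf.summary.FileWriter('logs', sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()
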
