Skip to content
Permalink
Browse files

Updated chapter 11

  • Loading branch information...
nfmcclure committed Aug 9, 2018
1 parent 8ebfa0b commit 0d17fb1c36972184c43df2af22f71d93585e6a78
@@ -11,19 +11,21 @@
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Initialize a graph session
sess = tf.Session()

# Create a visualizer object.
# NOTE(review): writing sess.graph (the committed fix) rather than
# tf.get_default_graph() — both refer to the same graph here, but the
# session's graph is the one actually executed.
summary_writer = tf.summary.FileWriter('tensorboard', sess.graph)

# Create tensorboard folder if not exists
# (assumes `os` is imported earlier in the script — TODO confirm)
if not os.path.exists('tensorboard'):
    os.makedirs('tensorboard')
print('Running a slowed down linear regression. '
      'Run the command: $tensorboard --logdir="tensorboard" '
      ' Then navigate to http://127.0.0.1:6006')

# You can also specify a port option with --port 6006

@@ -69,9 +71,8 @@

# Visualize a histogram (errors)
with tf.name_scope('Loss_and_Residuals'):
    # Squeeze to drop size-1 dimensions so the histogram summaries
    # receive flat value tensors (the committed fix for this hunk).
    tf.summary.histogram('Histogram_Errors', tf.squeeze(l1_loss))
    tf.summary.histogram('Histogram_Residuals', tf.squeeze(residuals))

# Declare summary merging operation
@@ -92,7 +93,7 @@
test_loss, test_resids = sess.run([l1_loss, residuals], feed_dict={x_graph_input: x_data_test,
y_graph_input: y_data_test})

if (i+1)%10==0:
if (i + 1) % 10 == 0:
print('Generation {} of {}. Train Loss: {:.3}, Test Loss: {:.3}.'.format(i+1, generations, train_loss, test_loss))

log_writer = tf.summary.FileWriter('tensorboard')
@@ -118,7 +119,7 @@ def gen_linear_plot(slope):
# Add the batch dimension
image = tf.expand_dims(image, 0)
# Add image summary
image_summary_op = tf.summary.image("Linear Plot", image)
image_summary_op = tf.summary.image("Linear_Plot", image)
image_summary = sess.run(image_summary_op)
log_writer.add_summary(image_summary, i)
log_writer.close()
log_writer.close()
@@ -58,7 +58,7 @@

# Get best fit individual (lowest value in top_vals)
best_val = tf.reduce_min(top_vals)
# tf.arg_min is deprecated; tf.argmin is the current API (committed fix).
best_ind = tf.argmin(top_vals, 0)
best_individual = tf.gather(population, best_ind)

# Get parents
@@ -108,11 +108,11 @@
best_individual_val = sess.run(best_individual, feed_dict=feed_dict)

if i % 5 == 0:
best_fit = sess.run(best_val, feed_dict = feed_dict)
print('Generation: {}, Best Fitness (lowest MSE): {:.2}'.format(i, -best_fit))
best_fit = sess.run(best_val, feed_dict = feed_dict)
print('Generation: {}, Best Fitness (lowest MSE): {:.2}'.format(i, -best_fit))

# Plot the ground-truth curve against the best evolved individual.
plt.plot(truth, label="True Values")
plt.plot(np.squeeze(best_individual_val), label="Best Individual")
plt.axis((0, features, -1.25, 1.25))
plt.legend(loc='upper right')
plt.show()
@@ -23,7 +23,7 @@

# Set k-means parameters
# There are 3 types of iris flowers, see if we can predict them
k = 3
generations = 25

data_points = tf.Variable(iris.data)
@@ -41,9 +41,10 @@
# Tile each point k times -> (num_pts, k, num_feats) so every point can be
# compared against every centroid; squared distance is summed over features.
point_matrix = tf.reshape(tf.tile(data_points, [1, k]), [num_pts, k, num_feats])
distances = tf.reduce_sum(tf.square(point_matrix - centroid_matrix), axis=2)

# Find the group it belongs to with tf.argmin()
centroid_group = tf.argmin(distances, 1)


# Find the group average
def data_group_avg(group_ids, data):
# Sum each group
@@ -52,7 +53,8 @@ def data_group_avg(group_ids, data):
num_total = tf.unsorted_segment_sum(tf.ones_like(data), group_ids, 3)
# Calculate average
avg_by_group = sum_total/num_total
return(avg_by_group)
return avg_by_group


means = data_group_avg(centroid_group, data_points)

@@ -73,18 +75,20 @@ def data_group_avg(group_ids, data):

[centers, assignments] = sess.run([centroids, cluster_labels])


# Find which group assignments correspond to which group labels
# First, need a most common element function
def most_common(my_list):
    """Return the most frequent element of *my_list* (ties broken arbitrarily)."""
    return max(set(my_list), key=my_list.count)


label0 = most_common(list(assignments[0:50]))
label1 = most_common(list(assignments[50:100]))
label2 = most_common(list(assignments[100:150]))

# Count assignments in each 50-sample slice matching that slice's majority
# cluster label (iris samples are ordered: 50 per species).
group0_count = np.sum(assignments[0:50] == label0)
group1_count = np.sum(assignments[50:100] == label1)
group2_count = np.sum(assignments[100:150] == label2)

# Overall fraction of the 150 samples grouped with their species' majority cluster.
accuracy = (group0_count + group1_count + group2_count)/150.

@@ -108,17 +112,15 @@ def most_common(my_list):
# Get k-means classifications for the grid points
xx_pt = list(xx.ravel())
yy_pt = list(yy.ravel())
xy_pts = np.array([[x, y] for x, y in zip(xx_pt, yy_pt)])
# Nearest reduced-space centroid for every grid point via a k-d tree.
mytree = cKDTree(reduced_centers)
dist, indexes = mytree.query(xy_pts)

# Put the result into a color plot
indexes = indexes.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(indexes, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

# Plot each of the true iris data groups
@@ -128,12 +130,9 @@ def most_common(my_list):
temp_group = reduced_data[(i*50):(50)*(i+1)]
plt.plot(temp_group[:, 0], temp_group[:, 1], symbols[i], markersize=10, label=label_name[i])
# Plot the centroids as a white X
plt.scatter(reduced_centers[:, 0], reduced_centers[:, 1], marker='x', s=169, linewidths=3, color='w', zorder=10)
plt.title('K-means clustering on Iris Dataset Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='lower right')
plt.show()
@@ -64,4 +64,4 @@
# Plot predator and prey population trajectories over time.
plt.plot(prey_values)
plt.plot(predator_values)
plt.legend(['Prey', 'Predator'], loc='upper right')
plt.show()

0 comments on commit 0d17fb1

Please sign in to comment.
You can’t perform that action at this time.