
Commit

Update ch 1 and 2
nfmcclure committed Mar 25, 2018
1 parent 723e029 commit 568ddef
Showing 16 changed files with 108 additions and 58 deletions.
9 changes: 8 additions & 1 deletion .gitignore
@@ -25,6 +25,8 @@ checkpoint
 *.pos
 **/temp/
 **/temp
+**/tmp/
+**/tmp
 */tensorboard_logs/*
 */tensorboard_logs/
 */tensorboard_logs
@@ -137,8 +139,13 @@ celerybeat-schedule
 venv/
 ENV/
 
-# Spyder project settings
+# Spyder/pycharm project settings
 .spyderproject
+.idea/
 
 # Rope project settings
 .ropeproject
+
+# Ignore data sources
+*/MNIST_data/*
+*/birthweight_data/*
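A note on the new ignore patterns: git's ** glob (supported since git 1.8.2) matches at any directory depth, so **/tmp/ covers every chapter's scratch directory without listing each one.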
@@ -59,7 +59,6 @@
 merged = tf.summary.merge_all()
 
 # Initialize graph writer:
-
 writer = tf.summary.FileWriter("/tmp/variable_logs", graph=sess.graph)
 
 # Initialize operation
@@ -19,6 +19,6 @@
 
 merged = tf.summary.merge_all()
 
-writer = tf.summary.FileWriter("/tmp/variable_logs", sess.graph_def)
+writer = tf.summary.FileWriter("/tmp/variable_logs", sess.graph)
 
 print(sess.run(y, feed_dict={x: rand_array}))
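For context on this change: in TF 1.x, tf.summary.FileWriter accepts the tf.Graph itself, and passing the lower-level GraphDef proto (sess.graph_def) is the legacy form. A minimal sketch, assuming TensorFlow 1.x:

import tensorflow as tf

sess = tf.Session()
a = tf.constant(3.0, name='a')
b = tf.multiply(a, a, name='b')
# Passing the tf.Graph object lets TensorBoard render the graph view
writer = tf.summary.FileWriter("/tmp/variable_logs", graph=sess.graph)
writer.close()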
3 changes: 1 addition & 2 deletions 01_Introduction/04_Working_with_Matrices/04_matrices.py
@@ -28,8 +28,7 @@
 
 # 3x2 random uniform matrix
 C = tf.random_uniform([3,2])
-print(sess.run(C))
-print(sess.run(C)) # Note that we are reinitializing, hence the new random variabels
+print(sess.run(C)) # Note that we are reinitializing, hence the new random variables
 
 # Create matrix from np array
 D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))
22 changes: 10 additions & 12 deletions 01_Introduction/05_Declaring_Operations/05_operations.py
@@ -5,8 +5,6 @@
 # in TensorFlow
 
 # Declaring Operations
-import matplotlib.pyplot as plt
-import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework import ops
 ops.reset_default_graph()
@@ -15,31 +13,31 @@
 sess = tf.Session()
 
 # div() vs truediv() vs floordiv()
-print(sess.run(tf.div(3,4)))
-print(sess.run(tf.truediv(3,4)))
-print(sess.run(tf.floordiv(3.0,4.0)))
+print(sess.run(tf.div(3, 4)))
+print(sess.run(tf.truediv(3, 4)))
+print(sess.run(tf.floordiv(3.0, 4.0)))
 
 # Mod function
-print(sess.run(tf.mod(22.0,5.0)))
+print(sess.run(tf.mod(22.0, 5.0)))
 
 # Cross Product
-print(sess.run(tf.cross([1.,0.,0.],[0.,1.,0.])))
+print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.])))
 
 # Trig functions
 print(sess.run(tf.sin(3.1416)))
 print(sess.run(tf.cos(3.1416)))
 # Tangent
 print(sess.run(tf.div(tf.sin(3.1416/4.), tf.cos(3.1416/4.))))
+print(sess.run(tf.tan(3.1416/4.)))
 
 # Custom operation
 test_nums = range(15)
-#from tensorflow.python.ops import math_ops
-#print(sess.run(tf.equal(test_num, 3)))
 
+
 def custom_polynomial(x_val):
     # Return 3x^2 - x + 10
-    return(tf.subtract(3 * tf.square(x_val), x_val) + 10)
+    return tf.subtract(3 * tf.square(x_val), x_val) + 10
 
 print(sess.run(custom_polynomial(11)))
 
 # What should we get with list comprehension
 expected_output = [3*x*x-x+10 for x in test_nums]
 print(expected_output)
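For reference, the expected values from the division ops in this file under TensorFlow 1.x semantics, as a self-contained sketch:

import tensorflow as tf

sess = tf.Session()
print(sess.run(tf.div(3, 4)))            # 0: integer division on int inputs (Python 2 style)
print(sess.run(tf.truediv(3, 4)))        # 0.75: casts the ints to float, then divides
print(sess.run(tf.floordiv(3.0, 4.0)))   # 0.0: floor of the true quotient
print(sess.run(tf.mod(22.0, 5.0)))       # 2.0
print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.])))  # [0. 0. 1.]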
@@ -51,12 +51,12 @@
 plt.plot(x_vals, y_relu6, 'g-.', label='ReLU6', linewidth=2)
 plt.plot(x_vals, y_elu, 'k-', label='ExpLU', linewidth=0.5)
 plt.ylim([-1.5,7])
-plt.legend(loc='top left')
+plt.legend(loc='upper left')
 plt.show()
 
 plt.plot(x_vals, y_sigmoid, 'r--', label='Sigmoid', linewidth=2)
 plt.plot(x_vals, y_tanh, 'b:', label='Tanh', linewidth=2)
 plt.plot(x_vals, y_softsign, 'g-.', label='Softsign', linewidth=2)
 plt.ylim([-2,2])
-plt.legend(loc='top left')
+plt.legend(loc='upper left')
 plt.show()
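The loc fix matters: matplotlib's named legend locations are 'upper left', 'lower right', 'best', and so on. 'top left' is not among them; matplotlib 2.x warns about the unrecognized location and falls back to 'best', and later releases reject it outright.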
@@ -6,7 +6,6 @@
 
 # Data Gathering
 import matplotlib.pyplot as plt
-import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework import ops
 ops.reset_default_graph()
@@ -64,11 +63,10 @@
 
 print(X_train.shape)
 print(y_train.shape)
-print(y_train[0,]) # this is a frog
+print(y_train[0,])  # this is a frog
 
 # Plot the 0-th image (a frog)
 from PIL import Image
-%matplotlib inline
 img = Image.fromarray(X_train[0,:,:,:])
 plt.imshow(img)
 
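Dropping %matplotlib inline here is a real fix, not cleanup: it is an IPython magic and a syntax error in a plain .py script. A script renders the figure with an explicit show() instead, roughly:

img = Image.fromarray(X_train[0,:,:,:])
plt.imshow(img)
plt.show()  # needed in a script; the inline magic only works in notebooks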
@@ -1,5 +1,5 @@
 # Operations on a Computational Graph
-import matplotlib.pyplot as plt
+import os
 import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework import ops
@@ -13,13 +13,16 @@
 # Create data to feed in
 x_vals = np.array([1., 3., 5., 7., 9.])
 x_data = tf.placeholder(tf.float32)
-m = tf.constant(3.)
+m_const = tf.constant(3.)
 
 # Multiplication
-prod = tf.multiply(x_data, m)
+my_product = tf.multiply(x_data, m_const)
 for x_val in x_vals:
-    print(sess.run(prod, feed_dict={x_data: x_val}))
+    print(sess.run(my_product, feed_dict={x_data: x_val}))
 
+# View the tensorboard graph by running the following code and then
+# going to the terminal and typing:
+# $ tensorboard --logdir=tensorboard_logs
 merged = tf.summary.merge_all()
 if not os.path.exists('tensorboard_logs/'):
     os.makedirs('tensorboard_logs/')
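For reference, the multiplication loop above feeds each scalar of x_vals through the placeholder, so it should print 3.0, 9.0, 15.0, 21.0, and 27.0 (each value times the constant 3).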
@@ -202,7 +202,9 @@
    "collapsed": true
   },
   "outputs": [],
-  "source": []
+  "source": [
+   ""
+  ]
  }
 ],
 "metadata": {
@@ -214,7 +216,7 @@
 "language_info": {
  "codemirror_mode": {
   "name": "ipython",
-  "version": 3
+  "version": 3.0
  },
 "file_extension": ".py",
 "mimetype": "text/x-python",
@@ -225,5 +227,5 @@
  }
 },
 "nbformat": 4,
-"nbformat_minor": 2
-}
+"nbformat_minor": 0
+}
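The notebook metadata churn above ("version": 3 vs 3.0, nbformat_minor) is a side effect of re-saving the notebook under a different Jupyter version; none of it affects the executed code.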
@@ -0,0 +1,40 @@
+# Layering Nested Operations
+
+import os
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.framework import ops
+ops.reset_default_graph()
+
+# Start a graph session
+sess = tf.Session()
+
+# Create the data and variables
+my_array = np.array([[1., 3., 5., 7., 9.],
+                     [-2., 0., 2., 4., 6.],
+                     [-6., -3., 0., 3., 6.]])
+x_vals = np.array([my_array, my_array + 1])
+x_data = tf.placeholder(tf.float32, shape=(3, 5))
+
+# Constants for matrix multiplication:
+m1 = tf.constant([[1.], [0.], [-1.], [2.], [4.]])
+m2 = tf.constant([[2.]])
+a1 = tf.constant([[10.]])
+
+# Create our multiple operations
+prod1 = tf.matmul(x_data, m1)
+prod2 = tf.matmul(prod1, m2)
+add1 = tf.add(prod2, a1)
+
+# Now feed data through placeholder and print results
+for x_val in x_vals:
+    print(sess.run(add1, feed_dict={x_data: x_val}))
+
+# View the tensorboard graph by running the following code and then
+# going to the terminal and typing:
+# $ tensorboard --logdir=tensorboard_logs
+merged = tf.summary.merge_all()
+if not os.path.exists('tensorboard_logs/'):
+    os.makedirs('tensorboard_logs/')
+
+my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph)
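The shapes in this new file compose as (3, 5) x (5, 1) -> (3, 1), then (3, 1) x (1, 1) -> (3, 1), and the final (1, 1) constant broadcasts across the add. A quick static check against the names above, as a sketch:

print(prod1.get_shape())  # (3, 1)
print(prod2.get_shape())  # (3, 1)
print(add1.get_shape())   # (3, 1)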
@@ -1,6 +1,5 @@
 # Working with Multiple Layers
 
-import matplotlib.pyplot as plt
 import numpy as np
 import tensorflow as tf
 import os
@@ -41,7 +40,7 @@ def custom_layer(input_matrix):
 custom_layer1 = custom_layer(mov_avg_layer)
 
 # The output should be an array that is 2x2, but size (1,2,2,1)
-#print(sess.run(mov_avg_layer, feed_dict={x_data: x_val}))
+print(sess.run(mov_avg_layer, feed_dict={x_data: x_val}))
 
 # After custom operation, size is now 2x2 (squeezed out size 1 dims)
 print(sess.run(custom_layer1, feed_dict={x_data: x_val}))
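With both prints active, the first shows the moving-average output at its raw size (1, 2, 2, 1) and the second shows the 2x2 result after custom_layer squeezes out the size-1 dimensions, matching the two comments above.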
@@ -44,6 +44,7 @@
 plt.plot(x_array, phuber2_y_out, 'g:', label='P-Huber Loss (5.0)')
 plt.ylim(-0.2, 0.4)
 plt.legend(loc='lower right', prop={'size': 11})
+plt.grid()
 plt.show()
 
 
@@ -68,15 +69,18 @@
 # L = max(actual, 0) - actual * pred + log(1 + exp(-abs(actual)))
 x_val_input = tf.expand_dims(x_vals, 1)
 target_input = tf.expand_dims(targets, 1)
-xentropy_sigmoid_y_vals = tf.nn.softmax_cross_entropy_with_logits(logits=x_val_input, labels=target_input)
+xentropy_sigmoid_y_vals = tf.nn.softmax_cross_entropy_with_logits_v2(logits=x_val_input,
+                                                                     labels=target_input)
 xentropy_sigmoid_y_out = sess.run(xentropy_sigmoid_y_vals)
 
 # Weighted (softmax) cross entropy loss
 # L = -actual * (log(pred)) * weights - (1-actual)(log(1-pred))
 # or
 # L = (1 - pred) * actual + (1 + (weights - 1) * pred) * log(1 + exp(-actual))
 weight = tf.constant(0.5)
-xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(x_vals, targets, weight)
+xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(logits=x_vals,
+                                                                    targets=targets,
+                                                                    pos_weight=weight)
 xentropy_weighted_y_out = sess.run(xentropy_weighted_y_vals)
 
 # Plot the output
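On the pos_weight argument above: per the TensorFlow documentation, it scales only the positive-class term of the sigmoid cross entropy,

# loss = targets * -log(sigmoid(logits)) * pos_weight
#        + (1 - targets) * -log(1 - sigmoid(logits))

so a pos_weight above 1 trades precision for recall (false negatives cost more), and a value below 1, like the 0.5 used here, does the opposite.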
@@ -87,15 +91,16 @@
 plt.plot(x_array, xentropy_weighted_y_out, 'g:', label='Weighted Cross Entropy Loss (x0.5)')
 plt.ylim(-1.5, 3)
 #plt.xlim(-1, 3)
+plt.grid()
 plt.legend(loc='lower right', prop={'size': 11})
 plt.show()
 
 # Softmax entropy loss
 # L = -actual * (log(softmax(pred))) - (1-actual)(log(1-softmax(pred)))
 unscaled_logits = tf.constant([[1., -3., 10.]])
 target_dist = tf.constant([[0.1, 0.02, 0.88]])
-softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=unscaled_logits,
-                                                           labels=target_dist)
+softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=unscaled_logits,
+                                                              labels=target_dist)
 print(sess.run(softmax_xentropy))
 
 # Sparse entropy loss
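The _v2 migration above follows the TF 1.5+ deprecation notice: the original op silently blocked gradients from flowing into the labels, while the _v2 op backpropagates into both logits and labels. With constant labels, as here, the results are identical; if the labels were produced by trainable variables and should stay fixed, the documented pattern is to wrap them in tf.stop_gradient, as a sketch:

softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=unscaled_logits, labels=tf.stop_gradient(target_dist))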
@@ -4,7 +4,6 @@
 # This python function shows how to implement back propagation
 # in regression and classification models.
 
-import matplotlib.pyplot as plt
 import numpy as np
 import tensorflow as tf
 from tensorflow.python.framework import ops
@@ -36,14 +35,14 @@
 # Add L2 loss operation to graph
 loss = tf.square(my_output - y_target)
 
-# Initialize variables
-init = tf.global_variables_initializer()
-sess.run(init)
-
 # Create Optimizer
 my_opt = tf.train.GradientDescentOptimizer(0.02)
 train_step = my_opt.minimize(loss)
 
+# Initialize variables
+init = tf.global_variables_initializer()
+sess.run(init)
+
 # Run Loop
 for i in range(100):
     rand_index = np.random.choice(100)
@@ -108,7 +107,7 @@
     if (i+1)%200==0:
         print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
         print('Loss = ' + str(sess.run(xentropy, feed_dict={x_data: rand_x, y_target: rand_y})))
-
+
 # Evaluate Predictions
 predictions = []
 for i in range(len(x_vals)):
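A note on the reordering above (the same change appears twice in the next file): building tf.global_variables_initializer() after the optimizer is the safe ordering, because optimizers such as tf.train.AdamOptimizer or tf.train.MomentumOptimizer create internal slot variables when minimize() is called, and an initializer built earlier would miss them. Plain GradientDescentOptimizer has no slots, so here the change is defensive. Roughly:

my_opt = tf.train.AdamOptimizer(0.02)      # Adam adds slot variables...
train_step = my_opt.minimize(loss)         # ...when minimize() builds the update op
init = tf.global_variables_initializer()   # so build the initializer last
sess.run(init)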
@@ -32,14 +32,14 @@
 # Add L2 loss operation to graph
 loss = tf.square(my_output - y_target)
 
-# Initialize variables
-init = tf.global_variables_initializer()
-sess.run(init)
-
 # Create Optimizer
 my_opt = tf.train.GradientDescentOptimizer(0.02)
 train_step = my_opt.minimize(loss)
 
+# Initialize variables
+init = tf.global_variables_initializer()
+sess.run(init)
+
 loss_stochastic = []
 # Run Loop
 for i in range(100):
@@ -77,14 +77,14 @@
 # Add L2 loss operation to graph
 loss = tf.reduce_mean(tf.square(my_output - y_target))
 
-# Initialize variables
-init = tf.global_variables_initializer()
-sess.run(init)
-
 # Create Optimizer
 my_opt = tf.train.GradientDescentOptimizer(0.02)
 train_step = my_opt.minimize(loss)
 
+# Initialize variables
+init = tf.global_variables_initializer()
+sess.run(init)
+
 loss_batch = []
 # Run Loop
 for i in range(100):
@@ -150,7 +150,7 @@
 # Plot classification result
 A_result = -sess.run(A)
 bins = np.linspace(-5, 5, 50)
-plt.hist(x_vals[0:50], bins, alpha=0.5, label='N(-1,1)', color='white')
+plt.hist(x_vals[0:50], bins, alpha=0.5, label='N(-1,1)', color='blue')
 plt.hist(x_vals[50:100], bins[0:50], alpha=0.5, label='N(2,1)', color='red')
 plt.plot((A_result, A_result), (0, 8), 'k--', linewidth=3, label='A = '+ str(np.round(A_result, 2)))
 plt.legend(loc='upper right')
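Swapping the first histogram from white to blue is a visibility fix: white bars are invisible against matplotlib's default white axes background.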
15 changes: 8 additions & 7 deletions requirements.txt
@@ -1,7 +1,8 @@
-tensorflow
-numpy
-scipy
-sklearn
-requests
-jupyter
-matplotlib
+tensorflow==1.6.0
+numpy==1.14.2
+scipy==1.0.0
+sklearn==0.19.1
+jupyter==1.0.0
+matplotlib==2.2.2
+requests==2.18.4
+Pillow==5.0.0
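Pinning exact versions makes the examples reproducible against the APIs these chapters now use (softmax_cross_entropy_with_logits_v2, for instance, requires TensorFlow >= 1.5). One caveat: scikit-learn is published on PyPI as scikit-learn, so the sklearn==0.19.1 pin may not resolve as written; scikit-learn==0.19.1 is the safer spelling.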
