Add bare bones implementations
siddk committed May 19, 2016
1 parent 829d944 commit 9780e0c
Showing 20 changed files with 104,297 additions and 1 deletion.
2 changes: 1 addition & 1 deletion .gitignore
@@ -65,4 +65,4 @@ target/
.DS_Store

# PyCharm
-idea/
+.idea/
1 change: 1 addition & 0 deletions __init__.py
@@ -0,0 +1 @@
__author__ = 'sidd'
1 change: 1 addition & 0 deletions langmod_nn/__init__.py
@@ -0,0 +1 @@
__author__ = 'sidd'
23,683 changes: 23,683 additions & 0 deletions langmod_nn/data/english-senate-0.txt

Large diffs are not rendered by default.

8,486 changes: 8,486 additions & 0 deletions langmod_nn/data/english-senate-1.txt

Large diffs are not rendered by default.

24,566 changes: 24,566 additions & 0 deletions langmod_nn/data/english-senate-2.txt

Large diffs are not rendered by default.

47,366 changes: 47,366 additions & 0 deletions langmod_nn/data/good-bad.txt

Large diffs are not rendered by default.

Empty file.
Empty file.
Empty file.
Empty file.
Empty file added mnist_cnn/__init__.py
Empty file.
118 changes: 118 additions & 0 deletions mnist_cnn/mnist_cnn.py
@@ -0,0 +1,118 @@
"""
deep_mnist.py
Code for following Tensorflow Tutorial.
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

# Load MNIST Data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Start Tensorflow Session
sess = tf.InteractiveSession()

# Build Input/Output placeholders
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

# # Single Layer Model
#
# # Set up Model Parameters
# W = tf.Variable(tf.zeros([784, 10]))
# b = tf.Variable(tf.zeros([10]))
#
# # Initialize all variables in the session
# sess.run(tf.initialize_all_variables())
#
# # Actually implement the model
# y = tf.nn.softmax(tf.matmul(x, W) + b)
#
# # Set up loss function
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
#
# # Start the training process
# train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
#
# # Train the model
# for i in range(1000):
# batch = mnist.train.next_batch(50)
# train_step.run(feed_dict={x: batch[0], y_: batch[1]})
#
# # Set up evaluations
# correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Evaluate model
# print "Single Layer Accuracy:", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})


# Convolution Model
# Define variables
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Define special layers
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Build model
# Reshape Input
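# x arrives as (batch, 784); reshape it to (batch, 28, 28, 1), i.e. (batch, height, width, channels).
# The -1 lets TensorFlow infer the batch dimension.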
x_image = tf.reshape(x, [-1, 28, 28, 1])

# Convolution + Max Pool (1)
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Convolution + Max Pool (2)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Dense Layer
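# After two rounds of 2x2 max pooling, the 28x28 input has been downsampled to 7x7 with 64
# feature maps, so the flattened vector fed into the dense layer has 7 * 7 * 64 entries.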
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout
keep_prob = tf.placeholder(tf.float32) # Placeholder lets us turn on during train, off during test
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Softmax Layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# Train and Evaluate
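# (Taking an explicit softmax and then a log can be numerically unstable; TensorFlow's fused
#  tf.nn.softmax_cross_entropy_with_logits op is the usual alternative, but the tutorial's
#  formulation is kept here.)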
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())

for i in range(2000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("Step %d, training accuracy %g" % (i, train_accuracy))

    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print("Test accuracy: %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels,
                                                     keep_prob: 1.0}))
Empty file.
Empty file.
Empty file.
Empty file.
28 changes: 28 additions & 0 deletions mnist_nn/README.md
@@ -0,0 +1,28 @@
# MNIST NN #
Simple MNIST handwritten-digit classification problem. Given a 28 x 28 pixel image, predict which digit class
it belongs to.

## Model Setup ##

+ Input: 28 x 28 Pixel Image, flattened into a single vector of size 784. The input "x" is then
a matrix of size (Batch-Size, 784).

+ Output: Vector of size 10, corresponding to the probability that the given image belongs to
each of the 10 digit classes (0, 1, 2, ... 9). The output "y" is then a matrix of
size (Batch-Size, 10).
+ Layers: The model has only one fully-connected feed-forward layer, with a bias vector. This
          layer is followed by a Softmax Layer.
1) Feed-Forward Layer: This is represented by a weight matrix "W" of size (784, 10).
The bias vector is of size (10). The transformation is of the form
(xW + b), a simple matrix multiplication between the input and the
weight matrix, and the addition of the bias.
2) Softmax Layer: Normal softmax over the output of size (10). Represents probability that
the current example image belongs to each of the 10 digit classes.

+ Loss: We use cross-entropy loss. The true labels are encoded as one-hot vectors of size 10.
+ Optimizer: Simple SGD Optimizer with learning rate 0.5.
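
A minimal NumPy sketch of the shapes and loss described above (NumPy is used purely for
illustration; the actual model in `mnist_nn.py` uses TensorFlow, and the batch size of 50 is
just an example):

```python
import numpy as np

batch_size = 50
x = np.zeros((batch_size, 784))   # a batch of flattened 28 x 28 images
W = np.zeros((784, 10))           # feed-forward weight matrix
b = np.zeros(10)                  # bias vector

logits = x.dot(W) + b             # shape (batch_size, 10)
# Softmax turns each row of logits into a probability distribution over the 10 digit classes
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)

# Cross-entropy against one-hot labels: mean over the batch of -sum(y * log(y_hat))
y_true = np.zeros((batch_size, 10))
y_true[:, 3] = 1.0                # hypothetical batch in which every image is a "3"
loss = -np.sum(y_true * np.log(probs)) / batch_size

print(probs.shape)                # (50, 10)
print(loss)                       # ~2.303, i.e. -log(0.1) with all-zero weights (uniform probabilities)
```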
Empty file added mnist_nn/__init__.py
Empty file.
47 changes: 47 additions & 0 deletions mnist_nn/mnist_nn.py
@@ -0,0 +1,47 @@
"""
mnist_nn.py
Code for the TensorFlow MNIST tutorial. Builds a simple one-layer network with a softmax output.
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

# Load MNIST Data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Start Tensorflow Session
sess = tf.InteractiveSession()

# Build Input/Output placeholders
x = tf.placeholder(tf.float32, shape=[None, 784]) # X is input, of shape (Batch, 784)
y_ = tf.placeholder(tf.float32, shape=[None, 10]) # Y is output, of shape (Batch, 10)

# Single Layer Model

# Set up Model Parameters
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Initialize all variables in the session
sess.run(tf.initialize_all_variables())

# Actually implement the model
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Set up loss function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

# Start the training process
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Train the model
for i in range(1000):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

# Set up evaluations
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Evaluate model
print "Single Layer Accuracy:", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})
