Commit
Showing 5 changed files with 963 additions and 0 deletions.
@@ -0,0 +1 @@
Data/*
@@ -0,0 +1,283 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Builds the __IndianPines__ network.\n", | ||
"===================================\n", | ||
"Implements the _inference/loss/training pattern_ for model building.\n", | ||
"1. inference() - Builds the model as far as is required for running the network forward to make predictions.\n", | ||
"2. loss() - Adds to the inference model the layers required to generate loss.\n", | ||
"3. training() - Adds to the loss model the Ops required to generate and apply gradients.\n", | ||
"\n", | ||
"This file is used by the various \"fully_connected_*.py\" files and not meant to be run." | ||
] | ||
}, | ||
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from __future__ import absolute_import\n",
"from __future__ import division\n",
"from __future__ import print_function\n",
"\n",
"import math\n",
"\n",
"import tensorflow as tf"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# The IndianPines dataset has 16 classes, representing different kinds of land-cover.\n",
"NUM_CLASSES = 16\n",
"\n",
"# We have chopped the IndianPines image into 31x31 pixel patches, each with 220 spectral bands.\n",
"# We will classify each patch.\n",
"IMAGE_SIZE = 31\n",
"IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * 220\n",
"\n",
"# Both convolutional layers in inference() use 5x5 kernels; this constant is needed\n",
"# there to work out the spatial size left after the conv/pool stack.\n",
"KERNEL_SIZE = 5"
]
},
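{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative shape check (not part of the original file): each flattened patch fed to the network has `IMAGE_SIZE * IMAGE_SIZE * 220` = 211,420 values, so a feed-style script would use an `images` placeholder of shape `[batch_size, IMAGE_PIXELS]`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check of the constants above (illustrative only).\n",
"print(IMAGE_PIXELS)  # 31 * 31 * 220 = 211420\n",
"\n",
"# The placeholder shape a fully_connected_feed-style script would use.\n",
"images_placeholder = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))"
]
},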
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Build the IndianPines model up to where it may be used for inference.\n",
"--------------------------------------------------\n",
"Args:\n",
"* images: Images placeholder, from inputs().\n",
"* conv1_channels: Number of filters in the first convolutional layer.\n",
"* conv2_channels: Number of filters in the second convolutional layer.\n",
"* fc1_units: Number of units in the first fully connected hidden layer.\n",
"* fc2_units: Number of units in the second fully connected hidden layer.\n",
"\n",
"Returns:\n",
"* softmax_linear: Output tensor with the computed logits."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def inference(images, conv1_channels, conv2_channels, fc1_units, fc2_units):\n",
"    \"\"\"Build the IndianPines model up to where it may be used for inference.\n",
"    Args:\n",
"        images: Images placeholder, from inputs().\n",
"        conv1_channels: Number of filters in the first convolutional layer.\n",
"        conv2_channels: Number of filters in the second convolutional layer.\n",
"        fc1_units: Number of units in the first fully connected hidden layer.\n",
"        fc2_units: Number of units in the second fully connected hidden layer.\n",
"\n",
"    Returns:\n",
"        softmax_linear: Output tensor with the computed logits.\n",
"    \"\"\"\n",
"    \n",
"    # Conv 1\n",
"    with tf.variable_scope('conv_1') as scope:\n",
"        weights = tf.get_variable('weights', shape=[KERNEL_SIZE, KERNEL_SIZE, 220, conv1_channels],\n",
"                                  initializer=tf.contrib.layers.xavier_initializer_conv2d())\n",
"        biases = tf.get_variable('biases', shape=[conv1_channels], initializer=tf.constant_initializer(0.05))\n",
"        \n",
"        # Reshape the flattened input vectors back into 31x31 patches with 220 bands.\n",
"        x_image = tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, 220])\n",
"        z = tf.nn.conv2d(x_image, weights, strides=[1, 1, 1, 1], padding='VALID')\n",
"        h_conv1 = tf.nn.relu(z + biases, name=scope.name)\n",
"    \n",
"    # Maxpool 1\n",
"    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],\n",
"                             strides=[1, 2, 2, 1], padding='SAME', name='h_pool1')\n",
"    \n",
"    # Conv 2\n",
"    with tf.variable_scope('h_conv2') as scope:\n",
"        weights = tf.get_variable('weights', shape=[KERNEL_SIZE, KERNEL_SIZE, conv1_channels, conv2_channels],\n",
"                                  initializer=tf.contrib.layers.xavier_initializer_conv2d())\n",
"        biases = tf.get_variable('biases', shape=[conv2_channels], initializer=tf.constant_initializer(0.05))\n",
"        z = tf.nn.conv2d(h_pool1, weights, strides=[1, 1, 1, 1], padding='VALID')\n",
"        h_conv2 = tf.nn.relu(z + biases, name=scope.name)\n",
"    \n",
"    # Maxpool 2\n",
"    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],\n",
"                             strides=[1, 2, 2, 1], padding='SAME', name='h_pool2')\n",
"    \n",
"    # Spatial size after two 'VALID' convolutions and two stride-2 'SAME' max-pools;\n",
"    # this evaluates to 5 for IMAGE_SIZE = 31 and KERNEL_SIZE = 5.\n",
"    size_after_conv_and_pool_twice = int(math.ceil((math.ceil(float(IMAGE_SIZE - KERNEL_SIZE + 1) / 2) - KERNEL_SIZE + 1) / 2))\n",
"    \n",
"    # Reshape from 4D to 2D: one flattened feature vector per patch.\n",
"    flat_size = (size_after_conv_and_pool_twice ** 2) * conv2_channels\n",
"    h_pool2_flat = tf.reshape(h_pool2, [-1, flat_size])\n",
"    \n",
"    # FC 1\n",
"    with tf.variable_scope('h_FC1') as scope:\n",
"        weights = tf.Variable(\n",
"            tf.truncated_normal([flat_size, fc1_units],\n",
"                                stddev=1.0 / math.sqrt(float(flat_size))),\n",
"            name='weights')\n",
"        biases = tf.Variable(tf.zeros([fc1_units]),\n",
"                             name='biases')\n",
"        h_FC1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights) + biases, name=scope.name)\n",
"    \n",
"    # FC 2\n",
"    with tf.variable_scope('h_FC2') as scope:\n",
"        weights = tf.Variable(\n",
"            tf.truncated_normal([fc1_units, fc2_units],\n",
"                                stddev=1.0 / math.sqrt(float(fc1_units))),\n",
"            name='weights')\n",
"        biases = tf.Variable(tf.zeros([fc2_units]),\n",
"                             name='biases')\n",
"        h_FC2 = tf.nn.relu(tf.matmul(h_FC1, weights) + biases, name=scope.name)\n",
"    \n",
"    # Linear\n",
"    with tf.name_scope('softmax_linear'):\n",
"        weights = tf.Variable(\n",
"            tf.truncated_normal([fc2_units, NUM_CLASSES],\n",
"                                stddev=1.0 / math.sqrt(float(fc2_units))),\n",
"            name='weights')\n",
"        biases = tf.Variable(tf.zeros([NUM_CLASSES]),\n",
"                             name='biases')\n",
"        logits = tf.matmul(h_FC2, weights) + biases\n",
"    \n",
"    return logits\n"
]
},
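{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative shape trace (not part of the original file): with IMAGE_SIZE = 31 and KERNEL_SIZE = 5, the two 'VALID' convolutions and two stride-2 'SAME' max-pools reduce the spatial size 31 -> 27 -> 14 -> 10 -> 5, which is what `size_after_conv_and_pool_twice` evaluates to inside `inference()`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Trace the spatial size through conv1/pool1/conv2/pool2 (illustrative only).\n",
"size = IMAGE_SIZE                      # 31x31 input patch\n",
"size = size - KERNEL_SIZE + 1          # conv1, 'VALID': 27\n",
"size = int(math.ceil(size / 2.0))      # maxpool 1, stride 2, 'SAME': 14\n",
"size = size - KERNEL_SIZE + 1          # conv2, 'VALID': 10\n",
"size = int(math.ceil(size / 2.0))      # maxpool 2, stride 2, 'SAME': 5\n",
"print(size)"
]
},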
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Define the loss function\n",
"------------------------"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def loss(logits, labels):\n",
"    \"\"\"Calculates the loss from the logits and the labels.\n",
"    Args:\n",
"        logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n",
"        labels: Labels tensor, int32 - [batch_size].\n",
"    Returns:\n",
"        loss: Loss tensor of type float.\n",
"    \"\"\"\n",
"    labels = tf.to_int64(labels)\n",
"    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
"        logits, labels, name='xentropy')\n",
"    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n",
"    return loss"
]
},
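{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note (illustrative): `loss()` uses the sparse cross-entropy op, so `labels` is a vector of integer class indices in [0, NUM_CLASSES), one per example -- e.g. a batch of three patches with classes 2, 0 and 15 is fed as `[2, 0, 15]`, not as one-hot vectors."
]
},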
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Define the Training OP\n",
"--------------------"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def training(loss, learning_rate):\n",
"    \"\"\"Sets up the training Ops.\n",
"    Creates a summarizer to track the loss over time in TensorBoard.\n",
"    Creates an optimizer and applies the gradients to all trainable variables.\n",
"    The Op returned by this function is what must be passed to the\n",
"    `sess.run()` call to cause the model to train.\n",
"    Args:\n",
"        loss: Loss tensor, from loss().\n",
"        learning_rate: The learning rate to use for gradient descent.\n",
"    Returns:\n",
"        train_op: The Op for training.\n",
"    \"\"\"\n",
"    # Add a scalar summary for the snapshot loss.\n",
"    tf.scalar_summary(loss.op.name, loss)\n",
"    # Create the gradient descent optimizer with the given learning rate.\n",
"    optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
"    # Create a variable to track the global step.\n",
"    global_step = tf.Variable(0, name='global_step', trainable=False)\n",
"    # Use the optimizer to apply the gradients that minimize the loss\n",
"    # (and also increment the global step counter) as a single training step.\n",
"    train_op = optimizer.minimize(loss, global_step=global_step)\n",
"    return train_op\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Define the Evaluation OP\n",
"----------------------"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def evaluation(logits, labels):\n",
"    \"\"\"Evaluate the quality of the logits at predicting the label.\n",
"    Args:\n",
"        logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n",
"        labels: Labels tensor, int32 - [batch_size], with values in the\n",
"            range [0, NUM_CLASSES).\n",
"    Returns:\n",
"        A scalar int32 tensor with the number of examples (out of batch_size)\n",
"        that were predicted correctly.\n",
"    \"\"\"\n",
"    # For a classifier model, we can use the in_top_k Op.\n",
"    # It returns a bool tensor with shape [batch_size] that is true for\n",
"    # the examples where the label is in the top k (here k=1)\n",
"    # of all logits for that example.\n",
"    correct = tf.nn.in_top_k(logits, labels, 1)\n",
"    # Return the number of true entries.\n",
"    return tf.reduce_sum(tf.cast(correct, tf.int32))"
]
}
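,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Usage sketch\n",
"------------\n",
"A minimal, illustrative sketch (not part of the original file) of how a fully_connected_feed-style script might wire these functions together. The layer sizes, the learning rate and the `next_batch()` helper are hypothetical placeholders chosen only for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: build the graph from the functions defined above.\n",
"images_ph = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))\n",
"labels_ph = tf.placeholder(tf.int32, shape=(None,))\n",
"\n",
"logits = inference(images_ph, conv1_channels=32, conv2_channels=64,\n",
"                   fc1_units=128, fc2_units=64)\n",
"loss_op = loss(logits, labels_ph)\n",
"train_op = training(loss_op, learning_rate=0.01)\n",
"eval_op = evaluation(logits, labels_ph)\n",
"\n",
"# The training loop itself is only sketched; next_batch() is a hypothetical\n",
"# data-loading helper, which is why this part is left commented out.\n",
"# with tf.Session() as sess:\n",
"#     sess.run(tf.initialize_all_variables())\n",
"#     for step in range(2000):\n",
"#         batch_images, batch_labels = next_batch(100)\n",
"#         _, loss_value = sess.run([train_op, loss_op],\n",
"#                                  feed_dict={images_ph: batch_images,\n",
"#                                             labels_ph: batch_labels})"
]
}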
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}