logistic_regression.py
import tensorflow as tf
import numpy as np

IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
NUM_LABELS = 10


def inference(tf_train_dataset):
    """
    Builds the graph as far as is required for running the network forward
    to make predictions.
    :param tf_train_dataset: tensor of flattened input images, shape [batch_size, IMAGE_PIXELS]
    :return: output tensor with the computed logits, plus the weight and bias variables
    """
    # Variables.
    # These are the parameters that we are going to be training. The weight
    # matrix will be initialized using random values following a (truncated)
    # normal distribution. The biases get initialized to zero.
    weights = tf.Variable(tf.truncated_normal([IMAGE_PIXELS, NUM_LABELS]), name='weights')
    biases = tf.Variable(tf.zeros([NUM_LABELS]), name='biases')

    # Training computation.
    # We multiply the inputs by the weight matrix and add the biases.
    logits = tf.matmul(tf_train_dataset, weights) + biases
    return logits, weights, biases


def loss(logits, tf_train_labels):
    """
    Adds to the inference graph the ops required to generate the loss.
    :param logits: logits tensor from inference(), shape [batch_size, NUM_LABELS]
    :param tf_train_labels: one-hot labels tensor, shape [batch_size, NUM_LABELS]
    :return: loss tensor of type float
    """
    # We take the average of this cross-entropy across all training examples:
    # that's our loss.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits)
    return tf.reduce_mean(cross_entropy)


def training(loss, learning_rate):
    """
    Adds to the loss graph the ops required to compute and apply gradients.
    Creates a summary to track the loss over time in TensorBoard, then creates
    an optimizer and applies the gradients to all trainable variables.
    :param loss: loss tensor from loss()
    :param learning_rate: the learning rate to use for gradient descent
    :return: the Op for training
    """
    # Add a scalar summary so the loss can be tracked in TensorBoard.
    tf.summary.scalar('loss', loss)
    # Optimizer.
    # We are going to find the minimum of this loss using gradient descent.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)
    return train_op


def evaluation(logits, labels):
    """
    Evaluate the quality of the predictions at matching the labels.
    Args:
      logits: numpy array of predictions (or logits), float - [batch_size, NUM_LABELS].
      labels: numpy array of one-hot labels, float - [batch_size, NUM_LABELS].
    Returns:
      A scalar float representing the percentage of correct predictions.
    """
    return 100.0 * np.sum(np.argmax(logits, 1) == np.argmax(labels, 1)) / logits.shape[0]


def prediction(logits, tf_valid_dataset, tf_test_dataset, weights, biases):
    """
    Builds softmax prediction ops for the training logits and for the
    validation and test datasets, reusing the trained weights and biases.
    :return: prediction tensors for the training, validation and test sets
    """
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
    test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
    return train_prediction, valid_prediction, test_prediction
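

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): shows how the
# functions above could be wired together in a TensorFlow 1.x graph and run
# with a feed-dict loop. The synthetic data, BATCH_SIZE, learning rate and
# step count below are placeholder assumptions, not values from this
# repository; real use would feed MNIST arrays instead, and prediction()
# would be wired up the same way for validation/test constants.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    BATCH_SIZE = 128

    graph = tf.Graph()
    with graph.as_default():
        # Placeholders for a mini-batch of flattened images and one-hot labels.
        tf_train_dataset = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMAGE_PIXELS))
        tf_train_labels = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_LABELS))

        logits, weights, biases = inference(tf_train_dataset)
        loss_op = loss(logits, tf_train_labels)
        train_op = training(loss_op, learning_rate=0.5)
        train_prediction = tf.nn.softmax(logits)

    # Synthetic stand-ins for the MNIST arrays the module expects.
    fake_images = np.random.rand(BATCH_SIZE, IMAGE_PIXELS).astype(np.float32)
    fake_labels = np.eye(NUM_LABELS, dtype=np.float32)[
        np.random.randint(0, NUM_LABELS, BATCH_SIZE)]

    with tf.Session(graph=graph) as session:
        session.run(tf.global_variables_initializer())
        for step in range(100):
            _, l, preds = session.run(
                [train_op, loss_op, train_prediction],
                feed_dict={tf_train_dataset: fake_images,
                           tf_train_labels: fake_labels})
        print('Final loss: %.4f' % l)
        print('Training accuracy: %.1f%%' % evaluation(preds, fake_labels))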