
Commit 0269ffc

First 2 course codes

3 files changed: 214 additions & 0 deletions

course_1_tf_basic_operation.py

Lines changed: 40 additions & 0 deletions
# Basic TensorFlow 1.x operations
import tensorflow as tf

# Constant ops: values are baked into the graph at build time
a = tf.constant(2)
b = tf.constant(3)
c = a + b
d = a * b

sess = tf.Session()
print(sess.run(c))
print(sess.run(d))

# Placeholder ops: values are supplied at run time
a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

# Build the ops, then feed the inputs through feed_dict
add = tf.add(a, b)
mul = tf.multiply(a, b)
print(sess.run(add, feed_dict={a: 2, b: 3}))
print(sess.run(mul, feed_dict={a: 2, b: 3}))

# Matrix multiplication with constants: (2x1) x (1x2) -> 2x2
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix2, matrix1)
print(sess.run(product))

# Variables can also be overridden through feed_dict,
# so no initializer is needed for this run
mat1 = tf.Variable(tf.random_normal([3, 2]))
mat2 = tf.Variable(tf.random_normal([2, 3]))
product = tf.matmul(mat1, mat2)

m1 = [[1, 3], [2, 1], [0, 5]]  # 3x2
m2 = [[3, 2, 1], [1, 2, 3]]    # 2x3

print(sess.run(product, feed_dict={mat1: m1, mat2: m2}))
sess.close()
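
Note: this file uses the 1.x graph API (Session, placeholder). If your environment has only TensorFlow 2.x installed (an assumption about your setup, not part of this commit), a minimal sketch to keep the code above working is the compatibility module:

# Assumption: TensorFlow 2.x environment; not part of the original commit.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores Session/placeholder graph-mode semantics

sess = tf.Session()
print(sess.run(tf.constant(2) + tf.constant(3)))  # -> 5
sess.close()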

course_1_tf_lr.py

Lines changed: 80 additions & 0 deletions
'''
A linear regression experiment. After working through it you should know:
1. how to design the learning model
2. how to optimize the model
3. how to deal with the dataset

Original Author: Aymeric Damien
Edited by Wei Li for the ChinaHadoop deep learning course
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

import tensorflow as tf
import numpy
rng = numpy.random

# Training parameters
learning_rate = 0.02
training_epochs = 3000
display_step = 50

# Training data
train_X = numpy.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                         7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = numpy.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                         2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]

# tf Graph input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)

# Mean squared error
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Fit all training data, one sample at a time
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # The testing data
    test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
    test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])

    print("Testing...")
    testing_cost = sess.run(
        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
        feed_dict={X: test_X, Y: test_Y})  # same cost function as above
    print("Test cost=", testing_cost)
    print("Absolute difference between training and test cost:",
          abs(training_cost - testing_cost))
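
A natural follow-up (not in this commit) is to reuse the fitted parameters for prediction. The sketch below assumes w_val and b_val were fetched with sess.run([W, b]) inside the with-block above; the names predict, w_val, and b_val are hypothetical, and the numeric values shown are illustrative only.

# Hypothetical helper, assuming w_val, b_val = sess.run([W, b]) was
# executed before the session closed.
import numpy

def predict(x, w_val, b_val):
    # The learned model is y = W*x + b, applied here in plain NumPy.
    return w_val * numpy.asarray(x) + b_val

print(predict([6.83, 4.668], w_val=0.25, b_val=0.8))  # illustrative values only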

course_2_tf_nn.py

Lines changed: 94 additions & 0 deletions
# Get the MNIST data (read_data_sets below downloads it automatically);
# manual download: wget http://deeplearning.net/data/mnist/mnist.pkl.gz

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(".", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 30
batch_size = 100
display_step = 1

# Network parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 512  # 2nd layer number of features
n_input = 784     # MNIST data input (img shape: 28*28)
n_classes = 10    # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])


# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)

    # We could add a dropout layer here:
    # drop_out = tf.nn.dropout(layer_2, 0.75)

    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Store layers' weights & biases
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                  "{:.9f}".format(avg_cost))
    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
