This repository has been archived by the owner on Jan 6, 2019. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2
/
logistic_regression_2d.py
73 lines (57 loc) · 1.95 KB
/
logistic_regression_2d.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
"""
Usage:
python logistic_regression_2d.py
"""
# Hyperparameters for gradient-descent training.
learning_rate = 0.1
training_epochs = 2000

# Synthetic 2-D data: class 0 clusters around (3, 2), class 1 around (7, 6),
# 1000 samples each with unit-variance Gaussian noise.
x1_label1 = np.random.normal(3, 1, 1000)
x2_label1 = np.random.normal(2, 1, 1000)
x1_label2 = np.random.normal(7, 1, 1000)
x2_label2 = np.random.normal(6, 1, 1000)
x1s = np.append(x1_label1, x1_label2)
x2s = np.append(x2_label1, x2_label2)
ys = np.asarray([0.] * len(x1_label1) + [1.] * len(x1_label2))

# Placeholders for the two input features and the binary target.
X1 = tf.placeholder(tf.float32, shape=(None, ), name="x1")
X2 = tf.placeholder(tf.float32, shape=(None, ), name="x2")
Y = tf.placeholder(tf.float32, shape=(None, ), name="y")

# w = [bias, weight_x1, weight_x2]; Variables are trainable by default,
# so the redundant trainable=True flag is dropped.
w = tf.Variable([0., 0., 0.], name="weights")

# Predicted probability of class 1 for each sample.
y_model = tf.sigmoid(-(w[2] * X2 + w[1] * X1 + w[0]))

# Cross-entropy loss.  p_true is the probability the model assigns to the
# TRUE label of each sample; clip it away from 0 so tf.log can never
# produce -inf (which would turn the loss and gradients into NaN).
p_true = y_model * Y + (1 - y_model) * (1 - Y)
cost = tf.reduce_mean(-tf.log(tf.clip_by_value(p_true, 1e-10, 1.0)))
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# Train the model, with early stopping once the loss plateaus.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    prev_err = 0
    for epoch in tqdm(range(training_epochs)):
        err, _ = sess.run([cost, train_op], {
            X1: x1s,
            X2: x2s,
            Y: ys
        })
        if epoch % 100 == 0:
            print(epoch, err)
        # Early stopping: a loss change below the tolerance means the
        # optimization has effectively converged.
        if abs(prev_err - err) < 0.00001:
            break
        prev_err = err
    # Fetch the learned weights.  `w` does not depend on the placeholders,
    # so no feed_dict is needed here.  The original also called
    # sess.close() explicitly; that is redundant inside a `with` block
    # (the context manager closes the session), so it is removed.
    w_val = sess.run(w)
def sigmoid(x):
    """Numpy logistic function: 1 / (1 + e^(-x)); works element-wise on arrays."""
    exp_neg = np.exp(-x)
    return 1. / (1. + exp_neg)
# Scan a 100x100 grid over [0, 10]^2 and keep the points where the model
# output is within 0.01 of 0.5 — an approximation of the decision boundary.
grid = np.linspace(0, 10, 100)
boundary_points = [
    (a, b)
    for a in grid
    for b in grid
    if abs(sigmoid(-b * w_val[2] - a * w_val[1] - w_val[0]) - 0.5) < 0.01
]
x1_boundary = [p[0] for p in boundary_points]
x2_boundary = [p[1] for p in boundary_points]

# Boundary in blue circles, class 0 in red crosses, class 1 in green ticks.
plt.scatter(x1_boundary, x2_boundary, c='b', marker='o', s=20)
plt.scatter(x1_label1, x2_label1, c='r', marker='x', s=20)
plt.scatter(x1_label2, x2_label2, c='g', marker='1', s=20)
plt.show()