# https://deeplearningcourses.com/c/data-science-logistic-regression-in-python
# https://www.udemy.com/data-science-logistic-regression-in-python

from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future


import numpy as np
import matplotlib.pyplot as plt

# N, R_inner, and R_outer (the number of points and the two ring radii) are set
# in lines not shown in this excerpt.
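# Illustrative stand-in values so the rest of the script can run; these are
# assumptions, not necessarily the original constants:
N = 1000
R_inner = 5
R_outer = 10
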
# distance from origin is radius + random normal
# angle theta is uniformly distributed between (0, 2pi)
# each point is (r*cos(theta), r*sin(theta)), so the two classes form concentric rings
R1 = np.random.randn(N // 2) + R_inner
theta = 2 * np.pi * np.random.random(N // 2)
X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T

R2 = np.random.randn(N // 2) + R_outer
theta = 2 * np.pi * np.random.random(N // 2)
X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T

X = np.concatenate([X_inner, X_outer])
T = np.array([0]*(N // 2) + [1]*(N // 2))  # labels: first half are 0, second half are 1

plt.scatter(X[:,0], X[:,1], c=T)
plt.show()

# The sigmoid() definition and the construction of Xb, w, and Y used below are
# in lines not shown in this excerpt.
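# A rough, assumed sketch of what that setup must provide (not the original code):
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# bias column plus each point's radius as an extra feature, so a linear
# model can separate the inner ring from the outer ring
ones = np.ones((N, 1))
r = np.sqrt((X * X).sum(axis=1, keepdims=True))
Xb = np.concatenate((ones, r, X), axis=1)

w = np.random.randn(Xb.shape[1])  # random initial weights
Y = sigmoid(Xb.dot(w))            # predicted probability of class 1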

# calculate the cross-entropy error
def cross_entropy(T, Y):
    return -(T*np.log(Y) + (1 - T)*np.log(1 - Y)).sum()
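
# quick sanity check of the expression above:
#   cross_entropy(np.array([1, 0]), np.array([0.9, 0.1]))
#   = -(log(0.9) + log(1 - 0.1)) = -2*log(0.9) ≈ 0.21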


# let's do gradient descent 5000 times
learning_rate = 0.0001
error = []
for i in range(5000):
    e = cross_entropy(T, Y)
    error.append(e)
    if i % 500 == 0:
        print(e)

    # gradient descent weight update with regularization
    # w += learning_rate * ( np.dot((T - Y).T, Xb) - 0.01*w ) # old
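    # the updated line itself is in the part of the diff not shown; an assumed
    # equivalent (gradient of the log-likelihood plus an L2 penalty):
    w += learning_rate * (Xb.T.dot(T - Y) - 0.01*w)
    Y = sigmoid(Xb.dot(w))  # refresh predictions with the new weights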

# (a few lines here, including the call that plots the error curve, are not shown)
plt.title("Cross-entropy per iteration")
plt.show()

print("Final w:", w)
# np.round(Y) thresholds the predicted probabilities at 0.5
print("Final classification rate:", 1 - np.abs(T - np.round(Y)).sum() / N)