@@ -40,34 +40,20 @@ def logistic_reg(
     alpha,
     X,
     y,
-    num_steps,
     max_iterations=70000,
 ):
-    converged = False
-    iterations = 0
     theta = np.zeros(X.shape[1])

-    while not converged:
+    for iterations in range(max_iterations):
         z = np.dot(X, theta)
         h = sigmoid_function(z)
         gradient = np.dot(X.T, h - y) / y.size
-        theta = theta - alpha * gradient
+        theta = theta - alpha * gradient  # updating the weights
         z = np.dot(X, theta)
         h = sigmoid_function(z)
         J = cost_function(h, y)
-        iterations += 1  # update iterations
-        weights = np.zeros(X.shape[1])
-        for step in range(num_steps):
-            scores = np.dot(X, weights)
-            predictions = sigmoid_function(scores)
-            if step % 10000 == 0:
-                print(log_likelihood(X, y, weights))  # Print log-likelihood every so often
-        return weights
-
-        if iterations == max_iterations:
-            print('Maximum iterations exceeded!')
-            print('Minimal cost function J=', J)
-            converged = True
+        if iterations % 100 == 0:
+            print(f'loss: {J} \t')  # printing the loss after every 100 iterations
     return theta


 # In[68]:
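Note: the rewritten loop calls sigmoid_function and cost_function, which are defined elsewhere in the file and not shown in this hunk. A minimal sketch of the standard definitions the loop assumes (illustrative, not copied from the patch):

import numpy as np


def sigmoid_function(z):
    # squashes a raw score into a probability in (0, 1)
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    # binary cross-entropy averaged over the samples; this is the J the loop prints
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

The substantive fix in this hunk is replacing the unbounded `while not converged:` loop with `for iterations in range(max_iterations):`, which guarantees termination, and deleting the duplicate num_steps training block, whose early `return weights` made the convergence check below it unreachable.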
@@ -78,8 +64,8 @@ def logistic_reg(
     y = (iris.target != 0) * 1

     alpha = 0.1
-    theta = logistic_reg(alpha, X, y, max_iterations=70000, num_steps=30000)
-    print(theta)
+    theta = logistic_reg(alpha, X, y, max_iterations=70000)
+    print("theta: ", theta)  # printing the theta i.e our weights vector


     def predict_prob(X):
@@ -105,3 +91,4 @@ def predict_prob(X):
     )

     plt.legend()
+    plt.show()
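End-to-end, the patched script trains with logistic_reg(alpha, X, y, max_iterations=70000) and then plots a figure that, thanks to the added plt.show(), actually renders when run as a plain script rather than in a notebook. A self-contained sketch of that plotting pattern (the fitted theta and the grid/contour details below are illustrative assumptions, not taken from the diff):

import matplotlib.pyplot as plt
import numpy as np


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


theta = np.array([1.0, -1.0])  # hypothetical fitted weights


def predict_prob(x):
    # probability predicted by the logistic model for each row of x
    return sigmoid_function(np.dot(x, theta))


# evaluate the model on a grid and draw the p = 0.5 decision boundary
xx1, xx2 = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="b")
plt.plot([], [], "b-", label="p = 0.5 boundary")  # labeled proxy so legend() has an entry
plt.legend()
plt.show()  # without this, a non-notebook run exits before the window appears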