@@ -31,13 +31,16 @@ def sigmoid_function(z):
31
31
def cost_function(h, y):
    """Mean binary cross-entropy between predictions and targets.

    h : array of predicted probabilities in (0, 1)
    y : array of binary labels (0 or 1), same shape as h
    Returns the average loss over all samples.
    """
    per_sample_loss = -(y * np.log(h) + (1 - y) * np.log(1 - h))
    return per_sample_loss.mean()
34
def log_likelihood(X, Y, weights):
    """Log-likelihood of binary labels Y under a logistic model.

    X       : (n_samples, n_features) feature matrix
    Y       : (n_samples,) binary labels (0 or 1)
    weights : (n_features,) model coefficients

    Returns sum_i [ y_i * s_i - log(1 + exp(s_i)) ] where s = X @ weights.
    Uses np.logaddexp(0, s) instead of log(1 + exp(s)): the naive form
    overflows to inf for s > ~709, turning the result into -inf/nan.
    """
    scores = np.dot(X, weights)
    return np.sum(Y * scores - np.logaddexp(0, scores))
37
35
38
# here alpha is the learning rate, X is the feature matrix,y is the target matrix
def logistic_reg(
    alpha,
    X,
    y,
    num_steps=None,
    max_iterations=70000,
):
    """Fit logistic-regression weights by batch gradient descent.

    alpha          : learning rate
    X              : (n_samples, n_features) feature matrix
    y              : (n_samples,) binary target vector (0/1)
    num_steps      : optional extra cap on iterations; when given, training
                     stops after min(num_steps, max_iterations) updates.
                     Defaults to None so existing callers are unaffected.
    max_iterations : hard cap on gradient-descent updates

    Returns the learned weight vector theta.

    NOTE(review): the merged version interleaved a second, dead training
    loop (`weights` was never updated and `return weights` fired on the
    first pass, making the convergence check unreachable).  This restores
    a single gradient-descent loop and keeps the patch's intent of
    printing the log-likelihood periodically as a progress report.
    """
    # effective iteration budget
    limit = max_iterations if num_steps is None else min(num_steps, max_iterations)

    theta = np.zeros(X.shape[1])
    iterations = 0
    converged = False

    while not converged:
        # gradient-descent update on theta
        z = np.dot(X, theta)
        h = sigmoid_function(z)
        gradient = np.dot(X.T, h - y) / y.size
        theta = theta - alpha * gradient

        # recompute the cost with the updated weights
        z = np.dot(X, theta)
        h = sigmoid_function(z)
        J = cost_function(h, y)

        iterations += 1  # update iterations
        if iterations % 10000 == 0:
            print(log_likelihood(X, y, theta))  # Print log-likelihood every so often

        if iterations == limit:
            print('Maximum iterations exceeded!')
            print('Minimal cost function J=', J)
            converged = True
    return theta
72
66
-
67
73
# In[68]:
68
74
69
75
if __name__ == '__main__' :
@@ -72,7 +78,7 @@ def logistic_reg(
72
78
y = (iris .target != 0 ) * 1
73
79
74
80
alpha = 0.1
75
- theta = logistic_reg (alpha , X , y , max_iterations = 70000 )
81
+ theta = logistic_reg (alpha ,X , y , max_iterations = 70000 , num_steps = 30000 )
76
82
print (theta )
77
83
78
84
0 commit comments