2
2
from matplotlib import pyplot as plt
3
3
from sklearn import datasets
4
4
5
+
5
6
# Ridge Regression cost function
# reference : https://en.wikipedia.org/wiki/Ridge_regression


def ridge_cost_function(
    X: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float
) -> float:
    """
    Compute the Ridge regression cost function with L2 regularization.

    J(theta) = (1/(2m)) * sum((X @ theta - y)^2) + (alpha/2) * sum(theta[1:]^2)

    The bias weight theta[0] is deliberately excluded from the penalty term
    (only theta[1:] is regularized), which is the standard convention.

    @param X: feature matrix, shape (m, n); first column is typically the
              all-ones bias column
    @param y: target vector, shape (m,)
    @param theta: parameter vector, shape (n,)
    @param alpha: regularization strength (lambda >= 0)
    @returns: the scalar Ridge cost

    >>> import numpy as np
    >>> X = np.array([[1.0, 2.0], [1.0, 3.0]])
    >>> y = np.array([5.0, 7.0])
    >>> theta = np.array([1.0, 2.0])
    >>> float(ridge_cost_function(X, y, theta, alpha=0.0))
    0.0
    >>> float(ridge_cost_function(X, y, theta, alpha=2.0))
    4.0
    """
    m = len(y)
    predictions = np.dot(X, theta)
    # Mean-squared-error term (the 1/(2m) factor makes the gradient tidy) ...
    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2)
    # ... plus the L2 penalty on every weight except the bias theta[0].
    cost += (alpha / 2) * np.sum(theta[1:] ** 2)
    return cost
49
+ def ridge_gradient_descent (
50
+ X : np .ndarray ,
51
+ y : np .ndarray ,
52
+ theta : np .ndarray ,
53
+ alpha : float ,
54
+ learning_rate : float ,
55
+ max_iterations : int ,
56
+ ) - > np .ndarray :
57
+ >> >> >> > a4f585c89d4426f2ddace3ead610ff1742922713
34
58
"""
35
59
Perform gradient descent to minimize the cost function and fit the Ridge regression model.
36
60
@@ -61,7 +85,6 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
61
85
return theta
62
86
63
87
64
-
65
88
if __name__ == "__main__" :
66
89
import doctest
67
90
doctest .testmod ()
@@ -83,18 +106,21 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
83
106
learning_rate = 0.01
84
107
max_iterations = 1000
85
108
86
- optimized_theta = ridge_gradient_descent (X , y , theta_initial , alpha , learning_rate , max_iterations )
109
+ optimized_theta = ridge_gradient_descent (
110
+ X , y , theta_initial , alpha , learning_rate , max_iterations
111
+ )
87
112
print (f"Optimized theta: { optimized_theta } " )
88
113
89
114
# Prediction
90
115
def predict (X , theta ):
91
116
return np .dot (X , theta )
117
+
92
118
y_pred = predict (X , optimized_theta )
93
119
94
120
# Plotting the results (here we visualize predicted vs actual values)
95
121
plt .figure (figsize = (10 , 6 ))
96
- plt .scatter (y , y_pred , color = 'b' , label = ' Predictions vs Actual' )
97
- plt .plot ([min (y ), max (y )], [min (y ), max (y )], color = 'r' , label = ' Perfect Fit' )
122
+ plt .scatter (y , y_pred , color = "b" , label = " Predictions vs Actual" )
123
+ plt .plot ([min (y ), max (y )], [min (y ), max (y )], color = "r" , label = " Perfect Fit" )
98
124
plt .xlabel ("Actual values" )
99
125
plt .ylabel ("Predicted values" )
100
126
plt .title ("Ridge Regression: Actual vs Predicted Values" )
0 commit comments