@@ -25,9 +25,10 @@ def ridge_cost_function(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha:
     @returns: The computed cost value
     """
     m = len(y)
-    predictions = np.dot(X, theta)
-    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2)
-    cost += (alpha / 2) * np.sum(theta[1:] ** 2)
+    predictions = np.dot(x, theta)
+    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + \
+           (alpha / 2) * np.sum(theta[1:] ** 2)
+
     return cost


 def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float, learning_rate: float, max_iterations: int) -> np.ndarray:
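The change above is behavior-preserving: it only merges the MSE term and the L2 penalty into one expression, with the bias theta[0] still left unpenalized. A minimal sketch on toy data (hypothetical values) checking the combined expression against the old two-step form:

```python
import numpy as np

# Toy inputs (hypothetical values, just to exercise the formula)
x = np.array([[1.0, 0.5], [1.0, -1.2], [1.0, 2.0]])  # first column is the bias
y = np.array([1.0, 0.0, 2.5])
theta = np.array([0.1, 0.4])
alpha = 0.1
m = len(y)

predictions = np.dot(x, theta)
# Combined expression, as in the new code
cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + \
       (alpha / 2) * np.sum(theta[1:] ** 2)

# Two-step form, as in the old code
old_cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2)
old_cost += (alpha / 2) * np.sum(theta[1:] ** 2)  # theta[0] (bias) is not penalized

assert np.isclose(cost, old_cost)
```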
@@ -46,16 +47,16 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
     m = len(y)

     for iteration in range(max_iterations):
-        predictions = np.dot(X, theta)
+        predictions = np.dot(x, theta)
         error = predictions - y

         # calculate the gradient
-        gradient = (1 / m) * np.dot(X.T, error)
+        gradient = (1 / m) * np.dot(x.T, error)
         gradient[1:] += (alpha / m) * theta[1:]
         theta -= learning_rate * gradient

         if iteration % 100 == 0:
-            cost = ridge_cost_function(X, y, theta, alpha)
+            cost = ridge_cost_function(x, y, theta, alpha)
             print(f"Iteration {iteration}, Cost: {cost}")

     return theta
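One thing worth flagging: the loop adds (alpha / m) * theta[1:] to the gradient, which is the derivative of a penalty of (alpha / (2 * m)) * sum(theta[1:] ** 2), while ridge_cost_function uses (alpha / 2) * sum(theta[1:] ** 2); the two conventions differ by a factor of m. Below is a minimal finite-difference sketch on toy data that sanity-checks the analytic gradient, using the (alpha / (2 * m)) convention so cost and gradient agree:

```python
import numpy as np

def cost(x, y, theta, alpha):
    m = len(y)
    residual = np.dot(x, theta) - y
    # (alpha / (2 * m)) scaling, consistent with the (alpha / m) gradient term
    return (1 / (2 * m)) * np.sum(residual ** 2) + \
           (alpha / (2 * m)) * np.sum(theta[1:] ** 2)

def gradient(x, y, theta, alpha):
    m = len(y)
    grad = (1 / m) * np.dot(x.T, np.dot(x, theta) - y)
    grad[1:] += (alpha / m) * theta[1:]  # bias term is not penalized
    return grad

rng = np.random.default_rng(0)
x = np.c_[np.ones(20), rng.normal(size=(20, 2))]
y = rng.normal(size=20)
theta = rng.normal(size=3)
alpha = 0.1

# Central finite differences, one coordinate at a time
eps = 1e-6
numeric = np.array([
    (cost(x, y, theta + eps * e, alpha) - cost(x, y, theta - eps * e, alpha)) / (2 * eps)
    for e in np.eye(3)
])
assert np.allclose(gradient(x, y, theta, alpha), numeric, atol=1e-6)
```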
@@ -68,28 +69,28 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph

 # Load California Housing dataset
 california_housing = datasets.fetch_california_housing()
-X = california_housing.data[:, :2]  # 2 features for simplicity
+x = california_housing.data[:, :2]  # 2 features for simplicity
 y = california_housing.target
-X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
+x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)

 # Add a bias column (intercept) to X
-X = np.c_[np.ones(X.shape[0]), X]
+x = np.c_[np.ones(x.shape[0]), x]

 # Initialize parameters (theta)
-theta_initial = np.zeros(X.shape[1])
+theta_initial = np.zeros(x.shape[1])

 # Set hyperparameters
 alpha = 0.1
 learning_rate = 0.01
 max_iterations = 1000

-optimized_theta = ridge_gradient_descent(X, y, theta_initial, alpha, learning_rate, max_iterations)
+optimized_theta = ridge_gradient_descent(x, y, theta_initial, alpha, learning_rate, max_iterations)
 print(f"Optimized theta: {optimized_theta}")

 # Prediction
-def predict(X, theta):
-    return np.dot(X, theta)
-y_pred = predict(X, optimized_theta)
+def predict(x, theta):
+    return np.dot(x, theta)
+y_pred = predict(x, optimized_theta)

 # Plotting the results (here we visualize predicted vs actual values)
 plt.figure(figsize=(10, 6))
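Since the script standardizes the features and prepends a bias column, the gradient-descent result can also be sanity-checked against the closed-form ridge solution: setting the loop's gradient to zero gives (xᵀx + alpha·D)·theta = xᵀy, where D is the identity with a zero in the bias position. A hedged sketch on synthetic data (hypothetical values, mirroring the script's preprocessing):

```python
import numpy as np

# Synthetic stand-in for two standardized features plus a bias column
rng = np.random.default_rng(1)
x = rng.normal(size=(100, 2))
x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)
x = np.c_[np.ones(x.shape[0]), x]
y = 3.0 + 1.5 * x[:, 1] - 2.0 * x[:, 2] + rng.normal(scale=0.1, size=100)
alpha = 0.1

# Closed-form ridge: (x^T x + alpha * D) theta = x^T y, with D zeroing the bias
d = np.eye(x.shape[1])
d[0, 0] = 0.0  # do not penalize the intercept
theta_closed = np.linalg.solve(x.T @ x + alpha * d, x.T @ y)
print(theta_closed)  # gradient descent should converge toward these values
```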