
Commit b7f49ae
Updated code as per PR feedback
2 parents fb1b7a7 + a4f585c

File tree: 1 file changed (+30, -4)


machine_learning/ridge_regression.py (+30, -4)
@@ -2,9 +2,12 @@
 from matplotlib import pyplot as plt
 from sklearn import datasets
 
+
 # Ridge Regression function
 # reference : https://en.wikipedia.org/wiki/Ridge_regression
-def ridge_cost_function(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float) -> float:
+def ridge_cost_function(
+    X: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float
+) -> float:
     """
     Compute the Ridge regression cost function with L2 regularization.
 
@@ -26,11 +29,20 @@ def ridge_cost_function(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha:
     """
     m = len(y)
     predictions = np.dot(X, theta)
-    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2)
-    cost += (alpha / 2) * np.sum(theta[1:] ** 2)
+    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (alpha / 2) * np.sum(
+        theta[1:] ** 2
+    )
     return cost
 
-def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float, learning_rate: float, max_iterations: int) -> np.ndarray:
+
+def ridge_gradient_descent(
+    X: np.ndarray,
+    y: np.ndarray,
+    theta: np.ndarray,
+    alpha: float,
+    learning_rate: float,
+    max_iterations: int,
+) -> np.ndarray:
     """
     Perform gradient descent to minimize the cost function and fit the Ridge regression model.
 
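For reference, the merged cost is the standard ridge objective: a mean-squared-error term plus an L2 penalty that leaves the bias weight theta[0] unpenalized. A minimal hand-checkable evaluation of that expression follows; the values below are illustrative, not from the commit:

import numpy as np

# Two samples, a bias column plus one feature; theta reproduces y exactly.
X = np.array([[1.0, 1.0], [1.0, 2.0]])
y = np.array([2.0, 3.0])
theta = np.array([1.0, 1.0])
alpha = 0.5

m = len(y)
predictions = np.dot(X, theta)  # [2.0, 3.0], so the error term vanishes
cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (alpha / 2) * np.sum(
    theta[1:] ** 2
)
print(cost)  # 0.25 == (alpha / 2) * theta[1] ** 2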
@@ -61,7 +73,6 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
     return theta
 
 
-
 if __name__ == "__main__":
     import doctest
     doctest.testmod()
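The body of ridge_gradient_descent between its docstring and the `return theta` shown above is not part of this diff. A typical batch-gradient implementation consistent with the signature is sketched below; the update rule (penalizing every weight except the bias) is an assumption, not the commit's actual code:

import numpy as np


def ridge_gradient_descent(
    X: np.ndarray,
    y: np.ndarray,
    theta: np.ndarray,
    alpha: float,
    learning_rate: float,
    max_iterations: int,
) -> np.ndarray:
    m = len(y)
    for _ in range(max_iterations):
        predictions = np.dot(X, theta)
        # Gradient of the squared-error term...
        gradient = (1 / m) * np.dot(X.T, predictions - y)
        # ...plus the L2 penalty, assumed to skip the bias weight theta[0].
        gradient[1:] += alpha * theta[1:]
        theta = theta - learning_rate * gradient
    return theta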
@@ -83,18 +94,21 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
     learning_rate = 0.01
     max_iterations = 1000
 
-    optimized_theta = ridge_gradient_descent(X, y, theta_initial, alpha, learning_rate, max_iterations)
+    optimized_theta = ridge_gradient_descent(
+        X, y, theta_initial, alpha, learning_rate, max_iterations
+    )
     print(f"Optimized theta: {optimized_theta}")
 
     # Prediction
     def predict(X, theta):
         return np.dot(X, theta)
+
    y_pred = predict(X, optimized_theta)
 
     # Plotting the results (here we visualize predicted vs actual values)
     plt.figure(figsize=(10, 6))
-    plt.scatter(y, y_pred, color='b', label='Predictions vs Actual')
-    plt.plot([min(y), max(y)], [min(y), max(y)], color='r', label='Perfect Fit')
+    plt.scatter(y, y_pred, color="b", label="Predictions vs Actual")
+    plt.plot([min(y), max(y)], [min(y), max(y)], color="r", label="Perfect Fit")
     plt.xlabel("Actual values")
     plt.ylabel("Predicted values")
     plt.title("Ridge Regression: Actual vs Predicted Values")
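The setup of X, y, theta_initial, and alpha (old lines 68-82) is outside this diff, as is the dataset loaded via `from sklearn import datasets`. An end-to-end sketch in the spirit of the __main__ block, using load_diabetes as an illustrative choice and reusing the ridge_gradient_descent sketch above:

import numpy as np
from sklearn import datasets

data = datasets.load_diabetes()  # illustrative; the commit's dataset is not shown here
features, y = data.data, data.target

# Prepend a bias column so theta[0] acts as the intercept.
X = np.c_[np.ones(features.shape[0]), features]

theta_initial = np.zeros(X.shape[1])
alpha = 0.1  # illustrative regularization strength
learning_rate = 0.01
max_iterations = 1000

optimized_theta = ridge_gradient_descent(
    X, y, theta_initial, alpha, learning_rate, max_iterations
)
print(f"Optimized theta: {optimized_theta}")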
