
Commit c8c1d9a

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent d564698 commit c8c1d9a

File tree: 1 file changed, +23 -8 lines changed

machine_learning/ridge_regression.py (+23 -8)
@@ -2,9 +2,12 @@
 from matplotlib import pyplot as plt
 from sklearn import datasets
 
+
 # Ridge Regression function
 # reference : https://en.wikipedia.org/wiki/Ridge_regression
-def ridge_cost_function(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float) -> float:
+def ridge_cost_function(
+    x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float
+) -> float:
     """
     Compute the Ridge regression cost function with L2 regularization.
 
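Written out, the cost this function computes (see the reflowed expression in the next hunk) is the usual ridge objective; the theta[1:] slice in the code means the bias term theta_0 is left unregularized:

J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \left( x_i^\top \theta - y_i \right)^2 + \frac{\alpha}{2} \sum_{j=1}^{n} \theta_j^2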
@@ -26,12 +29,21 @@ def ridge_cost_function(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float) -> float:
     """
     m = len(y)
     predictions = np.dot(x, theta)
-    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + \
-           (alpha / 2) * np.sum(theta[1:] ** 2)
+    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (alpha / 2) * np.sum(
+        theta[1:] ** 2
+    )
 
     return cost
 
-def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float, learning_rate: float, max_iterations: int) -> np.ndarray:
+
+def ridge_gradient_descent(
+    x: np.ndarray,
+    y: np.ndarray,
+    theta: np.ndarray,
+    alpha: float,
+    learning_rate: float,
+    max_iterations: int,
+) -> np.ndarray:
     """
     Perform gradient descent to minimize the cost function and fit the Ridge regression model.
 
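The commit reflows only the signature of ridge_gradient_descent; the function body lies outside the diff context. For readers following along, here is a minimal sketch of a gradient-descent loop consistent with that signature and the cost above. It is an illustration of the standard update, not the repository's exact code, hence the _sketch suffix:

import numpy as np


def ridge_gradient_descent_sketch(
    x: np.ndarray,
    y: np.ndarray,
    theta: np.ndarray,
    alpha: float,
    learning_rate: float,
    max_iterations: int,
) -> np.ndarray:
    """Illustrative ridge gradient descent matching the signature above."""
    m = len(y)
    for _ in range(max_iterations):
        predictions = np.dot(x, theta)
        # Gradient of the squared-error term (1 / (2m)) * sum((Xθ - y)²).
        gradient = (1 / m) * np.dot(x.T, predictions - y)
        # Gradient of the (alpha / 2) * sum(theta[1:] ** 2) penalty;
        # the bias theta[0] is left unregularized, matching the cost function.
        gradient[1:] += alpha * theta[1:]
        theta = theta - learning_rate * gradient
    return theta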
@@ -62,9 +74,9 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float, learning_rate: float, max_iterations: int) -> np.ndarray:
     return theta
 
 
-
 if __name__ == "__main__":
     import doctest
+
     doctest.testmod()
 
     # Load California Housing dataset
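The "# Load California Housing dataset" comment marks code outside this hunk. Given the `from sklearn import datasets` import at the top of the file, the loading step is presumably along these lines; `datasets.fetch_california_housing` is a real sklearn loader, but the variable names here are illustrative assumptions:

from sklearn import datasets

housing = datasets.fetch_california_housing()  # Bunch with .data and .target
x = housing.data    # feature matrix, shape (20640, 8)
y = housing.target  # median house values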
@@ -84,18 +96,21 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float, learning_rate: float, max_iterations: int) -> np.ndarray:
     learning_rate = 0.01
     max_iterations = 1000
 
-    optimized_theta = ridge_gradient_descent(x, y, theta_initial, alpha, learning_rate, max_iterations)
+    optimized_theta = ridge_gradient_descent(
+        x, y, theta_initial, alpha, learning_rate, max_iterations
+    )
     print(f"Optimized theta: {optimized_theta}")
 
     # Prediction
     def predict(x, theta):
         return np.dot(x, theta)
+
     y_pred = predict(x, optimized_theta)
 
     # Plotting the results (here we visualize predicted vs actual values)
     plt.figure(figsize=(10, 6))
-    plt.scatter(y, y_pred, color='b', label='Predictions vs Actual')
-    plt.plot([min(y), max(y)], [min(y), max(y)], color='r', label='Perfect Fit')
+    plt.scatter(y, y_pred, color="b", label="Predictions vs Actual")
+    plt.plot([min(y), max(y)], [min(y), max(y)], color="r", label="Perfect Fit")
     plt.xlabel("Actual values")
     plt.ylabel("Predicted values")
     plt.title("Ridge Regression: Actual vs Predicted Values")
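The hunk ends at the title call; given the label= arguments passed to scatter and plot above, the script presumably finishes the figure just past the diff context with something like the following (an assumption, not shown in this commit):

plt.legend()  # render the "Predictions vs Actual" / "Perfect Fit" labels
plt.show()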
