
Commit a4f585c

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 6991fb0 commit a4f585c

1 file changed: +22 -7 lines changed

machine_learning/ridge_regression.py

@@ -2,9 +2,12 @@
 from matplotlib import pyplot as plt
 from sklearn import datasets
 
+
 # Ridge Regression function
 # reference : https://en.wikipedia.org/wiki/Ridge_regression
-def ridge_cost_function(X: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float) -> float:
+def ridge_cost_function(
+    X: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float
+) -> float:
     """
     Compute the Ridge regression cost function with L2 regularization.
 
@@ -26,10 +29,20 @@ def ridge_cost_function(X: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha:
     """
     m = len(y)
     predictions = np.dot(X, theta)
-    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (alpha / 2) * np.sum(theta[1:] ** 2)
+    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (alpha / 2) * np.sum(
+        theta[1:] ** 2
+    )
     return cost
 
-def ridge_gradient_descent(X: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float, learning_rate: float, max_iterations: int) -> np.ndarray:
+
+def ridge_gradient_descent(
+    X: np.ndarray,
+    y: np.ndarray,
+    theta: np.ndarray,
+    alpha: float,
+    learning_rate: float,
+    max_iterations: int,
+) -> np.ndarray:
     """
     Perform gradient descent to minimize the cost function and fit the Ridge regression model.
 
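For reference, the rewrapped cost expression computes the standard ridge objective, with the intercept theta[0] excluded from the penalty (this is exactly what the code's theta[1:] slice does):

    J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \left( x_i^\top \theta - y_i \right)^2 + \frac{\alpha}{2} \sum_{j=1}^{n} \theta_j^2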
@@ -60,7 +73,6 @@ def ridge_gradient_descent(X: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
     return theta
 
 
-
 if __name__ == "__main__":
     import doctest
 
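The commit only rewraps the signature of ridge_gradient_descent; the function body sits outside the hunks shown. For orientation, a minimal sketch of the update loop that such a signature and the cost above suggest — an assumption about the shape of the body, not the file's actual code:

    import numpy as np

    def ridge_gradient_descent_sketch(X, y, theta, alpha, learning_rate, max_iterations):
        # Batch gradient descent on the ridge cost defined above (assumed body).
        m = len(y)
        for _ in range(max_iterations):
            gradient = (1 / m) * np.dot(X.T, np.dot(X, theta) - y)  # least-squares term
            gradient[1:] += alpha * theta[1:]  # L2 penalty; intercept theta[0] unpenalized
            theta = theta - learning_rate * gradient
        return theta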
@@ -81,18 +93,21 @@ def ridge_gradient_descent(X: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
     learning_rate = 0.01
     max_iterations = 1000
 
-    optimized_theta = ridge_gradient_descent(X, y, theta_initial, alpha, learning_rate, max_iterations)
+    optimized_theta = ridge_gradient_descent(
+        X, y, theta_initial, alpha, learning_rate, max_iterations
+    )
     print(f"Optimized theta: {optimized_theta}")
 
     # Prediction
     def predict(X, theta):
         return np.dot(X, theta)
+
     y_pred = predict(X, optimized_theta)
 
     # Plotting the results (here we visualize predicted vs actual values)
     plt.figure(figsize=(10, 6))
-    plt.scatter(y, y_pred, color='b', label='Predictions vs Actual')
-    plt.plot([min(y), max(y)], [min(y), max(y)], color='r', label='Perfect Fit')
+    plt.scatter(y, y_pred, color="b", label="Predictions vs Actual")
+    plt.plot([min(y), max(y)], [min(y), max(y)], color="r", label="Perfect Fit")
     plt.xlabel("Actual values")
     plt.ylabel("Predicted values")
     plt.title("Ridge Regression: Actual vs Predicted Values")
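The names X, y, theta_initial, and alpha used in this hunk are defined earlier in the __main__ block, outside the diff. A hypothetical setup consistent with the sklearn import at the top of the file — the dataset choice and values here are assumptions, not the file's actual lines:

    import numpy as np
    from sklearn import datasets

    data = datasets.load_diabetes()  # hypothetical dataset choice
    X = np.c_[np.ones(data.data.shape[0]), data.data]  # bias column for theta[0]
    y = data.target
    theta_initial = np.zeros(X.shape[1])
    alpha = 0.1  # assumed regularization strength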
