Skip to content

Commit d4fc2bf

Browse files
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent b72320b commit d4fc2bf

File tree

1 file changed

+16
-16
lines changed
  • machine_learning/ridge_regression

1 file changed

+16
-16
lines changed
+16-16
Original file line numberDiff line numberDiff line change
@@ -1,56 +1,56 @@
11
import numpy as np
22
import pandas as pd
33

4+
45
class RidgeRegression:
    """Ridge (L2-regularized) linear regression trained by batch gradient descent.

    Features are standardized column-wise (zero mean, unit variance) using
    statistics computed on the training data in ``fit``; those same statistics
    are reused by ``predict`` and ``compute_cost`` so new data is transformed
    consistently with the data the weights were learned on.
    """

    def __init__(self, alpha=0.001, regularization_param=0.1, num_iterations=1000):
        """
        :param alpha: gradient-descent learning rate.
        :param regularization_param: L2 penalty strength (lambda).
        :param num_iterations: number of gradient-descent steps run by ``fit``.
        """
        self.alpha = alpha
        self.regularization_param = regularization_param
        self.num_iterations = num_iterations
        self.theta = None  # learned weight vector; set by fit()
        self.mean = None  # per-feature training means; set by fit()
        self.std = None  # per-feature training stds; set by fit()

    def feature_scaling(self, X):
        """Standardize ``X`` column-wise; return ``(X_scaled, mean, std)``.

        Constant columns (std == 0) are assigned std = 1 so the division does
        not produce NaN/inf; such columns scale to all zeros.
        """
        mean = np.mean(X, axis=0)
        std = np.std(X, axis=0)

        # avoid division by zero for constant features (std = 0)
        std[std == 0] = 1  # set std=1 for constant features to avoid NaN

        X_scaled = (X - mean) / std
        return X_scaled, mean, std

    def fit(self, X, y):
        """Learn ``self.theta`` by gradient descent on the ridge objective.

        Also stores the training-set scaling statistics (``self.mean``,
        ``self.std``) so later predictions apply the identical transform.
        """
        X_scaled, self.mean, self.std = self.feature_scaling(X)
        m, n = X_scaled.shape
        self.theta = np.zeros(n)  # initializing weights to zeros

        for _ in range(self.num_iterations):
            predictions = X_scaled.dot(self.theta)
            error = predictions - y

            # computing gradient with L2 regularization
            gradient = (
                X_scaled.T.dot(error) + self.regularization_param * self.theta
            ) / m
            self.theta -= self.alpha * gradient  # updating weights

    def _scale_like_training(self, X):
        """Apply the scaling learned in ``fit`` to ``X``.

        If ``fit`` has not been called yet, fall back to fitting fresh
        statistics on ``X`` (the original pre-fix behavior).
        """
        if self.mean is None or self.std is None:
            X_scaled, _, _ = self.feature_scaling(X)
            return X_scaled
        return (X - self.mean) / self.std

    def predict(self, X):
        """Return predictions for ``X`` using the learned weights.

        BUG FIX: scale ``X`` with the statistics learned on the training
        data, not statistics recomputed from ``X`` itself — recomputing made
        single-row prediction degenerate (every row scaled to all zeros, so
        predict() always returned 0).
        """
        return self._scale_like_training(X).dot(self.theta)

    def compute_cost(self, X, y):
        """Return J(theta) = (1/2m)*SSE + (lambda/2m)*||theta||^2."""
        X_scaled = self._scale_like_training(X)
        m = len(y)

        predictions = X_scaled.dot(self.theta)
        cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (
            self.regularization_param / (2 * m)
        ) * np.sum(self.theta**2)
        return cost

    def mean_absolute_error(self, y_true, y_pred):
        """Return mean(|y_true - y_pred|)."""
        return np.mean(np.abs(y_true - y_pred))
53-
53+
5454

5555
# Example usage
5656
if __name__ == "__main__":
@@ -60,7 +60,7 @@ def mean_absolute_error(self, y_true, y_pred):
6060
y = (y - np.mean(y)) / np.std(y)
6161

6262
# Add bias term (intercept) to the feature matrix
63-
X = np.c_[np.ones(X.shape[0]), X]
63+
X = np.c_[np.ones(X.shape[0]), X]
6464

6565
# initialize and train the Ridge Regression model
6666
model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
@@ -72,4 +72,4 @@ def mean_absolute_error(self, y_true, y_pred):
7272
# results
7373
print("Optimized Weights:", model.theta)
7474
print("Cost:", model.compute_cost(X, y))
75-
print("Mean Absolute Error:", model.mean_absolute_error(y, predictions))
75+
print("Mean Absolute Error:", model.mean_absolute_error(y, predictions))

0 commit comments

Comments
 (0)