@@ -9,12 +9,8 @@ def __init__(self, alpha:float=0.001, regularization_param:float=0.1, num_iterat
         self.num_iterations: int = num_iterations
         self.theta: np.ndarray = None

-<<<<<<< HEAD

     def feature_scaling(self, X: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-=======
-    def feature_scaling(self, X):
->>>>>>> d4fc2bf852ec4a023380f4ef367edefa88fd6881
         mean = np.mean(X, axis=0)
         std = np.std(X, axis=0)
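The resolution above keeps the annotated `feature_scaling` signature, which returns the scaled matrix together with the per-column means and standard deviations. For reference, here is a minimal free-standing sketch of that z-score standardization; the zero-variance guard is an added assumption and is not part of the commit:

import numpy as np

def feature_scaling(X: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    # Standardize each feature column to zero mean and unit variance.
    mean = np.mean(X, axis=0)
    std = np.std(X, axis=0)
    # Assumed guard so constant columns do not divide by zero (not shown in the diff).
    std = np.where(std == 0, 1.0, std)
    X_scaled = (X - mean) / std
    return X_scaled, mean, std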
@@ -43,13 +39,8 @@ def predict(self, X:np.ndarray) -> np.ndarray:
         X_scaled, _, _ = self.feature_scaling(X)
         return X_scaled.dot(self.theta)

-<<<<<<< HEAD
     def compute_cost(self, X: np.ndarray, y: np.ndarray) -> float:
         X_scaled, _, _ = self.feature_scaling(X)
-=======
-    def compute_cost(self, X, y):
-        X_scaled, _, _ = self.feature_scaling(X)
->>>>>>> d4fc2bf852ec4a023380f4ef367edefa88fd6881
         m = len(y)

         predictions = X_scaled.dot(self.theta)
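The kept `compute_cost` builds predictions as `X_scaled.dot(self.theta)` over `m = len(y)` samples; the hunk ends before the cost expression itself, so the following is only a sketch of the usual ridge objective it presumably completes. Leaving the bias weight out of the penalty is an assumption, not something shown in the diff:

import numpy as np

def ridge_cost(X_scaled: np.ndarray, y: np.ndarray, theta: np.ndarray,
               regularization_param: float) -> float:
    # Mean squared error term plus an L2 penalty on the weights.
    m = len(y)
    predictions = X_scaled.dot(theta)
    mse_term = np.sum((predictions - y) ** 2) / (2 * m)
    # Excluding theta[0] (the bias) from the penalty is an assumption here;
    # the actual penalty line is outside the hunk shown above.
    l2_term = regularization_param * np.sum(theta[1:] ** 2) / (2 * m)
    return float(mse_term + l2_term)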
@@ -69,13 +60,8 @@ def mean_absolute_error(self, y_true:np.ndarray, y_pred:np.ndarray) -> float:
 y = df["ADR"].values
 y = (y - np.mean(y)) / np.std(y)

-<<<<<<< HEAD
 # added bias term to the feature matrix
 X = np.c_[np.ones(X.shape[0]), X]
-=======
-# Add bias term (intercept) to the feature matrix
-X = np.c_[np.ones(X.shape[0]), X]
->>>>>>> d4fc2bf852ec4a023380f4ef367edefa88fd6881

 # initialize and train the ridge regression model
 model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
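Both sides of the last conflict prepend a column of ones to `X`, so the first component of `theta` acts as the intercept once the model is trained. A small sketch of that step with placeholder data (the real `df` features and the training call itself sit outside these hunks):

import numpy as np

# Placeholder feature matrix and standardized target standing in for the ADR data.
X = np.random.rand(100, 3)
y = np.random.rand(100)
y = (y - np.mean(y)) / np.std(y)

# Prepend a bias column of ones so theta[0] serves as the intercept.
X = np.c_[np.ones(X.shape[0]), X]
print(X.shape)  # (100, 4): three features plus the bias column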