@@ -15,68 +15,68 @@ def __init__(
15
15
self .theta : np .ndarray = None
16
16
17
17
def feature_scaling(
    self, features: np.ndarray
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Standardize every column of *features* to zero mean, unit variance.

    Returns
    -------
    tuple of (scaled matrix, per-column mean, per-column std), so callers
    can keep the statistics for later reuse.
    """
    mean = features.mean(axis=0)
    std = features.std(axis=0)

    # A constant column has std == 0; substitute 1 so the division below
    # produces 0 for that column instead of NaN.
    std = np.where(std == 0, 1, std)

    return (features - mean) / std, mean, std
def fit(self, features: np.ndarray, target: np.ndarray) -> None:
    """Fit ridge-regression weights by batch gradient descent.

    Parameters
    ----------
    features : design matrix of shape (m, n).
    target : response vector of shape (m,).

    Side effects: sets ``self.theta`` to the learned weights, and stores
    the training-set scaling statistics in ``self.mean`` / ``self.std``
    (previously they were computed and silently discarded).
    """
    features_scaled, mean, std = self.feature_scaling(features)
    # Keep the training statistics instead of throwing them away so they
    # can be reused when scaling new data at prediction time.
    self.mean = mean
    self.std = std

    m, n = features_scaled.shape
    self.theta = np.zeros(n)  # start from all-zero weights

    for _ in range(self.num_iterations):
        predictions = features_scaled.dot(self.theta)
        error = predictions - target

        # Gradient of the squared-error cost plus the L2 penalty term.
        gradient = (
            features_scaled.T.dot(error) + self.regularization_param * self.theta
        ) / m
        self.theta -= self.alpha * gradient  # gradient-descent step
def predict(self, features: np.ndarray) -> np.ndarray:
    """Return model predictions for *features*.

    NOTE(review): the input is re-standardized with its OWN mean/std via
    feature_scaling, not with the statistics seen during fit(); confirm
    this is intended, as it can skew predictions on new data.
    """
    scaled, _, _ = self.feature_scaling(features)
    return scaled.dot(self.theta)
def compute_cost(self, features: np.ndarray, target: np.ndarray) -> float:
    """Return the L2-regularized half mean squared error (ridge cost)."""
    scaled, _, _ = self.feature_scaling(features)
    m = len(target)

    residual = scaled.dot(self.theta) - target
    # Data-fit term plus the ridge penalty, both averaged over 2m.
    data_term = (1 / (2 * m)) * np.sum(residual**2)
    penalty = (self.regularization_param / (2 * m)) * np.sum(self.theta**2)
    return data_term + penalty
def mean_absolute_error(self, target: np.ndarray, predictions: np.ndarray) -> float:
    """Return the mean of the absolute differences |target - predictions|."""
    absolute_errors = np.abs(target - predictions)
    return np.mean(absolute_errors)
61
61
62
62
# Example usage
if __name__ == "__main__":
    data = pd.read_csv("ADRvsRating.csv")
    features_matrix = data[["Rating"]].to_numpy()
    target = data["ADR"].to_numpy()
    # Standardize the target so gradient descent behaves well.
    target = (target - np.mean(target)) / np.std(target)

    # Add a bias (intercept) column of ones to the feature matrix.
    # BUG FIX: this augmented matrix was previously built into a separate
    # variable and never used — fit/predict/cost were called with the
    # un-augmented matrix, so the intercept term was silently dropped.
    features_matrix = np.c_[np.ones(features_matrix.shape[0]), features_matrix]

    # Initialize and train the ridge regression model.
    model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
    model.fit(features_matrix, target)

    # Predictions on the training data.
    predictions = model.predict(features_matrix)

    # Results.
    print("Optimized Weights:", model.theta)
    print("Cost:", model.compute_cost(features_matrix, target))
    print("Mean Absolute Error:", model.mean_absolute_error(target, predictions))
0 commit comments