
Commit 0c04372

Merge pull request #1 from ankana2113/main
fixes ruff check in loss_functions.py
2 parents 97eb853 + 1459adf commit 0c04372

File tree

4 files changed: +62 -50 lines changed


Diff for: machine_learning/frequent_pattern_growth.py

+1 -1

@@ -240,7 +240,7 @@ def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None:
         ascend_tree(leaf_node.parent, prefix_path)


-def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict:  # noqa: ARG001
+def find_prefix_path(_: frozenset, tree_node: TreeNode | None) -> dict:
     """
     Find the conditional pattern base for a given base pattern.
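The only change here renames the unused base_pat parameter to _, which lets ruff's ARG001 ("unused function argument") check pass without a # noqa suppression, since underscore-named arguments are treated as intentionally ignored. A minimal illustration of the pattern, with hypothetical names rather than the repository code:

# ruff flags unused named arguments (ARG001); an underscore name marks them as intentionally unused
def handler_v1(event: str, payload: dict) -> None:  # ARG001: `payload` is never used
    print(event)


def handler_v2(event: str, _: dict) -> None:  # no warning: the argument is explicitly ignored
    print(event)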

Diff for: machine_learning/loss_functions.py

+13 -4

@@ -629,13 +629,15 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) ->
     return np.mean(loss)


-def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
+def kullback_leibler_divergence(
+    y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-10
+) -> float:
     """
     Calculate the Kullback-Leibler divergence (KL divergence) loss between true labels
     and predicted probabilities.

-    KL divergence loss quantifies dissimilarity between true labels and predicted
-    probabilities. It's often used in training generative models.
+    KL divergence loss quantifies the dissimilarity between true labels and predicted
+    probabilities. It is often used in training generative models.

     KL = Σ(y_true * ln(y_true / y_pred))

@@ -649,6 +651,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
     >>> predicted_probs = np.array([0.3, 0.3, 0.4])
     >>> float(kullback_leibler_divergence(true_labels, predicted_probs))
     0.030478754035472025
+
     >>> true_labels = np.array([0.2, 0.3, 0.5])
     >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5])
     >>> kullback_leibler_divergence(true_labels, predicted_probs)
@@ -659,7 +662,13 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
     if len(y_true) != len(y_pred):
         raise ValueError("Input arrays must have the same length.")

-    kl_loss = y_true * np.log(y_true / y_pred)
+    # negligible epsilon to avoid issues with log(0) or division by zero
+    epsilon = 1e-10
+    y_pred = np.clip(y_pred, epsilon, None)
+
+    # calculate KL divergence only where y_true is not zero
+    kl_loss = np.where(y_true != 0, y_true * np.log(y_true / y_pred), 0.0)
+
     return np.sum(kl_loss)

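The new guard clips predicted probabilities away from zero and skips terms where y_true is zero, so log(0) and division by zero no longer produce NaN or inf. Note that the body re-assigns epsilon = 1e-10, which shadows the epsilon parameter added to the signature. A minimal standalone sketch of the same idea that uses the parameter directly (assumed names, not the repository code):

import numpy as np


def kl_divergence_safe(
    y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-10
) -> float:
    """Sketch of an epsilon-guarded KL divergence: KL = Σ(y_true * ln(y_true / y_pred))."""
    if len(y_true) != len(y_pred):
        raise ValueError("Input arrays must have the same length.")
    # keep predicted probabilities strictly positive before taking the log
    y_pred = np.clip(y_pred, epsilon, None)
    # contribute 0 for entries where y_true == 0 (the limit of x*ln(x/p) as x -> 0)
    kl_terms = np.where(y_true != 0, y_true * np.log(y_true / y_pred), 0.0)
    return float(np.sum(kl_terms))


# example: matches the doctest values shown in the diff above
true_labels = np.array([0.2, 0.3, 0.5])
predicted_probs = np.array([0.3, 0.3, 0.4])
print(kl_divergence_safe(true_labels, predicted_probs))  # ~0.0305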

Diff for: machine_learning/ridge_regression/ridge_regression.py

+29 -29

@@ -15,68 +15,68 @@ def __init__(
         self.theta: np.ndarray = None

     def feature_scaling(
-        self, x: np.ndarray
+        self, features: np.ndarray
     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-        mean = np.mean(x, axis=0)
-        std = np.std(x, axis=0)
+        mean = np.mean(features, axis=0)
+        std = np.std(features, axis=0)

         # avoid division by zero for constant features (std = 0)
         std[std == 0] = 1  # set std=1 for constant features to avoid NaN

-        x_scaled = (x - mean) / std
-        return x_scaled, mean, std
+        features_scaled = (features - mean) / std
+        return features_scaled, mean, std

-    def fit(self, x: np.ndarray, y: np.ndarray) -> None:
-        x_scaled, mean, std = self.feature_scaling(x)
-        m, n = x_scaled.shape
+    def fit(self, features: np.ndarray, target: np.ndarray) -> None:
+        features_scaled, mean, std = self.feature_scaling(features)
+        m, n = features_scaled.shape
         self.theta = np.zeros(n)  # initializing weights to zeros

         for _ in range(self.num_iterations):
-            predictions = x_scaled.dot(self.theta)
-            error = predictions - y
+            predictions = features_scaled.dot(self.theta)
+            error = predictions - target

             # computing gradient with L2 regularization
             gradient = (
-                x_scaled.T.dot(error) + self.regularization_param * self.theta
+                features_scaled.T.dot(error) + self.regularization_param * self.theta
             ) / m
             self.theta -= self.alpha * gradient  # updating weights

-    def predict(self, x: np.ndarray) -> np.ndarray:
-        x_scaled, _, _ = self.feature_scaling(x)
-        return x_scaled.dot(self.theta)
+    def predict(self, features: np.ndarray) -> np.ndarray:
+        features_scaled, _, _ = self.feature_scaling(features)
+        return features_scaled.dot(self.theta)

-    def compute_cost(self, x: np.ndarray, y: np.ndarray) -> float:
-        x_scaled, _, _ = self.feature_scaling(x)
-        m = len(y)
+    def compute_cost(self, features: np.ndarray, target: np.ndarray) -> float:
+        features_scaled, _, _ = self.feature_scaling(features)
+        m = len(target)

-        predictions = x_scaled.dot(self.theta)
-        cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (
+        predictions = features_scaled.dot(self.theta)
+        cost = (1 / (2 * m)) * np.sum((predictions - target) ** 2) + (
             self.regularization_param / (2 * m)
         ) * np.sum(self.theta**2)
         return cost

-    def mean_absolute_error(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
-        return np.mean(np.abs(y_true - y_pred))
+    def mean_absolute_error(self, target: np.ndarray, predictions: np.ndarray) -> float:
+        return np.mean(np.abs(target - predictions))


 # Example usage
 if __name__ == "__main__":
     data = pd.read_csv("ADRvsRating.csv")
-    x = data[["Rating"]].to_numpy()
-    y = data["ADR"].to_numpy()
-    y = (y - np.mean(y)) / np.std(y)
+    features_matrix = data[["Rating"]].to_numpy()
+    target = data["ADR"].to_numpy()
+    target = (target - np.mean(target)) / np.std(target)

     # added bias term to the feature matrix
-    x = np.c_[np.ones(x.shape[0]), x]
+    x = np.c_[np.ones(features_matrix.shape[0]), features_matrix]

     # initialize and train the ridge regression model
     model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
-    model.fit(x, y)
+    model.fit(features_matrix, target)

     # predictions
-    predictions = model.predict(x)
+    predictions = model.predict(features_matrix)

     # results
     print("Optimized Weights:", model.theta)
-    print("Cost:", model.compute_cost(x, y))
-    print("Mean Absolute Error:", model.mean_absolute_error(y, predictions))
+    print("Cost:", model.compute_cost(features_matrix, target))
+    print("Mean Absolute Error:", model.mean_absolute_error(target, predictions))

Diff for: machine_learning/ridge_regression/test_ridge_regression.py

+19 -16

@@ -12,17 +12,20 @@
 """

 import numpy as np  # noqa: F401
-from ridge_regression import RidgeRegression  # noqa: F401
+
+from machine_learning.ridge_regression.ridge_regression import (
+    RidgeRegression,  # noqa: F401
+)


 def test_feature_scaling():
     """
     Tests the feature_scaling function of RidgeRegression.
     --------
     >>> model = RidgeRegression()
-    >>> X = np.array([[1, 2], [2, 3], [3, 4]])
-    >>> X_scaled, mean, std = model.feature_scaling(X)
-    >>> np.round(X_scaled, 2)
+    >>> features = np.array([[1, 2], [2, 3], [3, 4]])
+    >>> features_scaled, mean, std = model.feature_scaling(features)
+    >>> np.round(features_scaled, 2)
     array([[-1.22, -1.22],
            [ 0.  ,  0.  ],
            [ 1.22,  1.22]])
@@ -40,14 +43,14 @@ def test_fit():
     >>> model = RidgeRegression(alpha=0.01,
     ... regularization_param=0.1,
     ... num_iterations=1000)
-    >>> X = np.array([[1], [2], [3]])
-    >>> y = np.array([2, 3, 4])
+    >>> features = np.array([[1], [2], [3]])
+    >>> target = np.array([2, 3, 4])

     # Adding a bias term
-    >>> X = np.c_[np.ones(X.shape[0]), X]
+    >>> features = np.c_[np.ones(features.shape[0]), features]

     # Fit the model
-    >>> model.fit(X, y)
+    >>> model.fit(features, target)

     # Check if the weights have been updated
     >>> np.round(model.theta, decimals=2)
@@ -62,17 +65,17 @@ def test_predict():
     >>> model = RidgeRegression(alpha=0.01,
     ... regularization_param=0.1,
     ... num_iterations=1000)
-    >>> X = np.array([[1], [2], [3]])
-    >>> y = np.array([2, 3, 4])
+    >>> features = np.array([[1], [2], [3]])
+    >>> target = np.array([2, 3, 4])

     # Adding a bias term
-    >>> X = np.c_[np.ones(X.shape[0]), X]
+    >>> features = np.c_[np.ones(features.shape[0]), features]

     # Fit the model
-    >>> model.fit(X, y)
+    >>> model.fit(features, target)

     # Predict with the model
-    >>> predictions = model.predict(X)
+    >>> predictions = model.predict(features)
     >>> np.round(predictions, decimals=2)
     array([-0.97,  0.  ,  0.97])
     """
@@ -83,9 +86,9 @@ def test_mean_absolute_error():
     Tests the mean_absolute_error function of RidgeRegression
     --------
     >>> model = RidgeRegression()
-    >>> y_true = np.array([2, 3, 4])
-    >>> y_pred = np.array([2.1, 3.0, 3.9])
-    >>> mae = model.mean_absolute_error(y_true, y_pred)
+    >>> target = np.array([2, 3, 4])
+    >>> predictions = np.array([2.1, 3.0, 3.9])
+    >>> mae = model.mean_absolute_error(target, predictions)
     >>> float(np.round(mae, 2))
     0.07
     """
