@@ -36,7 +36,7 @@ def binary_cross_entropy(

    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)  # Clip predictions to avoid log(0)
    bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
-    return np.mean(bce_loss)
+    return float(np.mean(bce_loss))


def binary_focal_cross_entropy(
@@ -87,7 +87,7 @@ def binary_focal_cross_entropy(
        + (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred)
    )

-    return np.mean(bcfe_loss)
+    return float(np.mean(bcfe_loss))


def categorical_cross_entropy(
@@ -145,7 +145,7 @@ def categorical_cross_entropy(
        raise ValueError("Predicted probabilities must sum to approximately 1.")

    y_pred = np.clip(y_pred, epsilon, 1)  # Clip predictions to avoid log(0)
-    return -np.sum(y_true * np.log(y_pred))
+    return float(-np.sum(y_true * np.log(y_pred)))


def categorical_focal_cross_entropy(
@@ -247,7 +247,7 @@ def categorical_focal_cross_entropy(
        alpha * np.power(1 - y_pred, gamma) * y_true * np.log(y_pred), axis=1
    )

-    return np.mean(cfce_loss)
+    return float(np.mean(cfce_loss))


def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -287,7 +287,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
        raise ValueError("y_true can have values -1 or 1 only.")

    hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred))
-    return np.mean(hinge_losses)
+    return float(np.mean(hinge_losses))


def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
@@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:

    >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
    >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
+    >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102).item()
    True
    >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
    >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
-    >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
+    >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164).item()
    True
    >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
    >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
@@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

    >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
+    >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028).item()
    True
    >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

    >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)
+    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16).item()
    True
    >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)
+    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16).item()
    False
    >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
@@ -433,7 +433,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl
        raise ValueError("Input arrays must have the same length.")

    squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
-    return np.mean(squared_logarithmic_errors)
+    return float(np.mean(squared_logarithmic_errors))


def mean_absolute_percentage_error(
@@ -478,7 +478,7 @@ def mean_absolute_percentage_error(
    y_true = np.where(y_true == 0, epsilon, y_true)
    absolute_percentage_diff = np.abs((y_true - y_pred) / y_true)

-    return np.mean(absolute_percentage_diff)
+    return float(np.mean(absolute_percentage_diff))


def perplexity_loss(
@@ -570,7 +570,7 @@ def perplexity_loss(
    # Calculating perplexity for each sentence
    perp_losses = np.exp(np.negative(np.mean(np.log(true_class_pred), axis=1)))

-    return np.mean(perp_losses)
+    return float(np.mean(perp_losses))


def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float:
@@ -626,7 +626,7 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) ->

    diff = np.abs(y_true - y_pred)
    loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
-    return np.mean(loss)
+    return float(np.mean(loss))


def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
@@ -660,7 +660,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
        raise ValueError("Input arrays must have the same length.")

    kl_loss = y_true * np.log(y_true / y_pred)
-    return np.sum(kl_loss)
+    return float(np.sum(kl_loss))


if __name__ == "__main__":