@@ -68,7 +68,7 @@ def binary_focal_cross_entropy(

>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
- >>> binary_focal_cross_entropy(true_labels, predicted_probs)
+ >>> float(binary_focal_cross_entropy(true_labels, predicted_probs))
0.008257977659239775
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -108,7 +108,7 @@ def categorical_cross_entropy(

>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
- >>> categorical_cross_entropy(true_labels, pred_probs)
+ >>> float(categorical_cross_entropy(true_labels, pred_probs))
0.567395975254385
>>> true_labels = np.array([[1, 0], [0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
@@ -179,13 +179,13 @@ def categorical_focal_cross_entropy(
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
>>> alpha = np.array([0.6, 0.2, 0.7])
- >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha)
+ >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
0.0025966118981496423

>>> true_labels = np.array([[0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> alpha = np.array([0.25, 0.25, 0.25])
- >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha)
+ >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
0.23315276982014324

>>> true_labels = np.array([[1, 0], [0, 1]])
@@ -265,7 +265,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:

>>> true_labels = np.array([-1, 1, 1, -1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
- >>> hinge_loss(true_labels, pred)
+ >>> float(hinge_loss(true_labels, pred))
1.52
>>> true_labels = np.array([-1, 1, 1, -1, 1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
@@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:

>>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
+ >>> bool(np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102))
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
- >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
+ >>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164))
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
@@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
+ >>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028))
True
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:

>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)
+ >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16))
True
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)
+ >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16))
False
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
@@ -600,17 +600,17 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) ->

>>> y_true = np.array([3, 5, 2, 7])
>>> y_pred = np.array([2.9, 4.8, 2.1, 7.2])
- >>> smooth_l1_loss(y_true, y_pred, 1.0)
+ >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.012500000000000022

>>> y_true = np.array([2, 4, 6])
>>> y_pred = np.array([1, 5, 7])
- >>> smooth_l1_loss(y_true, y_pred, 1.0)
+ >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.5

>>> y_true = np.array([1, 3, 5, 7])
>>> y_pred = np.array([1, 3, 5, 7])
- >>> smooth_l1_loss(y_true, y_pred, 1.0)
+ >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.0

>>> y_true = np.array([1, 3, 5])
@@ -647,7 +647,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float

>>> true_labels = np.array([0.2, 0.3, 0.5])
>>> predicted_probs = np.array([0.3, 0.3, 0.4])
- >>> kullback_leibler_divergence(true_labels, predicted_probs)
+ >>> float(kullback_leibler_divergence(true_labels, predicted_probs))
0.030478754035472025
>>> true_labels = np.array([0.2, 0.3, 0.5])
>>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5])
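
Note (an inference, not stated in the diff itself): NumPy 2.0 changed how scalars print (NEP 51), so a bare doctest call on these loss functions now shows np.float64(...) or np.True_ rather than a plain float or bool. Wrapping each call in float() or bool(), as the hunks above do, keeps the expected doctest outputs plain and stable across NumPy versions. A minimal sketch of the difference, assuming NumPy >= 2.0:

>>> import numpy as np
>>> np.float64(1.52)                    # NumPy 2.x scalar repr includes the type
np.float64(1.52)
>>> float(np.float64(1.52))             # wrapping restores the plain-float repr the doctests expect
1.52
>>> np.isclose(0.028, 0.028)            # np.isclose on scalars returns a NumPy bool scalar
np.True_
>>> bool(np.isclose(0.028, 0.028))      # bool() gives the plain True/False the doctests compare against
True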