binary_cross_entropy.py
import numpy as np


def binary_cross_entropy(
    y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate the BCE Loss between true labels and predicted probabilities.
Parameters:
- y_true: True binary labels (0 or 1).
- y_pred: Predicted probabilities for class 1.
- epsilon: Small constant to avoid numerical instability.
Returns:
- bce_loss: Binary Cross-Entropy Loss.
Example Usage:
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
>>> bce_loss = binary_cross_entropy(true_labels, predicted_probs)
>>> bce_loss
0.6785203447911846
"""
    # Clip predicted probabilities so that neither log(y_pred) nor
    # log(1 - y_pred) is evaluated at 0
    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)

    # Calculate the per-sample binary cross-entropy loss
    bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

    # Take the mean over all samples
    return float(np.mean(bce_loss))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
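
    # Minimal usage sketch (an illustrative addition, not part of the
    # original module): evaluate the loss on the docstring's example data.
    true_labels = np.array([0, 1, 1, 0, 1])
    predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
    print(f"BCE loss: {binary_cross_entropy(true_labels, predicted_probs):.10f}")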