binary_cross_entropy.py
import numpy as np


def binary_cross_entropy(
    y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate the BCE Loss between true labels and predicted probabilities.
Parameters:
- y_true: True binary labels (0 or 1).
- y_pred: Predicted probabilities for class 1.
- epsilon: Small constant to avoid numerical instability.
Returns:
- bce_loss: Binary Cross-Entropy Loss.
Example Usage:
true_labels = np.array([0, 1, 1, 0, 1])
predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
bce_loss = binary_cross_entropy(true_labels, predicted_probs)
print(f"Binary Cross-Entropy Loss: {bce_loss}")
"""
    # Clip predicted probabilities so that neither log(y_pred) nor
    # log(1 - y_pred) is evaluated at 0
    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)

    # Per-sample binary cross-entropy: -[y * log(p) + (1 - y) * log(1 - p)]
    bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

    # Take the mean over all samples
    bce_loss = np.mean(bce_loss)
    return bce_loss
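

# Minimal usage sketch: running the module directly evaluates the docstring
# example. With these inputs the mean loss works out to roughly 0.2530.
if __name__ == "__main__":
    true_labels = np.array([0, 1, 1, 0, 1])
    predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
    loss = binary_cross_entropy(true_labels, predicted_probs)
    print(f"Binary Cross-Entropy Loss: {loss:.4f}")  # ~0.2530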