categorical_cross_entropy.py
"""
Categorical Cross-Entropy Loss
This function calculates the Categorical Cross-Entropy Loss between true class
labels and predicted class probabilities.
Formula:
Categorical Cross-Entropy Loss = -Σ(y_true * log(y_pred))
Resources:
- [Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy)
"""
import numpy as np
def categorical_crossentropy(
y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate Categorical Cross-Entropy Loss between true class labels and
predicted class probabilities.
Parameters:
- y_true: True class labels (one-hot encoded) as a NumPy array.
- y_pred: Predicted class probabilities as a NumPy array.
- epsilon: Small constant to avoid numerical instability.
Returns:
- ce_loss: Categorical Cross-Entropy Loss as a floating-point number.
Example:
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
>>> categorical_crossentropy(true_labels, pred_probs)
0.18913199175146167
>>> y_true = np.array([[1, 0], [0, 1]])
>>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_crossentropy(y_true, y_pred)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
    if y_true.shape != y_pred.shape:
        raise ValueError("Input arrays must have the same shape.")
# Clip predicted probabilities to avoid log(0)
y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
    # Average the summed cross-entropy over the number of samples
    return -np.sum(y_true * np.log(y_pred)) / len(y_true)
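

# A hedged sketch, not part of the original module: the same mean loss can be
# recovered from per-sample losses, which is handy when inspecting individual
# predictions. The helper name `per_sample_categorical_crossentropy` is an
# illustrative assumption, not an established API.
def per_sample_categorical_crossentropy(
    y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> np.ndarray:
    """
    Return the cross-entropy loss of each sample separately; the mean of this
    array equals the value returned by `categorical_crossentropy`.

    >>> true_labels = np.array([[1, 0, 0], [0, 1, 0]])
    >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
    >>> np.round(per_sample_categorical_crossentropy(true_labels, pred_probs), 6)
    array([0.105361, 0.356675])
    """
    if y_true.shape != y_pred.shape:
        raise ValueError("Input arrays must have the same shape.")
    # Clip predicted probabilities to avoid log(0), as in the function above
    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
    # Sum over the class axis only, keeping one loss value per sample
    return -np.sum(y_true * np.log(y_pred), axis=1)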


if __name__ == "__main__":
import doctest
doctest.testmod()
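
    # Small usage demo; the values mirror the first doctest example above.
    true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
    print(f"Mean loss: {categorical_crossentropy(true_labels, pred_probs)}")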