Created folder for losses in Machine_Learning #9969
New file (binary cross-entropy loss), @@ -0,0 +1,31 @@:

import numpy as np


def binary_cross_entropy(y_true, y_pred, epsilon=1e-15):
    """
    Calculate the BCE Loss between true labels and predicted probabilities.

    Parameters:
    - y_true: True binary labels (0 or 1).
    - y_pred: Predicted probabilities for class 1.
    - epsilon: Small constant to avoid numerical instability.

    Returns:
    - bce_loss: Binary Cross-Entropy Loss.

    Example Usage:
    true_labels = np.array([0, 1, 1, 0, 1])
    predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
    bce_loss = binary_cross_entropy(true_labels, predicted_probs)
    print(f"Binary Cross-Entropy Loss: {bce_loss}")
    """
    # Clip predicted probabilities to avoid log(0) and log(1)
    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)

    # Calculate binary cross-entropy loss
    bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

    # Take the mean over all samples
    bce_loss = np.mean(bce_loss)

    return bce_loss

Review comment on the binary_cross_entropy definition: As there is no test file in this pull request nor any test function or class in the file, please provide doctests for the function. Please provide return type hint for the function: binary_cross_entropy. Please provide type hint for the parameter: y_true. Please provide type hint for the parameter: y_pred. Please provide type hint for the parameter: epsilon.
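For reference, the function implements the standard binary cross-entropy, BCE = -(1/N) · Σᵢ [yᵢ·log(pᵢ) + (1 − yᵢ)·log(1 − pᵢ)]. Below is a minimal usage sketch; the import path is an assumption inferred from the PR title (a losses folder under machine_learning), and the expected value (~0.253) comes from evaluating the formula on the docstring's example inputs.

import numpy as np

# Assumed import path, inferred from the PR title; adjust to the real location.
from machine_learning.losses.binary_cross_entropy import binary_cross_entropy

true_labels = np.array([0, 1, 1, 0, 1])
predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])

loss = binary_cross_entropy(true_labels, predicted_probs)
print(f"Binary Cross-Entropy Loss: {loss:.4f}")  # ~0.2530 for these inputs

# Sanity check: recompute the formula directly (clipping is a no-op here,
# since no predicted probability is exactly 0 or 1).
manual = -np.mean(
    true_labels * np.log(predicted_probs)
    + (1 - true_labels) * np.log(1 - predicted_probs)
)
assert np.isclose(loss, manual)

Rewriting this example as a doctest inside the docstring would also address the review comment above.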
New file (mean squared error loss), @@ -0,0 +1,27 @@:

import numpy as np


def mean_squared_error(y_true, y_pred):
    """
    Calculate the Mean Squared Error (MSE) between two arrays.

    Parameters:
    - y_true: The true values (ground truth).
    - y_pred: The predicted values.

    Returns:
    - mse: The Mean Squared Error between y_true and y_pred.

    Example usage:
    true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
    mse = mean_squared_error(true_values, predicted_values)
    print(f"Mean Squared Error: {mse}")
    """
    if len(y_true) != len(y_pred):
        raise ValueError("Input arrays must have the same length.")

    squared_errors = np.square(np.subtract(y_true, y_pred))
    mse = np.mean(squared_errors)

    return mse
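This second file implements MSE = (1/N) · Σᵢ (yᵢ − ŷᵢ)². A minimal usage sketch follows, under the same assumed folder layout as above; for the docstring's example inputs the squared errors are 0.04, 0.01, 0.01, 0.04, 0.04, so the mean is 0.028.

import numpy as np

# Assumed import path, inferred from the PR title; adjust to the real location.
from machine_learning.losses.mean_squared_error import mean_squared_error

true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])

mse = mean_squared_error(true_values, predicted_values)
print(f"Mean Squared Error: {mse:.3f}")  # 0.028 for these inputs

# The length check guards against mismatched inputs:
try:
    mean_squared_error(true_values, predicted_values[:-1])
except ValueError as exc:
    print(exc)  # Input arrays must have the same length.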