From b3ca5a8057cf1e7863628c51067acd5b64f7f9ba Mon Sep 17 00:00:00 2001 From: Adarsh Acharya Date: Sat, 2 Sep 2023 18:19:34 +0530 Subject: [PATCH 1/7] Added Scaled Exponential Linear Unit Activation Function --- .../scaled_exponential_linear_unit.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 neural_network/activation_functions/scaled_exponential_linear_unit.py diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py new file mode 100644 index 000000000000..fe1c91cc5f51 --- /dev/null +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -0,0 +1,35 @@ +""" +Implements the Scaled Exponential Linear Unit or SELU function. + +The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) and lambda (default = 1.0507) +as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. +It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be standardized +due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. + +References : +https://iq.opengenus.org/scaled-exponential-linear-unit/ +""" + +import numpy as np + +def scaled_exponential_linear_unit(vector: np.ndarray, alpha : float = 1.6732, _lambda : float = 1.0507) -> np.ndarray: + """ + Applies the Scaled Exponential Linear Unit function to each element of the vector. + Parameters : vector : np.ndarray + alpha : float (default = 1.6732) + _lambda : float (default = 1.0507) + Returns : np.ndarray + Formula : f(x) = _lambda * x if x > 0 + _lambda * alpha * (e**x - 1) if x <= 0 + Examples : + >>> scaled_exponential_linear_unit(vector = np.array([1.3, 3.7, 2.4])) + Output : np.array([1.36591, 3.88759, 2.52168]) + + >>> scaled_exponential_linear_unit(vector = np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) + Output : np.array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) + """ + return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) + +if __name__ == "__main__": + import doctest + doctest.testmod() \ No newline at end of file From 6a767bfb43f32ce929b26657a3c4aa5b2310252b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 12:59:57 +0000 Subject: [PATCH 2/7] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../scaled_exponential_linear_unit.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index fe1c91cc5f51..74af1cf92454 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -2,8 +2,8 @@ Implements the Scaled Exponential Linear Unit or SELU function. The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) and lambda (default = 1.0507) -as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. -It is a variant of the ELU. 
The main advantage of SELU is that we can be sure that the output will always be standardized +as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. +It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be standardized due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. References : @@ -12,7 +12,10 @@ import numpy as np -def scaled_exponential_linear_unit(vector: np.ndarray, alpha : float = 1.6732, _lambda : float = 1.0507) -> np.ndarray: + +def scaled_exponential_linear_unit( + vector: np.ndarray, alpha: float = 1.6732, _lambda: float = 1.0507 +) -> np.ndarray: """ Applies the Scaled Exponential Linear Unit function to each element of the vector. Parameters : vector : np.ndarray @@ -30,6 +33,8 @@ def scaled_exponential_linear_unit(vector: np.ndarray, alpha : float = 1.6732, _ """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) + if __name__ == "__main__": import doctest - doctest.testmod() \ No newline at end of file + + doctest.testmod() From 6c18b4b60dd13c96074b672af3de260aaf675c2f Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Sat, 2 Sep 2023 18:38:43 +0530 Subject: [PATCH 3/7] Update scaled_exponential_linear_unit.py Fixed max char limit violation --- .../scaled_exponential_linear_unit.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index 74af1cf92454..e0c9f1a8cb5f 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -1,10 +1,10 @@ """ Implements the Scaled Exponential Linear Unit or SELU function. -The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) and lambda (default = 1.0507) -as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. -It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be standardized -due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. +The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) & lambda (default = 1.0507) +as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation +function. It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be +standardized due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. 
References : https://iq.opengenus.org/scaled-exponential-linear-unit/ @@ -25,10 +25,10 @@ def scaled_exponential_linear_unit( Formula : f(x) = _lambda * x if x > 0 _lambda * alpha * (e**x - 1) if x <= 0 Examples : - >>> scaled_exponential_linear_unit(vector = np.array([1.3, 3.7, 2.4])) + >>> scaled_exponential_linear_unit(np.array([1.3, 3.7, 2.4])) Output : np.array([1.36591, 3.88759, 2.52168]) - >>> scaled_exponential_linear_unit(vector = np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) + >>> scaled_exponential_linear_unit(np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) Output : np.array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) From 3281a38983f6beaee794babdcf38e5c435c612b7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 13:09:15 +0000 Subject: [PATCH 4/7] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../activation_functions/scaled_exponential_linear_unit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index e0c9f1a8cb5f..63a8ccc810f3 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -2,8 +2,8 @@ Implements the Scaled Exponential Linear Unit or SELU function. The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) & lambda (default = 1.0507) -as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation -function. It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be +as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation +function. It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be standardized due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. References : From fa8b1f466ed973414f0263ebf1cb3edd9c446456 Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Sat, 2 Sep 2023 18:43:14 +0530 Subject: [PATCH 5/7] Update scaled_exponential_linear_unit.py --- .../scaled_exponential_linear_unit.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index 63a8ccc810f3..228a8eace6e5 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -1,10 +1,14 @@ """ Implements the Scaled Exponential Linear Unit or SELU function. -The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) & lambda (default = 1.0507) -as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation -function. It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be -standardized due to its self-normalizing behavior. 
That means there is no need to include Batch-Normalization layers. +The function takes a vector of K real numbers and two real numbers +alpha(default = 1.6732) & lambda (default = 1.0507) as input and +then applies the SELU function to each element of the vector. +SELU is a self-normalizing activation function. It is a variant +of the ELU. The main advantage of SELU is that we can be sure +that the output will always be standardized due to its +self-normalizing behavior. That means there is no need to +include Batch-Normalization layers. References : https://iq.opengenus.org/scaled-exponential-linear-unit/ From 1ef798623fc541885558b10480f301b67b45e2eb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 13:13:45 +0000 Subject: [PATCH 6/7] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../scaled_exponential_linear_unit.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index 228a8eace6e5..11e28e1dd470 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -1,13 +1,13 @@ """ Implements the Scaled Exponential Linear Unit or SELU function. -The function takes a vector of K real numbers and two real numbers -alpha(default = 1.6732) & lambda (default = 1.0507) as input and -then applies the SELU function to each element of the vector. -SELU is a self-normalizing activation function. It is a variant -of the ELU. The main advantage of SELU is that we can be sure -that the output will always be standardized due to its -self-normalizing behavior. That means there is no need to +The function takes a vector of K real numbers and two real numbers +alpha(default = 1.6732) & lambda (default = 1.0507) as input and +then applies the SELU function to each element of the vector. +SELU is a self-normalizing activation function. It is a variant +of the ELU. The main advantage of SELU is that we can be sure +that the output will always be standardized due to its +self-normalizing behavior. That means there is no need to include Batch-Normalization layers. 
References : From d99b6db467944f7e00d255dc749f3b5e7d63ca64 Mon Sep 17 00:00:00 2001 From: Adarsh Acharya Date: Sun, 10 Sep 2023 09:43:08 +0530 Subject: [PATCH 7/7] moved sigmoid, tanh and relu to neural_network/activation_functions --- {maths => neural_network/activation_functions}/relu.py | 0 {maths => neural_network/activation_functions}/sigmoid.py | 0 .../activation_functions}/sigmoid_linear_unit.py | 0 {maths => neural_network/activation_functions}/tanh.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename {maths => neural_network/activation_functions}/relu.py (100%) rename {maths => neural_network/activation_functions}/sigmoid.py (100%) rename {maths => neural_network/activation_functions}/sigmoid_linear_unit.py (100%) rename {maths => neural_network/activation_functions}/tanh.py (100%) diff --git a/maths/relu.py b/neural_network/activation_functions/relu.py similarity index 100% rename from maths/relu.py rename to neural_network/activation_functions/relu.py diff --git a/maths/sigmoid.py b/neural_network/activation_functions/sigmoid.py similarity index 100% rename from maths/sigmoid.py rename to neural_network/activation_functions/sigmoid.py diff --git a/maths/sigmoid_linear_unit.py b/neural_network/activation_functions/sigmoid_linear_unit.py similarity index 100% rename from maths/sigmoid_linear_unit.py rename to neural_network/activation_functions/sigmoid_linear_unit.py diff --git a/maths/tanh.py b/neural_network/activation_functions/tanh.py similarity index 100% rename from maths/tanh.py rename to neural_network/activation_functions/tanh.py
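
For reference, here is a minimal standalone sketch of the SELU behaviour this series adds, re-derived from the formula in the patch docstring (f(x) = _lambda * x for x > 0, _lambda * alpha * (e**x - 1) for x <= 0) rather than imported from the repository; the function name `selu` and the printed values below are illustrative only.

    import numpy as np

    # Defaults taken from the patch: alpha ~ 1.6732, lambda ~ 1.0507.
    ALPHA = 1.6732
    LAMBDA = 1.0507

    def selu(vector: np.ndarray, alpha: float = ALPHA, _lambda: float = LAMBDA) -> np.ndarray:
        # Elementwise: lambda * x where x > 0, else lambda * alpha * (exp(x) - 1).
        return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))

    if __name__ == "__main__":
        # Positive inputs are scaled by lambda alone: 1.0507 * 1.3 = 1.36591, etc.
        print(selu(np.array([1.3, 3.7, 2.4])))
        # Negative inputs saturate towards -lambda * alpha ~ -1.758, in line with the
        # values quoted in the docstring examples (-1.702..., -1.756..., -1.739...).
        print(selu(np.array([2.342, -3.455, -7.2116, 0.0, -4.532])))

Because positive inputs stay linear while negative inputs are bounded below by -lambda * alpha, activations keep approximately zero mean and unit variance as they propagate, which is the self-normalizing property the docstring cites as the reason no Batch-Normalization layers are needed.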