From b3ca5a8057cf1e7863628c51067acd5b64f7f9ba Mon Sep 17 00:00:00 2001 From: Adarsh Acharya Date: Sat, 2 Sep 2023 18:19:34 +0530 Subject: [PATCH 01/11] Added Scaled Exponential Linear Unit Activation Function --- .../scaled_exponential_linear_unit.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 neural_network/activation_functions/scaled_exponential_linear_unit.py diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py new file mode 100644 index 000000000000..fe1c91cc5f51 --- /dev/null +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -0,0 +1,35 @@ +""" +Implements the Scaled Exponential Linear Unit or SELU function. + +The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) and lambda (default = 1.0507) +as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. +It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be standardized +due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. + +References : +https://iq.opengenus.org/scaled-exponential-linear-unit/ +""" + +import numpy as np + +def scaled_exponential_linear_unit(vector: np.ndarray, alpha : float = 1.6732, _lambda : float = 1.0507) -> np.ndarray: + """ + Applies the Scaled Exponential Linear Unit function to each element of the vector. + Parameters : vector : np.ndarray + alpha : float (default = 1.6732) + _lambda : float (default = 1.0507) + Returns : np.ndarray + Formula : f(x) = _lambda * x if x > 0 + _lambda * alpha * (e**x - 1) if x <= 0 + Examples : + >>> scaled_exponential_linear_unit(vector = np.array([1.3, 3.7, 2.4])) + Output : np.array([1.36591, 3.88759, 2.52168]) + + >>> scaled_exponential_linear_unit(vector = np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) + Output : np.array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) + """ + return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) + +if __name__ == "__main__": + import doctest + doctest.testmod() \ No newline at end of file From cf66ff8f368c62d3651027425fe24201e6e009dd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 13:46:52 +0000 Subject: [PATCH 02/11] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../scaled_exponential_linear_unit.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index fe1c91cc5f51..74af1cf92454 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -2,8 +2,8 @@ Implements the Scaled Exponential Linear Unit or SELU function. The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) and lambda (default = 1.0507) -as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. -It is a variant of the ELU. 
The main advantage of SELU is that we can be sure that the output will always be standardized +as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. +It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be standardized due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. References : @@ -12,7 +12,10 @@ import numpy as np -def scaled_exponential_linear_unit(vector: np.ndarray, alpha : float = 1.6732, _lambda : float = 1.0507) -> np.ndarray: + +def scaled_exponential_linear_unit( + vector: np.ndarray, alpha: float = 1.6732, _lambda: float = 1.0507 +) -> np.ndarray: """ Applies the Scaled Exponential Linear Unit function to each element of the vector. Parameters : vector : np.ndarray @@ -30,6 +33,8 @@ def scaled_exponential_linear_unit(vector: np.ndarray, alpha : float = 1.6732, _ """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) + if __name__ == "__main__": import doctest - doctest.testmod() \ No newline at end of file + + doctest.testmod() From b10622916bd88adcac517701b98a0124d72cb9fc Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Sat, 2 Sep 2023 19:22:04 +0530 Subject: [PATCH 03/11] Update scaled_exponential_linear_unit.py --- .../scaled_exponential_linear_unit.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index 74af1cf92454..a82b37876320 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -1,11 +1,13 @@ """ Implements the Scaled Exponential Linear Unit or SELU function. - -The function takes a vector of K real numbers and two real numbers alpha(default = 1.6732) and lambda (default = 1.0507) -as input and then applies the SELU function to each element of the vector. SELU is a self-normalizing activation function. -It is a variant of the ELU. The main advantage of SELU is that we can be sure that the output will always be standardized -due to its self-normalizing behavior. That means there is no need to include Batch-Normalization layers. - +The function takes a vector of K real numbers and two real numbers +alpha(default = 1.6732) & lambda (default = 1.0507) as input and +then applies the SELU function to each element of the vector. +SELU is a self-normalizing activation function. It is a variant +of the ELU. The main advantage of SELU is that we can be sure +that the output will always be standardized due to its +self-normalizing behavior. That means there is no need to +include Batch-Normalization layers. 
References : https://iq.opengenus.org/scaled-exponential-linear-unit/ """ From 563d5b72045fc96b9db58273812d326622511aec Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Sat, 2 Sep 2023 19:26:28 +0530 Subject: [PATCH 04/11] Update scaled_exponential_linear_unit.py --- .../activation_functions/scaled_exponential_linear_unit.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index a82b37876320..f54a09be19f4 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -27,10 +27,10 @@ def scaled_exponential_linear_unit( Formula : f(x) = _lambda * x if x > 0 _lambda * alpha * (e**x - 1) if x <= 0 Examples : - >>> scaled_exponential_linear_unit(vector = np.array([1.3, 3.7, 2.4])) + >>> scaled_exponential_linear_unit(np.array([1.3, 3.7, 2.4])) Output : np.array([1.36591, 3.88759, 2.52168]) - >>> scaled_exponential_linear_unit(vector = np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) + >>> scaled_exponential_linear_unit(np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) Output : np.array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) @@ -38,5 +38,4 @@ def scaled_exponential_linear_unit( if __name__ == "__main__": import doctest - doctest.testmod() From 5544cc2181456c059946bc8c821ac7c059430e04 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 13:56:59 +0000 Subject: [PATCH 05/11] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../activation_functions/scaled_exponential_linear_unit.py | 1 + 1 file changed, 1 insertion(+) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index f54a09be19f4..94f852bcd1c2 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -38,4 +38,5 @@ def scaled_exponential_linear_unit( if __name__ == "__main__": import doctest + doctest.testmod() From 01758c173e1cd99f4f9d7d61e2d26f1a9ec804ab Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Tue, 5 Sep 2023 08:58:33 +0530 Subject: [PATCH 06/11] Update scaled_exponential_linear_unit.py --- .../activation_functions/scaled_exponential_linear_unit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index 94f852bcd1c2..0124fde0b306 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -28,10 +28,10 @@ def scaled_exponential_linear_unit( _lambda * alpha * (e**x - 1) if x <= 0 Examples : >>> scaled_exponential_linear_unit(np.array([1.3, 3.7, 2.4])) - Output : np.array([1.36591, 3.88759, 2.52168]) + Output : array([1.36591, 3.88759, 2.52168]) >>> scaled_exponential_linear_unit(np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) - Output : np.array([2.4607394, -1.70249977, 
-1.75673386, 0., -1.73911634]) + Output : array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) From cd2484e3fd14ff0f70bc85c4e1f96d76854e550c Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Tue, 5 Sep 2023 09:44:49 +0530 Subject: [PATCH 07/11] Update scaled_exponential_linear_unit.py --- .../activation_functions/scaled_exponential_linear_unit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index 0124fde0b306..a15258f15b13 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -28,10 +28,10 @@ def scaled_exponential_linear_unit( _lambda * alpha * (e**x - 1) if x <= 0 Examples : >>> scaled_exponential_linear_unit(np.array([1.3, 3.7, 2.4])) - Output : array([1.36591, 3.88759, 2.52168]) + array([1.36591, 3.88759, 2.52168]) >>> scaled_exponential_linear_unit(np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) - Output : array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) + array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) From 185d2fe0fd0b243353dbbfad6dcc0b20d49f5940 Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Tue, 5 Sep 2023 11:50:53 +0530 Subject: [PATCH 08/11] Update scaled_exponential_linear_unit.py --- .../scaled_exponential_linear_unit.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index a15258f15b13..29fdeed2a215 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -20,23 +20,23 @@ def scaled_exponential_linear_unit( ) -> np.ndarray: """ Applies the Scaled Exponential Linear Unit function to each element of the vector. 
- Parameters : vector : np.ndarray - alpha : float (default = 1.6732) - _lambda : float (default = 1.0507) + Parameters : + vector : np.ndarray + alpha : float (default = 1.6732) + _lambda : float (default = 1.0507) + Returns : np.ndarray Formula : f(x) = _lambda * x if x > 0 _lambda * alpha * (e**x - 1) if x <= 0 Examples : - >>> scaled_exponential_linear_unit(np.array([1.3, 3.7, 2.4])) + >>> scaled_exponential_linear_unit(vector=np.array([1.3, 3.7, 2.4])) array([1.36591, 3.88759, 2.52168]) - >>> scaled_exponential_linear_unit(np.array([2.342, -3.455, -7.2116, 0.0, -4.532])) - array([2.4607394, -1.70249977, -1.75673386, 0., -1.73911634]) + >>> scaled_exponential_linear_unit(vector=np.array([1.3, 4.7, 8.2])) + array([1.36591 4.93829 8.61574]) """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) - if __name__ == "__main__": import doctest - doctest.testmod() From 6a08c33c22030e0cc5ca59745961fa58aa908e30 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 06:21:25 +0000 Subject: [PATCH 09/11] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../activation_functions/scaled_exponential_linear_unit.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index 29fdeed2a215..dc2cb41c3845 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -20,11 +20,11 @@ def scaled_exponential_linear_unit( ) -> np.ndarray: """ Applies the Scaled Exponential Linear Unit function to each element of the vector. 
- Parameters : + Parameters : vector : np.ndarray alpha : float (default = 1.6732) _lambda : float (default = 1.0507) - + Returns : np.ndarray Formula : f(x) = _lambda * x if x > 0 _lambda * alpha * (e**x - 1) if x <= 0 @@ -37,6 +37,8 @@ def scaled_exponential_linear_unit( """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) + if __name__ == "__main__": import doctest + doctest.testmod() From b5a4d22184e7bd251d213105e5367796465e78c2 Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Tue, 5 Sep 2023 11:56:54 +0530 Subject: [PATCH 10/11] Update scaled_exponential_linear_unit.py --- .../activation_functions/scaled_exponential_linear_unit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index dc2cb41c3845..a9f272d8f294 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -33,7 +33,7 @@ def scaled_exponential_linear_unit( array([1.36591, 3.88759, 2.52168]) >>> scaled_exponential_linear_unit(vector=np.array([1.3, 4.7, 8.2])) - array([1.36591 4.93829 8.61574]) + array([1.36591, 4.93829, 8.61574]) """ return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) From 76a11d1ff74c89ccf19b4b7684920d3978e2ceb9 Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Wed, 6 Sep 2023 20:11:50 +0530 Subject: [PATCH 11/11] Update scaled_exponential_linear_unit.py --- .../scaled_exponential_linear_unit.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py index a9f272d8f294..f91dc6852136 100644 --- a/neural_network/activation_functions/scaled_exponential_linear_unit.py +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -16,18 +16,18 @@ def scaled_exponential_linear_unit( - vector: np.ndarray, alpha: float = 1.6732, _lambda: float = 1.0507 + vector: np.ndarray, alpha: float = 1.6732, lambda_: float = 1.0507 ) -> np.ndarray: """ Applies the Scaled Exponential Linear Unit function to each element of the vector. Parameters : vector : np.ndarray alpha : float (default = 1.6732) - _lambda : float (default = 1.0507) + lambda_ : float (default = 1.0507) Returns : np.ndarray - Formula : f(x) = _lambda * x if x > 0 - _lambda * alpha * (e**x - 1) if x <= 0 + Formula : f(x) = lambda_ * x if x > 0 + lambda_ * alpha * (e**x - 1) if x <= 0 Examples : >>> scaled_exponential_linear_unit(vector=np.array([1.3, 3.7, 2.4])) array([1.36591, 3.88759, 2.52168]) @@ -35,7 +35,7 @@ def scaled_exponential_linear_unit( >>> scaled_exponential_linear_unit(vector=np.array([1.3, 4.7, 8.2])) array([1.36591, 4.93829, 8.61574]) """ - return _lambda * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) + return lambda_ * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) if __name__ == "__main__":
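Editor's note: the sketch below is a minimal usage example of the function as it stands after patch 11/11 (the `lambda_` signature); it is not part of the patch series itself. The import path is only an assumption that mirrors the file location added in patch 01/11 and that the repository root is importable; the cross-check simply re-applies the formula documented in the docstring.

# Minimal usage sketch -- assumes the final signature from patch 11/11
# and that the repository root is on sys.path so this module path resolves.
import numpy as np

from neural_network.activation_functions.scaled_exponential_linear_unit import (
    scaled_exponential_linear_unit,
)

inputs = np.array([1.3, -2.0, 0.0, 4.7])

# Defaults from the patch: alpha = 1.6732, lambda_ = 1.0507
print(scaled_exponential_linear_unit(inputs))

# Cross-check against the documented formula:
#   f(x) = lambda_ * x                   if x > 0
#          lambda_ * alpha * (e**x - 1)  if x <= 0
alpha, lambda_ = 1.6732, 1.0507
expected = np.where(inputs > 0, lambda_ * inputs, lambda_ * alpha * (np.exp(inputs) - 1))
print(np.allclose(scaled_exponential_linear_unit(inputs), expected))  # True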