From d91eb1c86ad8a9d311d2b319b983931becb079bd Mon Sep 17 00:00:00 2001
From: SJTechy <67711756+atomicsorcerer@users.noreply.github.com>
Date: Tue, 19 Jul 2022 23:29:30 -0700
Subject: [PATCH 1/3] Created leaky_relu.py and implemented the algorithm.

---
 maths/leaky_relu.py | 52 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)
 create mode 100644 maths/leaky_relu.py

diff --git a/maths/leaky_relu.py b/maths/leaky_relu.py
new file mode 100644
index 000000000000..f4aee9b62421
--- /dev/null
+++ b/maths/leaky_relu.py
@@ -0,0 +1,52 @@
+"""
+This algorithm implements the leaky rectified linear (LReLU).
+
+LReLU is at times used as a substitute to ReLU because it fixes the dying ReLU problem.
+This is done by adding a slight slope to the negative portion of the function.
+The default value for the slope is 0.01.
+The new slope is determined before the network is trained.
+
+Script inspired from its corresponding Wikipedia article
+https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
+"""
+from __future__ import annotations
+
+
+def leaky_relu(vector: float | list[float], slope: float = 0.01) -> float | list[float]:
+    """
+    Implements the leaky rectified linear activation function
+
+    >>> leaky_relu([-5])
+    [-0.05]
+    >>> leaky_relu([-2, 0.8, -0.3])
+    [-0.02, 0.8, -0.003]
+    >>> leaky_relu(-3.0)
+    -0.03
+    >>> leaky_relu(2)
+    Traceback (most recent call last):
+    ...
+    ValueError: leaky_relu() only accepts floats or a list of floats for vector
+    """
+    if isinstance(vector, int):
+        raise ValueError(
+            "leaky_relu() only accepts floats or a list of floats for vector"
+        )
+    if not isinstance(slope, float):
+        raise ValueError("leaky_relu() only accepts a float value for slope")
+
+    if isinstance(vector, float):
+        if vector < 0:
+            return vector * slope
+        return vector
+
+    for index, value in enumerate(vector):
+        if value < 0:
+            vector[index] = value * slope
+
+    return vector
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()

From e1daa2b6842dd964702ca5b466b8c53b990b8105 Mon Sep 17 00:00:00 2001
From: SJTechy <67711756+atomicsorcerer@users.noreply.github.com>
Date: Wed, 20 Jul 2022 12:28:59 -0700
Subject: [PATCH 2/3] Updated docstrings in leaky_relu

---
 maths/leaky_relu.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/maths/leaky_relu.py b/maths/leaky_relu.py
index f4aee9b62421..7823fc4f814d 100644
--- a/maths/leaky_relu.py
+++ b/maths/leaky_relu.py
@@ -1,5 +1,5 @@
 """
-This algorithm implements the leaky rectified linear (LReLU).
+This algorithm implements the leaky rectified linear algorithm (LReLU).
 
 LReLU is at times used as a substitute to ReLU because it fixes the dying ReLU problem.
 This is done by adding a slight slope to the negative portion of the function.
@@ -16,6 +16,10 @@ def leaky_relu(vector: float | list[float], slope: float = 0.01) -> float | list
     """
     Implements the leaky rectified linear activation function
 
+    :param vector: The float or list of floats to apply the algorithm to
+    :param slope: The multiplier that is applied to every negative value in the list
+    :return: The modified value or list of values after applying LReLU
+
     >>> leaky_relu([-5])
     [-0.05]
     >>> leaky_relu([-2, 0.8, -0.3])

From 97dfba69c91cd602fd234aabf7f5a4fab85f23c9 Mon Sep 17 00:00:00 2001
From: SJTechy <67711756+atomicsorcerer@users.noreply.github.com>
Date: Sat, 23 Jul 2022 10:47:03 -0700
Subject: [PATCH 3/3] Changed slope to negative_slope to provide more info

---
 maths/leaky_relu.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/maths/leaky_relu.py b/maths/leaky_relu.py
index 7823fc4f814d..a94175b7b8f9 100644
--- a/maths/leaky_relu.py
+++ b/maths/leaky_relu.py
@@ -12,7 +12,9 @@
 from __future__ import annotations
 
 
-def leaky_relu(vector: float | list[float], slope: float = 0.01) -> float | list[float]:
+def leaky_relu(
+    vector: float | list[float], negative_slope: float = 0.01
+) -> float | list[float]:
     """
     Implements the leaky rectified linear activation function
 
@@ -35,17 +37,17 @@ def leaky_relu(vector: float | list[float], slope: float = 0.01) -> float | list
         raise ValueError(
             "leaky_relu() only accepts floats or a list of floats for vector"
         )
-    if not isinstance(slope, float):
-        raise ValueError("leaky_relu() only accepts a float value for slope")
+    if not isinstance(negative_slope, float):
+        raise ValueError("leaky_relu() only accepts a float value for negative_slope")
 
     if isinstance(vector, float):
         if vector < 0:
-            return vector * slope
+            return vector * negative_slope
         return vector
 
     for index, value in enumerate(vector):
         if value < 0:
-            vector[index] = value * slope
+            vector[index] = value * negative_slope
 
     return vector
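
For context only, and not part of the three patches above: a minimal usage sketch of the final leaky_relu() signature. It assumes the repository root is on PYTHONPATH so that maths.leaky_relu is importable; the example inputs and the 0.1 slope value are illustrative.

from maths.leaky_relu import leaky_relu

# Scalar input: negative values are scaled by the default negative_slope of 0.01.
print(leaky_relu(-3.0))  # -0.03

# List input with a custom slope: each negative entry is multiplied by negative_slope.
# Note that the list is modified in place and the same list object is returned.
values = [-2.0, 0.5]
print(leaky_relu(values, negative_slope=0.1))  # [-0.2, 0.5]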