From 4c249ddacd62ddf8117134f439543d2b4525483f Mon Sep 17 00:00:00 2001
From: cozek
Date: Thu, 3 Oct 2019 17:20:06 +0530
Subject: [PATCH 1/2] adding softmax function

---
 maths/softmax.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)
 create mode 100644 maths/softmax.py

diff --git a/maths/softmax.py b/maths/softmax.py
new file mode 100644
index 000000000000..e78da5d3ed89
--- /dev/null
+++ b/maths/softmax.py
@@ -0,0 +1,50 @@
+"""
+This script demonstrates the implementation of the Softmax function.
+
+It's a function that takes as input a vector of K real numbers, and normalizes it into a probability distribution consisting of K probabilities proportional to the exponentials of the input numbers. After softmax, the elements of the vector always
+sum up to 1.
+
+Script inspired by its corresponding Wikipedia article
+https://en.wikipedia.org/wiki/Softmax_function
+"""
+
+import numpy as np
+
+
+def softmax(vector):
+    """
+    Implements the softmax function
+
+    Parameters:
+        vector (np.array,list,tuple): A numpy array of shape (1,n) consisting
+        of real values or a similar list,tuple
+
+    Returns:
+        softmax_vec (np.array): The input numpy array after applying softmax.
+
+    The softmax vector adds up to one. We ceil the sum to compensate for floating-point precision error.
+    >>> np.ceil(np.sum(softmax([1,2,3,4])))
+    1.0
+
+    >>> vec = np.array([5,5])
+    >>> softmax(vec)
+    array([0.5, 0.5])
+
+    >>> softmax([0])
+    array([1.])
+    """
+
+    # Calculate e^x for each x in your vector where e is Euler's number (approximately 2.718)
+    exponentVector = np.exp(vector)
+
+    # Add up all the exponentials
+    sumOfExponents = np.sum(exponentVector)
+
+    # Divide every exponent by the sum of all exponents
+    softmax_vector = exponentVector / sumOfExponents
+
+    return softmax_vector
+
+
+if __name__ == "__main__":
+    print(softmax((0,)))

From 4a63c784c4076ac617b197e475d5f350d8868079 Mon Sep 17 00:00:00 2001
From: cozek
Date: Thu, 3 Oct 2019 19:49:13 +0530
Subject: [PATCH 2/2] wrapped lines as asked

---
 maths/softmax.py | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/maths/softmax.py b/maths/softmax.py
index e78da5d3ed89..92ff4ca27b88 100644
--- a/maths/softmax.py
+++ b/maths/softmax.py
@@ -1,8 +1,10 @@
 """
 This script demonstrates the implementation of the Softmax function.
 
-It's a function that takes as input a vector of K real numbers, and normalizes it into a probability distribution consisting of K probabilities proportional to the exponentials of the input numbers. After softmax, the elements of the vector always
-sum up to 1.
+It's a function that takes as input a vector of K real numbers, and normalizes
+it into a probability distribution consisting of K probabilities proportional
+to the exponentials of the input numbers. After softmax, the elements of the
+vector always sum up to 1.
 
 Script inspired by its corresponding Wikipedia article
 https://en.wikipedia.org/wiki/Softmax_function
@@ -16,13 +18,16 @@ def softmax(vector):
     Implements the softmax function
 
     Parameters:
-        vector (np.array,list,tuple): A numpy array of shape (1,n) consisting
-        of real values or a similar list,tuple
+        vector (np.array,list,tuple): A numpy array of shape (1,n)
+        consisting of real values or a similar list,tuple
+
 
     Returns:
-        softmax_vec (np.array): The input numpy array after applying softmax.
+        softmax_vec (np.array): The input numpy array after applying
+        softmax.
 
-    The softmax vector adds up to one. We ceil the sum to compensate for floating-point precision error.
+    The softmax vector adds up to one. We ceil the sum to compensate for
+    floating-point precision error.
     >>> np.ceil(np.sum(softmax([1,2,3,4])))
     1.0
 
@@ -34,7 +39,8 @@ def softmax(vector):
     array([1.])
     """
 
-    # Calculate e^x for each x in your vector where e is Euler's number (approximately 2.718)
+    # Calculate e^x for each x in your vector where e is Euler's
+    # number (approximately 2.718)
     exponentVector = np.exp(vector)
 
     # Add up all the exponentials
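
Aside, not part of the patches above: since softmax(x)_i = exp(x_i) / sum_j exp(x_j), calling np.exp directly on the raw input can overflow for large values (np.exp(1000.0) is already inf), and the patched softmax would then return nan. A common remedy is to subtract the maximum element before exponentiating, which leaves the result mathematically unchanged. The following is a minimal sketch of that variant, assuming only NumPy; the name stable_softmax is illustrative and does not appear in the patch.

    import numpy as np


    def stable_softmax(vector):
        """Softmax with the max-subtraction trick for numerical stability."""
        vector = np.asarray(vector, dtype=float)
        # Shifting by the maximum leaves the ratios of exponentials unchanged,
        # but guarantees the largest exponent is 0, so np.exp cannot overflow.
        shifted = vector - np.max(vector)
        exponents = np.exp(shifted)
        return exponents / np.sum(exponents)


    if __name__ == "__main__":
        # The unshifted version would yield nan here, since np.exp(1000.0) is inf.
        print(stable_softmax([1000.0, 1000.0]))  # -> [0.5 0.5]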