Permalink
Browse files

added support for non-linearities to CConvolutionalFeatureMap

  • Loading branch information...
khalednasr committed May 30, 2014
1 parent 9596049 commit 7ee4b6ed4e50e21402612ed0ebbae215e5388d4e
@@ -42,9 +42,11 @@ using namespace shogun;
/** Constructs a convolutional feature map.
 *
 * NOTE(review): the scraped diff interleaved the pre-change and post-change
 * signature/initializer lines; the stale duplicates are removed here so the
 * constructor is valid post-commit code.
 *
 * @param width width of the map's input
 * @param height height of the map's input
 * @param radius_x radius of the convolution filter along the x axis
 *        (filter width presumably 2*radius_x+1 — per the parameter-length
 *        docs elsewhere in the header; confirm against the implementation)
 * @param radius_y radius of the convolution filter along the y axis
 * @param index index of the map in its layer; used as a row offset
 *        (index*num_neurons) into the shared activations matrix
 * @param function activation function applied to the map's outputs
 *        (identity, logistic, or rectified linear)
 */
CConvolutionalFeatureMap::CConvolutionalFeatureMap(
	int32_t width, int32_t height,
	int32_t radius_x, int32_t radius_y, int32_t index,
	EConvMapActivationFunction function) :
	m_width(width), m_height(height),
	m_radius_x(radius_x), m_radius_y(radius_y), m_index(index),
	m_activation_function(function)
{
}
@@ -99,10 +101,26 @@ void CConvolutionalFeatureMap::compute_activations(
activations(i+row_offset,j) += biases[i];
}
}
if (m_activation_function==CMAF_LOGISTIC)
{
for (int32_t i=0; i<num_neurons; i++)
for (int32_t j=0; j<batch_size; j++)
activations(i+row_offset,j) =
1.0/(1.0+CMath::exp(-1.0*activations(i+row_offset,j)));
}
else if (m_activation_function==CMAF_RECTIFIED_LINEAR)
{
for (int32_t i=0; i<num_neurons; i++)
for (int32_t j=0; j<batch_size; j++)
activations(i+row_offset,j) =
CMath::max<float64_t>(0, activations(i+row_offset,j));
}
}
void CConvolutionalFeatureMap::compute_gradients(
SGVector< float64_t > parameters,
SGVector< float64_t > parameters,
SGMatrix<float64_t> activations,
SGMatrix< float64_t > activation_gradients,
CDynamicObjectArray* layers,
SGVector< int32_t > input_indices,
@@ -112,6 +130,26 @@ void CConvolutionalFeatureMap::compute_gradients(
int32_t batch_size = activation_gradients.num_cols;
int32_t row_offset = m_index*num_neurons;
if (m_activation_function==CMAF_LOGISTIC)
{
for (int32_t i=0; i<num_neurons; i++)
{
for (int32_t j=0; j<batch_size; j++)
{
activation_gradients(i+row_offset,j) *=
activation_gradients(i+row_offset,j) *
(1.0-activation_gradients(i+row_offset,j));
}
}
}
else if (m_activation_function==CMAF_RECTIFIED_LINEAR)
{
for (int32_t i=0; i<num_neurons; i++)
for (int32_t j=0; j<batch_size; j++)
if (activations(i+row_offset,j)==0)
activation_gradients(i+row_offset,j) = 0;
}
float64_t* bias_gradients = parameter_gradients.vector;
for (int32_t i=0; i<num_neurons; i++)
{
@@ -38,6 +38,14 @@
namespace shogun
{
/** Determines the activation function a convolutional feature map applies
 * to its outputs after convolution and bias addition
 */
enum EConvMapActivationFunction
{
/** Identity (linear) activation: outputs are left unchanged */
CMAF_IDENTITY = 0,
/** Logistic (sigmoid) activation: f(x) = 1/(1+exp(-x)) */
CMAF_LOGISTIC = 1,
/** Rectified linear activation: f(x) = max(0,x) */
CMAF_RECTIFIED_LINEAR = 2
};
template <class T> class SGVector;
template <class T> class SGMatrix;
class CDynamicObjectArray;
@@ -63,7 +71,8 @@ class CConvolutionalFeatureMap
* its outputs in.
*/
CConvolutionalFeatureMap(int32_t width, int32_t height,
int32_t radius_x, int32_t radius_y, int32_t index=0);
int32_t radius_x, int32_t radius_y, int32_t index=0,
EConvMapActivationFunction function = CMAF_IDENTITY);
/** Computes the activations of the feature map
*
@@ -93,6 +102,8 @@ class CConvolutionalFeatureMap
* @param parameters Vector of parameters for the map. length
* width*height+(2*radius_x+1)+(2*radius_y+1)
*
* @param activations Activations of the map
*
* @param activation_gradients Gradients of the error with respect to the
* map's activations
*
@@ -108,6 +119,7 @@ class CConvolutionalFeatureMap
* stored
*/
void compute_gradients(SGVector<float64_t> parameters,
SGMatrix<float64_t> activations,
SGMatrix<float64_t> activation_gradients,
CDynamicObjectArray* layers,
SGVector<int32_t> input_indices,
@@ -179,6 +191,9 @@ class CConvolutionalFeatureMap
* part of the activations/activation_gradients matrix that map will use
*/
int32_t m_index;
/** The map's activation function */
EConvMapActivationFunction m_activation_function;
};
}
Oops, something went wrong.

0 comments on commit 7ee4b6e

Please sign in to comment.