fixed indents
khalednasr committed Mar 23, 2014
1 parent f590f9a commit 74ef966
Showing 4 changed files with 28 additions and 37 deletions.
2 changes: 1 addition & 1 deletion src/shogun/neuralnets/NeuralLayer.cpp
@@ -22,7 +22,7 @@ CNeuralLayer::CNeuralLayer()

CNeuralLayer::CNeuralLayer(int32_t num_neurons)
: CSGObject(), m_num_neurons(num_neurons),
m_previous_layer_num_neurons(0), m_batch_size(0)
m_previous_layer_num_neurons(0), m_batch_size(0)
{
init();
}
6 changes: 3 additions & 3 deletions src/shogun/neuralnets/NeuralLayer.h
@@ -109,11 +109,11 @@ class CNeuralLayer : public CSGObject
* previous layer, matrix of size previous_layer_num_neurons * batch_size
*/
virtual void compute_activations(float64_t* parameters,
float64_t* previous_layer_activations) = 0;
float64_t* previous_layer_activations) = 0;

/** Computes the gradients that are relevent to this layer:
* - The gradients of the error with respect to the layer's parameters
* - The gradients of the error with respect to the layer's inputs
*- The gradients of the error with respect to the layer's parameters
* -The gradients of the error with respect to the layer's inputs
*
* The input gradients are stored in m_input_gradients
*
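The doc comment above describes the previous layer's activations as a previous_layer_num_neurons-by-batch_size matrix, and elsewhere in this commit the batch is addressed one column per example (e.g. targets_matrix + j*get_num_outputs() in NeuralNetwork.cpp), which suggests column-major storage. A minimal standalone sketch of that indexing, using plain doubles and illustrative names rather than Shogun's actual types:

#include <cstdint>
#include <vector>

// Element (neuron i, example j) of a num_neurons x batch_size activation
// matrix stored column-major lives at offset i + j*num_neurons.
double get_activation(const std::vector<double>& activations,
                      int32_t num_neurons, int32_t i, int32_t j)
{
    return activations[i + j*num_neurons];
}
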
22 changes: 6 additions & 16 deletions src/shogun/neuralnets/NeuralLinearLayer.cpp
@@ -36,8 +36,8 @@ int32_t CNeuralLinearLayer::get_num_parameters()
}

void CNeuralLinearLayer::initialize_parameters(float64_t* parameters,
bool* parameter_regularizable,
float64_t sigma)
bool* parameter_regularizable,
float64_t sigma)
{
CRandom random_generator(CRandom::generate_seed());
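
The rest of initialize_parameters is elided in this hunk; given the sigma argument and the CRandom generator it presumably draws each parameter from a zero-mean Gaussian with standard deviation sigma, but that is an assumption, not something the diff shows. A standalone sketch using the C++ standard library instead of CRandom:

#include <random>
#include <vector>

// Hypothetical restatement of Gaussian parameter initialization: every entry
// is drawn from N(0, sigma^2). In the real interface, parameter_regularizable
// would additionally mark which entries the regularizer may touch.
void init_gaussian(std::vector<double>& parameters, double sigma, unsigned seed)
{
    std::mt19937_64 rng(seed);
    std::normal_distribution<double> gauss(0.0, sigma);
    for (double& p : parameters)
        p = gauss(rng);
}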

@@ -54,7 +54,7 @@ void CNeuralLinearLayer::initialize_parameters(float64_t* parameters,


void CNeuralLinearLayer::compute_activations(float64_t* parameters,
float64_t* previous_layer_activations)
float64_t* previous_layer_activations)
{
float64_t* weights = parameters;
float64_t* biases = parameters + m_num_neurons*m_previous_layer_num_neurons;
@@ -72,9 +72,9 @@ void CNeuralLinearLayer::compute_activations(float64_t* parameters,
}
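
compute_activations splits its parameter block into weights (num_neurons * previous_layer_num_neurons entries) followed by biases; the elided body presumably forms activations = weights * previous_layer_activations + biases for every example in the batch. A self-contained sketch of that product with plain doubles (column-major storage assumed here; the diff does not show which ordering Shogun uses):

#include <vector>

// a = W*x + b, applied column by column over the batch. W is num_neurons x
// num_inputs, x is num_inputs x batch_size, a is num_neurons x batch_size.
void linear_forward(const std::vector<double>& W, const std::vector<double>& b,
                    const std::vector<double>& x, std::vector<double>& a,
                    int num_neurons, int num_inputs, int batch_size)
{
    for (int j = 0; j < batch_size; j++)
        for (int i = 0; i < num_neurons; i++)
        {
            double sum = b[i];
            for (int k = 0; k < num_inputs; k++)
                sum += W[i + k*num_neurons] * x[k + j*num_inputs];
            a[i + j*num_neurons] = sum;
        }
}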

void CNeuralLinearLayer::compute_gradients(float64_t* parameters,
bool is_output, float64_t* p,
float64_t* previous_layer_activations,
float64_t* parameter_gradients)
bool is_output, float64_t* p,
float64_t* previous_layer_activations,
float64_t* parameter_gradients)
{
float64_t* weights = parameters;

@@ -130,13 +130,3 @@ float64_t CNeuralLinearLayer::computer_error(float64_t* targets)
sum *= (0.5/m_batch_size);
return sum;
}
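
Only the tail of computer_error is visible above: an accumulated sum of squared differences is scaled by 0.5/m_batch_size, i.e. the usual squared error averaged over the batch. A standalone version of the whole computation (the accumulation loop itself is not shown in the diff, so this is a hedged reconstruction):

// Illustrative only: sum of squared differences between activations and
// targets, scaled by the same 0.5/batch_size factor visible above.
double squared_error(const double* activations, const double* targets,
                     int num_outputs, int batch_size)
{
    double sum = 0.0;
    for (int j = 0; j < batch_size; j++)
        for (int i = 0; i < num_outputs; i++)
        {
            double d = activations[i + j*num_outputs] - targets[i + j*num_outputs];
            sum += d*d;
        }
    return 0.5*sum/batch_size;
}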










35 changes: 18 additions & 17 deletions src/shogun/neuralnets/NeuralNetwork.cpp
@@ -63,7 +63,7 @@ void CNeuralNetwork::initialize(int32_t num_inputs, CDynamicObjectArray* layers)
for (int32_t i=0; i<m_num_layers; i++)
{
get_layer(i)->initialize_parameters(get_layer_params(i),
get_layer_param_regularizable(i));
get_layer_param_regularizable(i));

get_layer(i)->set_batch_size(m_batch_size);
}
@@ -140,7 +140,7 @@ void CNeuralNetwork::train_gradient_descent(
for (int32_t j=0; j < training_set_size; j += _batch_size)
{
if (j+_batch_size>training_set_size) j =
training_set_size-_batch_size;
training_set_size-_batch_size;

float64_t* targets_batch = targets_matrix + j*get_num_outputs();
float64_t* inputs_batch = inputs_matrix + j*m_num_inputs;
@@ -151,7 +151,7 @@ void CNeuralNetwork::train_gradient_descent(
for (int32_t k=0; k<n_param; k++)
{
param_updates[k] = momentum*param_updates[k]
-learning_rate*m_param_gradients[k];
-learning_rate*m_param_gradients[k];

m_params[k] += param_updates[k];
}
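
The update loop above is plain gradient descent with momentum: each parameter keeps a running update that is decayed by the momentum coefficient and pushed along the negative gradient, and the parameter then moves by that update. The same rule as a standalone function, with illustrative names rather than Shogun's members:

// u[k] <- momentum*u[k] - learning_rate*g[k];  w[k] <- w[k] + u[k]
void momentum_step(double* params, double* updates, const double* gradients,
                   int n_param, double learning_rate, double momentum)
{
    for (int k = 0; k < n_param; k++)
    {
        updates[k] = momentum*updates[k] - learning_rate*gradients[k];
        params[k] += updates[k];
    }
}
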
@@ -170,7 +170,7 @@ void CNeuralNetwork::forward_propagate(float64_t* inputs)

for (int i=1; i<m_num_layers; i++)
get_layer(i)->compute_activations(get_layer_params(i),
get_layer(i-1)->get_activations());
get_layer(i-1)->get_activations());
}
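
forward_propagate chains the layers: layer i reads the activations of layer i-1 (layer 0, handled before the loop and not visible in this hunk, presumably reads the raw inputs). A minimal sketch of that chaining, with a hypothetical Layer interface standing in for CNeuralLayer:

#include <vector>

struct Layer
{
    // Maps the previous layer's activations to this layer's activations.
    virtual const std::vector<double>& forward(const std::vector<double>& prev) = 0;
    virtual ~Layer() {}
};

// Feed the input through the stack, layer i consuming layer i-1's output.
void forward_propagate(std::vector<Layer*>& layers, const std::vector<double>& inputs)
{
    const std::vector<double>* prev = &inputs;
    for (Layer* layer : layers)
        prev = &layer->forward(*prev);
}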

void CNeuralNetwork::compute_gradients(float64_t* inputs, float64_t* targets)
@@ -180,7 +180,7 @@ void CNeuralNetwork::compute_gradients(float64_t* inputs, float64_t* targets)
if (m_num_layers==1)
{
get_layer(0)->compute_gradients(get_layer_params(0), true, targets,
inputs ,get_layer_param_gradients(0));
inputs ,get_layer_param_gradients(0));
}
else
{
@@ -190,21 +190,21 @@ void CNeuralNetwork::compute_gradients(float64_t* inputs, float64_t* targets)
if (i==m_num_layers-1)
{
get_layer(i)->compute_gradients(get_layer_params(i), true,
targets, get_layer(i-1)->get_activations(),
get_layer_param_gradients(i));
targets, get_layer(i-1)->get_activations(),
get_layer_param_gradients(i));
}
else if (i==0)
{
get_layer(i)->compute_gradients(get_layer_params(i), false,
get_layer(i+1)->get_input_gradients(),
inputs, get_layer_param_gradients(i));
get_layer(i+1)->get_input_gradients(),
inputs, get_layer_param_gradients(i));
}
else
{
get_layer(i)->compute_gradients(get_layer_params(i), false,
get_layer(i+1)->get_input_gradients(),
get_layer(i-1)->get_activations(),
get_layer_param_gradients(i));
get_layer(i+1)->get_input_gradients(),
get_layer(i-1)->get_activations(),
get_layer_param_gradients(i));
}
}
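
The branching above is the backpropagation dispatch: the output layer is driven directly by the targets, every hidden layer by the input gradients already stored by the layer above it, and the first layer reads the network's raw inputs instead of a previous layer's activations. A hedged sketch of that dispatch (hypothetical interface, and the loop is assumed to run from the last layer down, which this hunk does not show):

// Hypothetical stand-in for the CNeuralLayer gradient interface.
struct Layer
{
    virtual void backward(const double* error_signal, const double* inputs) = 0;
    virtual const double* input_gradients() const = 0;
    virtual const double* activations() const = 0;
    virtual ~Layer() {}
};

void backpropagate(Layer** layers, int num_layers,
                   const double* network_inputs, const double* targets)
{
    for (int i = num_layers - 1; i >= 0; i--)
    {
        // Output layer compares against the targets; hidden layers receive
        // the gradient the layer above computed with respect to its inputs.
        const double* signal = (i == num_layers - 1)
            ? targets : layers[i + 1]->input_gradients();
        // The first layer is fed the network inputs; all others the previous
        // layer's activations.
        const double* below = (i == 0)
            ? network_inputs : layers[i - 1]->activations();
        layers[i]->backward(signal, below);
    }
}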

@@ -296,16 +296,17 @@ void CNeuralNetwork::init()
SG_ADD(&m_batch_size, "batch_size",
"Batch Size", MS_NOT_AVAILABLE);
SG_ADD(&m_index_offsets, "index_offsets",
"Index Offsets", MS_NOT_AVAILABLE);
"Index Offsets", MS_NOT_AVAILABLE);
SG_ADD(&m_params, "params",
"Parameters", MS_NOT_AVAILABLE);
"Parameters", MS_NOT_AVAILABLE);
SG_ADD(&m_param_gradients, "param_gradients",
"Parameter Gradients", MS_NOT_AVAILABLE);
"Parameter Gradients", MS_NOT_AVAILABLE);
SG_ADD(&m_param_regularizable, "param_regularizable",
"Parameter Regularizable", MS_NOT_AVAILABLE);
"Parameter Regularizable", MS_NOT_AVAILABLE);

SG_ADD((CSGObject**)&m_layers, "layers",
"DynamicObjectArray of NeuralNetwork objects", MS_NOT_AVAILABLE);
"DynamicObjectArray of NeuralNetwork objects",
MS_NOT_AVAILABLE);
}

void CNeuralNetwork::shallow_copy(const CNeuralNetwork& orig)
