fixed indents
khalednasr committed Mar 23, 2014
1 parent d825264 commit f590f9a
Showing 8 changed files with 35 additions and 74 deletions.
4 changes: 2 additions & 2 deletions doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb
@@ -87,11 +87,11 @@
      "output_type": "stream",
      "stream": "stdout",
      "text": [
-      "Test Error = 8.58938547486 %\n"
+      "Test Error = 8.54283054004 %\n"
      ]
     }
    ],
-   "prompt_number": 4
+   "prompt_number": 1
   },
   {
    "cell_type": "code",
10 changes: 0 additions & 10 deletions src/shogun/neuralnets/NeuralLayer.cpp
@@ -85,13 +85,3 @@ void CNeuralLayer::shallow_copy(const CNeuralLayer &orig)
     m_input_gradients = SGVector<float64_t>(orig.m_input_gradients);
     m_local_gradients = SGVector<float64_t>(orig.m_local_gradients);
 }
-
-
-
-
-
-
-
-
-
-
12 changes: 6 additions & 6 deletions src/shogun/neuralnets/NeuralLayer.h
@@ -96,8 +96,8 @@ class CNeuralLayer : public CSGObject
     * parameters
     */
    virtual void initialize_parameters(float64_t* parameters,
-           bool* parameter_regularizable,
-           float64_t sigma = 0.01f) = 0;
+       bool* parameter_regularizable,
+       float64_t sigma = 0.01f) = 0;
 
    /** Computes the activations of the neurons in this layer, results should
     * be stored in m_activations
@@ -136,10 +136,10 @@ class CNeuralLayer : public CSGObject
     * layer
     */
    virtual void compute_gradients(float64_t* parameters,
-           bool is_output,
-           float64_t* p,
-           float64_t* previous_layer_activations,
-           float64_t* parameter_gradients) = 0;
+       bool is_output,
+       float64_t* p,
+       float64_t* previous_layer_activations,
+       float64_t* parameter_gradients) = 0;
 
    /** Computes the error between the layer's current activations and the given
     * target activations. Should only be used with output layers
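The re-indented declarations above outline the abstract layer interface: initialize_parameters fills a caller-provided parameter buffer, compute_activations consumes the previous layer's activations, and compute_gradients writes this layer's parameter gradients. A minimal sketch of how a caller might drive one concrete layer follows; the get_num_parameters() accessor and the buffer handling are assumptions for illustration, not code from this commit:

```cpp
#include <shogun/lib/SGVector.h>
#include <shogun/neuralnets/NeuralLayer.h>

using namespace shogun;

// Sketch only: drives one concrete layer through the interface above.
// get_num_parameters() is an assumed accessor; this is not commit code.
void run_layer_once(CNeuralLayer* layer, float64_t* prev_activations,
        float64_t* local_gradient_input)
{
    int32_t n = layer->get_num_parameters(); // assumed accessor
    SGVector<float64_t> params(n);
    SGVector<bool> regularizable(n);

    // Random initialization with the default standard deviation
    layer->initialize_parameters(params.vector, regularizable.vector, 0.01);

    // Forward pass over the current batch
    layer->compute_activations(params.vector, prev_activations);

    // Backward pass: fills the parameter gradient buffer
    SGVector<float64_t> grads(n);
    layer->compute_gradients(params.vector, false, local_gradient_input,
        prev_activations, grads.vector);
}
```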
16 changes: 8 additions & 8 deletions src/shogun/neuralnets/NeuralLinearLayer.h
@@ -64,8 +64,8 @@ class CNeuralLinearLayer : public CNeuralLayer
     * parameters
     */
    virtual void initialize_parameters(float64_t* parameters,
-           bool* parameter_regularizable,
-           float64_t sigma = 0.01f);
+       bool* parameter_regularizable,
+       float64_t sigma = 0.01f);
 
    /** Computes the activations of the neurons in this layer, results should
     * be stored in m_activations
@@ -77,7 +77,7 @@
     * previous layer, matrix of size previous_layer_num_neurons * batch_size
     */
    virtual void compute_activations(float64_t* parameters,
-           float64_t* previous_layer_activations);
+       float64_t* previous_layer_activations);
 
    /** Computes the gradients that are relevent to this layer:
     * - The gradients of the error with respect to the layer's parameters
@@ -104,10 +104,10 @@
     * layer
     */
    virtual void compute_gradients(float64_t* parameters,
-           bool is_output,
-           float64_t* p,
-           float64_t* previous_layer_activations,
-           float64_t* parameter_gradients);
+       bool is_output,
+       float64_t* p,
+       float64_t* previous_layer_activations,
+       float64_t* parameter_gradients);
 
    /** Computes the error between the layer's current activations and the given
     * target activations. Should only be used with output layers
@@ -144,4 +144,4 @@ class CNeuralLinearLayer : public CNeuralLayer
 };
 
 }
-#endif
+#endif
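Per the documentation above, compute_activations for the linear layer maps a previous_layer_num_neurons * batch_size matrix of incoming activations to a num_neurons * batch_size matrix through the layer's weights and biases, i.e. a = Wx + b for each column of the batch. A self-contained sketch of that computation, assuming column-major storage (the commit itself does not show the implementation):

```cpp
// Dense linear forward pass over a batch, column-major layout as described
// in the doc comments above. Illustrative only, not the commit's code.
void linear_forward(const double* W,   // num_neurons x prev_num_neurons
                    const double* b,   // num_neurons
                    const double* X,   // prev_num_neurons x batch_size
                    double* A,         // num_neurons x batch_size (output)
                    int num_neurons, int prev_num_neurons, int batch_size)
{
    for (int j = 0; j < batch_size; j++)
        for (int i = 0; i < num_neurons; i++)
        {
            double sum = b[i];
            for (int k = 0; k < prev_num_neurons; k++)
                sum += W[i + k * num_neurons] * X[k + j * prev_num_neurons];
            A[i + j * num_neurons] = sum;
        }
}
```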
14 changes: 2 additions & 12 deletions src/shogun/neuralnets/NeuralLogisticLayer.cpp
@@ -29,7 +29,7 @@ CNeuralLinearLayer(num_neurons)
 }
 
 void CNeuralLogisticLayer::compute_activations(float64_t* parameters,
-        float64_t* previous_layer_activations)
+    float64_t* previous_layer_activations)
 {
     CNeuralLinearLayer::compute_activations(parameters,
         previous_layer_activations);
@@ -41,7 +41,7 @@ void CNeuralLogisticLayer::compute_activations(float64_t* parameters,
 }
 
 void CNeuralLogisticLayer::compute_local_gradients(bool is_output,
-        float64_t* p)
+    float64_t* p)
 {
     CNeuralLinearLayer::compute_local_gradients(is_output,p);
 
@@ -50,13 +50,3 @@ void CNeuralLogisticLayer::compute_local_gradients(bool is_output,
     for (int32_t i=0; i<length; i++)
         m_local_gradients[i] *= m_activations[i] * (1.0-m_activations[i]);
 }
-
-
-
-
-
-
-
-
-
-
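The loop at the end of compute_local_gradients above is the chain-rule step for the logistic activation: sigma(x) = 1/(1+exp(-x)) has the derivative sigma'(x) = sigma(x)(1 - sigma(x)), so each incoming gradient is scaled by a(1 - a) using the already-stored activation a. The same step in isolation, as a standalone sketch:

```cpp
#include <cmath>

// Logistic activation; its derivative can be expressed via the activation.
double sigmoid(double z) { return 1.0 / (1.0 + std::exp(-z)); }

// Elementwise chain-rule step: dE/dz_i = dE/da_i * a_i * (1 - a_i),
// where a_i = sigmoid(z_i) is the already-computed activation.
void scale_by_sigmoid_derivative(double* gradients, const double* activations,
                                 int length)
{
    for (int i = 0; i < length; i++)
        gradients[i] *= activations[i] * (1.0 - activations[i]);
}
```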
6 changes: 2 additions & 4 deletions src/shogun/neuralnets/NeuralLogisticLayer.h
@@ -10,8 +10,6 @@
 #ifndef __NEURALLOGISTICLAYER_H__
 #define __NEURALLOGISTICLAYER_H__
 
-#include <shogun/lib/config.h>
-#include <shogun/lib/common.h>
 #include <shogun/neuralnets/NeuralLinearLayer.h>
 
 #ifdef HAVE_EIGEN3
@@ -52,7 +50,7 @@ class CNeuralLogisticLayer : public CNeuralLinearLayer
     * previous layer, matrix of size previous_layer_num_neurons * batch_size
     */
    virtual void compute_activations(float64_t* parameters,
-           float64_t* previous_layer_activations);
+       float64_t* previous_layer_activations);
 
    /** Computes the gradients of the error with respect to this layer's
     * activations. Results are stored in m_local_gradients.
@@ -76,4 +74,4 @@ class CNeuralLogisticLayer : public CNeuralLinearLayer
 };
 
 }
-#endif
+#endif
31 changes: 7 additions & 24 deletions src/shogun/neuralnets/NeuralNetwork.cpp
@@ -76,7 +76,7 @@ CNeuralNetwork::~CNeuralNetwork()
 }
 
 CDenseFeatures<float64_t>* CNeuralNetwork::apply(
-        CDenseFeatures<float64_t>* inputs)
+    CDenseFeatures<float64_t>* inputs)
 {
     ASSERT(inputs->get_num_features()==m_num_inputs);
 
@@ -101,12 +101,12 @@ CDenseFeatures<float64_t>* CNeuralNetwork::apply(
 }
 
 void CNeuralNetwork::train_gradient_descent(
-        CDenseFeatures< float64_t >* inputs,
-        CDenseFeatures< float64_t >* targets,
-        int32_t max_num_epochs,
-        int32_t batch_size,
-        float64_t learning_rate,
-        float64_t momentum)
+    CDenseFeatures< float64_t >* inputs,
+    CDenseFeatures< float64_t >* targets,
+    int32_t max_num_epochs,
+    int32_t batch_size,
+    float64_t learning_rate,
+    float64_t momentum)
 {
     int32_t training_set_size = inputs->get_num_vectors();
     int32_t _batch_size = batch_size;
@@ -321,20 +321,3 @@ void CNeuralNetwork::shallow_copy(const CNeuralNetwork& orig)
     m_param_gradients = SGVector<float64_t>(orig.m_param_gradients);
     m_param_regularizable = SGVector<bool>(orig.m_param_regularizable);
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
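Given the train_gradient_descent signature re-indented above (and its defaults in NeuralNetwork.h below), a training call might look like the following sketch; the feature matrices and the configured network are assumed to exist, and none of this is code from the commit:

```cpp
#include <shogun/features/DenseFeatures.h>
#include <shogun/neuralnets/NeuralNetwork.h>

using namespace shogun;

// Sketch only: trains a pre-built network and applies it to its inputs.
void train_and_apply(CNeuralNetwork* network,
        CDenseFeatures<float64_t>* inputs,
        CDenseFeatures<float64_t>* targets)
{
    // Arguments follow the signature above; batch_size = 0 presumably
    // falls back to the full training set, per the default in the header.
    network->train_gradient_descent(inputs, targets,
        1000, // max_num_epochs
        20,   // batch_size
        0.1,  // learning_rate
        0.9); // momentum

    CDenseFeatures<float64_t>* outputs = network->apply(inputs);
    SG_UNREF(outputs);
}
```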
16 changes: 8 additions & 8 deletions src/shogun/neuralnets/NeuralNetwork.h
@@ -91,11 +91,11 @@ class CNeuralNetwork : public CSGObject
     * @param momentum momentum multiplier
     */
    virtual void train_gradient_descent(CDenseFeatures<float64_t>* inputs,
-           CDenseFeatures<float64_t>* targets,
-           int32_t max_num_epochs = 1000,
-           int32_t batch_size = 0,
-           float64_t learning_rate = 0.1,
-           float64_t momentum = 0.9);
+       CDenseFeatures<float64_t>* targets,
+       int32_t max_num_epochs = 1000,
+       int32_t batch_size = 0,
+       float64_t learning_rate = 0.1,
+       float64_t momentum = 0.9);
 
    /** Checks if the gradients computed using backpropagation are correct by
     * comparing them with gradients computed using numerical approximation.
@@ -109,7 +109,7 @@
     * @return true if the gradients are correct, false otherwise
     */
    virtual bool check_gradients(float64_t epsilon=1.0e-06,
-           float64_t tolerance=1.0e-09);
+       float64_t tolerance=1.0e-09);
 
    /** returns the totat number of parameters in the network */
    int32_t get_num_parameters() {return m_total_num_parameters;}
@@ -177,7 +177,7 @@
     * update the activations before the error is computed.
     */
    virtual float64_t compute_error(float64_t* targets,
-           float64_t* inputs=NULL);
+       float64_t* inputs=NULL);
 
 private:
    /** returns a pointer to layer i in the network */
@@ -254,4 +254,4 @@ class CNeuralNetwork : public CSGObject
 };
 
 }
-#endif
+#endif
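The check_gradients declaration above describes the standard finite-difference test: perturb each parameter by epsilon, approximate the derivative from the change in error, and compare against the backpropagated gradient within tolerance. A generic sketch of that idea (one common variant using a central difference; not Shogun's implementation):

```cpp
#include <cmath>
#include <functional>
#include <vector>

// Generic finite-difference gradient check in the spirit of the declaration
// above. The error() callback and analytic gradients are supplied by the
// caller; all names here are illustrative assumptions.
bool check_gradients_numerically(std::vector<double>& params,
                                 const std::vector<double>& analytic_grads,
                                 const std::function<double()>& error,
                                 double epsilon = 1.0e-6,
                                 double tolerance = 1.0e-9)
{
    for (size_t i = 0; i < params.size(); i++)
    {
        // Central difference: (E(w + eps) - E(w - eps)) / (2 * eps)
        double saved = params[i];
        params[i] = saved + epsilon;
        double e_plus = error();
        params[i] = saved - epsilon;
        double e_minus = error();
        params[i] = saved;

        double numeric = (e_plus - e_minus) / (2.0 * epsilon);
        if (std::abs(numeric - analytic_grads[i]) > tolerance)
            return false;
    }
    return true;
}
```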
