Add gradient output functionality in Neural Nets
sanuj committed May 6, 2015
1 parent 3f9ee7a commit 9f55012
Showing 3 changed files with 27 additions and 2 deletions.
2 changes: 1 addition & 1 deletion src/shogun/io/SGIO.h
@@ -97,7 +97,7 @@ enum EMessageLocation
#define __PRETTY_FUNCTION__ __FUNCTION__
#endif

-// printf like funktions (with additional severity level)
+// printf like functions (with additional severity level)
// for object derived from CSGObject
#define SG_GCDEBUG(...) { \
	if (SG_UNLIKELY(io->loglevel_above(MSG_GCDEBUG))) \
25 changes: 25 additions & 0 deletions src/shogun/neuralnets/NeuralNetwork.cpp
@@ -309,6 +309,18 @@ bool CNeuralNetwork::train_gradient_descent(SGMatrix<float64_t> inputs,

	float64_t e = compute_gradients(inputs_batch, targets_batch, gradients);

+	for (int32_t k=0; k<m_num_layers; k++)
+	{
+		SGVector<float64_t> layer_gradients = get_section(gradients, k);
+		if (layer_gradients.vlen > 0)
+		{
+			SG_INFO("Layer %i, Max Gradient: %g, Mean Gradient: %g.\n", k,
+				CMath::max(layer_gradients.vector, layer_gradients.vlen),
+				SGVector<float64_t>::sum(layer_gradients.vector, layer_gradients.vlen)/layer_gradients.vlen);
+		}
+	}

	// filter the errors
	if (error==-1.0)
		error = e;
@@ -408,6 +420,19 @@ int CNeuralNetwork::lbfgs_progress(void* instance,
	int n, int k, int ls)
{
	SG_SINFO("Epoch %i: Error = %f\n",k, fx);

+	CNeuralNetwork* network = static_cast<CNeuralNetwork*>(instance);
+	SGVector<float64_t> gradients((float64_t*)g, network->get_num_parameters(), false);
+	for (int32_t i=0; i<network->m_num_layers; i++)
+	{
+		SGVector<float64_t> layer_gradients = network->get_section(gradients, i);
+		if (layer_gradients.vlen > 0)
+		{
+			SG_SINFO("Layer %i, Max Gradient: %g, Mean Gradient: %g.\n", i,
+				CMath::max(layer_gradients.vector, layer_gradients.vlen),
+				SGVector<float64_t>::sum(layer_gradients.vector, layer_gradients.vlen)/layer_gradients.vlen);
+		}
+	}
	return 0;
}

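Both new logging blocks compute the same pair of per-layer statistics: the (signed) maximum entry and the arithmetic mean of that layer's slice of the flat gradient vector. A minimal standalone sketch of the computation, in plain C++ rather than the Shogun API (offset and len are hypothetical stand-ins for the slice that get_section returns):

#include <algorithm>
#include <cstdio>
#include <numeric>

// Illustrates the statistics logged above for a single layer: the maximum
// entry and the mean of that layer's slice of the gradient vector.
// offset/len are hypothetical stand-ins for what get_section extracts.
void print_gradient_stats(const double* grad, int offset, int len, int layer)
{
	if (len <= 0)
		return; // layers without parameters produce no output, as in the commit

	double max_g = *std::max_element(grad + offset, grad + offset + len);
	double mean_g = std::accumulate(grad + offset, grad + offset + len, 0.0) / len;
	std::printf("Layer %i, Max Gradient: %g, Mean Gradient: %g.\n", layer, max_g, mean_g);
}

Note that, like CMath::max over the raw values, this reports the signed maximum rather than the largest magnitude, so a layer dominated by large negative gradients can still print a small "Max Gradient".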
2 changes: 1 addition & 1 deletion src/shogun/neuralnets/NeuralNetwork.h
@@ -95,7 +95,7 @@ enum ENNOptimizationMethod
* due to their stochastic nature. Use gradient descent instead.
*
* During training, the error at each iteration is logged as MSG_INFO. (to turn
- * on info messages call io.set_loglevel(MSG_INFO)).
+ * on info messages call sg_io->set_loglevel(MSG_INFO)).
*
* The network stores the parameters of all the layers in a single array. This
* makes it easy to train a network of any combination of arbitrary layer types
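As the updated documentation notes, these messages are only printed when the log level admits MSG_INFO. A minimal sketch of enabling them before training, assuming the Shogun globals and init helpers of this era (init_shogun_with_defaults, sg_io, exit_shogun); construction of the actual network and features is elided:

#include <shogun/base/init.h>
#include <shogun/io/SGIO.h>

using namespace shogun;

int main()
{
	init_shogun_with_defaults();

	// Lower the logging threshold so the SG_INFO/SG_SINFO messages added in
	// this commit (per-epoch error and per-layer gradient stats) are printed.
	sg_io->set_loglevel(MSG_INFO);

	// ... build a CNeuralNetwork and call train() here; each epoch will then
	// also log "Layer i, Max Gradient: ..., Mean Gradient: ...".

	exit_shogun();
	return 0;
}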
