
Commit

add setters/getters in nn and autoencoders
sanuj committed Jun 8, 2016
1 parent ed0cc9e commit cdbe259
Showing 8 changed files with 1,381 additions and 1,089 deletions.
949 changes: 481 additions & 468 deletions doc/ipython-notebooks/neuralnets/autoencoders.ipynb

Large diffs are not rendered by default.

1,034 changes: 525 additions & 509 deletions doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb

Large diffs are not rendered by default.

28 changes: 14 additions & 14 deletions src/shogun/io/NeuralNetworkFileReader.cpp
@@ -138,36 +138,36 @@ CNeuralNetwork* CNeuralNetworkFileReader::parse_network(json_object* json_networ
	{
		const char* method = json_object_get_string(iter.val);
		if (string_equal(method, "NNOM_LBFGS"))
-			network->optimization_method = NNOM_LBFGS;
+			network->set_optimization_method(NNOM_LBFGS);
		else if (string_equal(method, "NNOM_GRADIENT_DESCENT"))
-			network->optimization_method = NNOM_GRADIENT_DESCENT;
+			network->set_optimization_method(NNOM_GRADIENT_DESCENT);
		else
			SG_ERROR("Invalid optimization method (%s)\n", method);
	}
	else if (string_equal(iter.key, "l2_coefficient"))
-		network->l2_coefficient = json_object_get_double(iter.val);
+		network->set_l2_coefficient(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "l1_coefficient"))
-		network->l1_coefficient = json_object_get_double(iter.val);
+		network->set_l1_coefficient(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "dropout_hidden"))
-		network->dropout_hidden = json_object_get_double(iter.val);
+		network->set_dropout_hidden(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "dropout_input"))
-		network->dropout_input = json_object_get_double(iter.val);
+		network->set_dropout_input(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "max_norm"))
-		network->max_norm = json_object_get_double(iter.val);
+		network->set_max_norm(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "epsilon"))
-		network->epsilon = json_object_get_double(iter.val);
+		network->set_epsilon(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "max_num_epochs"))
-		network->max_num_epochs = json_object_get_int(iter.val);
+		network->set_max_num_epochs(json_object_get_int(iter.val));
	else if (string_equal(iter.key, "gd_mini_batch_size"))
-		network->gd_mini_batch_size = json_object_get_int(iter.val);
+		network->set_gd_mini_batch_size(json_object_get_int(iter.val));
	else if (string_equal(iter.key, "gd_learning_rate"))
-		network->gd_learning_rate = json_object_get_double(iter.val);
+		network->set_gd_learning_rate(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "gd_learning_rate_decay"))
-		network->gd_learning_rate_decay = json_object_get_double(iter.val);
+		network->set_gd_learning_rate_decay(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "gd_momentum"))
-		network->gd_momentum = json_object_get_double(iter.val);
+		network->set_gd_momentum(json_object_get_double(iter.val));
	else if (string_equal(iter.key, "gd_error_damping_coeff"))
-		network->gd_error_damping_coeff = json_object_get_double(iter.val);
+		network->set_gd_error_damping_coeff(json_object_get_double(iter.val));

	else if (!string_equal(iter.key, "layers"))
		SG_ERROR("Invalid parameter (%s)\n", iter.key);
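For comparison, the same parameters can now be configured programmatically through the new setters instead of writing to public fields. A minimal sketch; the helper name and the parameter values are illustrative, only the setters and the NNOM_* values come from the code above:

#include <shogun/neuralnets/NeuralNetwork.h>

using namespace shogun;

// Illustrative helper (not part of the commit): mirrors what parse_network()
// does when it encounters the corresponding JSON keys.
void configure_network(CNeuralNetwork* network)
{
	network->set_optimization_method(NNOM_LBFGS); // or NNOM_GRADIENT_DESCENT
	network->set_l2_coefficient(0.001);           // L2 regularization strength
	network->set_max_num_epochs(100);             // stop after at most 100 epochs
	network->set_epsilon(1e-5);                   // convergence tolerance
	network->set_gd_learning_rate(0.1);           // used only by gradient descent
	network->set_gd_mini_batch_size(64);
}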
63 changes: 50 additions & 13 deletions src/shogun/neuralnets/Autoencoder.h
@@ -165,6 +165,44 @@ class CAutoencoder : public CNeuralNetwork

	virtual const char* get_name() const { return "Autoencoder"; }

+	/** Sets the noise type for denoising autoencoders.
+	 *
+	 * If set to AENT_DROPOUT, inputs are randomly set to zero during each
+	 * iteration of training with probability noise_parameter.
+	 *
+	 * If set to AENT_GAUSSIAN, Gaussian noise with zero mean and noise_parameter
+	 * standard deviation is added to the inputs.
+	 *
+	 * Default value is AENT_NONE.
+	 * @param _noise_type noise type for denoising autoencoders
+	 */
+	void set_noise_type(EAENoiseType _noise_type)
+	{
+		this->noise_type = _noise_type;
+	}
+
+	/** Returns the noise type for denoising autoencoders */
+	EAENoiseType get_noise_type()
+	{
+		return this->noise_type;
+	}
+
+	/** Sets the noise parameter.
+	 * Controls the strength of the noise, depending on noise_type.
+	 *
+	 * @param _noise_parameter controls the strength of the noise
+	 */
+	void set_noise_parameter(float64_t _noise_parameter)
+	{
+		this->noise_parameter = _noise_parameter;
+	}
+
+	/** Returns the noise parameter */
+	float64_t get_noise_parameter()
+	{
+		return this->noise_parameter;
+	}
+
protected:
	/** Computes the error between the output layer's activations and the given
	 * target activations.
@@ -181,7 +219,18 @@ class CAutoencoder : public CNeuralNetwork
	template<class T>
	SGVector<T> get_section(SGVector<T> v, int32_t i);

-public:
+protected:
+	/** For contractive autoencoders [Rifai, 2011], a term:
+	 * \f[ \frac{\lambda}{N} \sum_{k=0}^{N-1} \left \| J(x_k) \right \|^2_F \f]
+	 * is added to the error, where \f$ \left \| J(x_k) \right \|^2_F \f$ is the
+	 * Frobenius norm of the Jacobian of the activations of the hidden layer
+	 * with respect to its inputs, \f$ N \f$ is the batch size, and
+	 * \f$ \lambda \f$ is the contraction coefficient.
+	 *
+	 * Default value is 0.0.
+	 */
+	float64_t m_contraction_coefficient;
+
	/** Noise type for denoising autoencoders.
	 *
	 * If set to AENT_DROPOUT, inputs are randomly set to zero during each
@@ -196,18 +245,6 @@

	/** Controls the strength of the noise, depending on noise_type */
	float64_t noise_parameter;
-
-protected:
-	/** For contractive autoencoders [Rifai, 2011], a term:
-	 * \f[ \frac{\lambda}{N} \sum_{k=0}^{N-1} \left \| J(x_k) \right \|^2_F \f]
-	 * is added to the error, where \f$ \left \| J(x_k) \right \|^2_F \f$ is the
-	 * Frobenius norm of the Jacobian of the activations of the hidden layer
-	 * with respect to its inputs, \f$ N \f$ is the batch size, and
-	 * \f$ \lambda \f$ is the contraction coefficient.
-	 *
-	 * Default value is 0.0.
-	 */
-	float64_t m_contraction_coefficient;
};
}
#endif
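With noise_type and noise_parameter now protected, user code goes through the accessors added above. A minimal sketch of turning an existing autoencoder into a denoising one; the helper name and the 0.2 dropout probability are illustrative, while the setters, the AENT_* values, and set_contraction_coefficient come from this header and the commit:

#include <shogun/neuralnets/Autoencoder.h>

using namespace shogun;

// Illustrative helper (not part of the commit).
void make_denoising(CAutoencoder* ae)
{
	ae->set_noise_type(AENT_DROPOUT);   // randomly zero inputs during training
	ae->set_noise_parameter(0.2);       // probability of dropping each input
	ae->set_contraction_coefficient(0); // keep the contractive term disabled

	// The getters mirror the setters, e.g. when logging the configuration.
	SG_SPRINT("noise parameter: %f\n", ae->get_noise_parameter());
}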
24 changes: 12 additions & 12 deletions src/shogun/neuralnets/DeepAutoencoder.cpp
@@ -112,19 +112,19 @@ void CDeepAutoencoder::pre_train(CFeatures* data)
		SG_UNREF(ae_encoding_layer);
		SG_UNREF(ae_decoding_layer);

-		ae->noise_type = EAENoiseType(pt_noise_type[i-1]);
-		ae->noise_parameter = pt_noise_parameter[i-1];
+		ae->set_noise_type(EAENoiseType(pt_noise_type[i-1]));
+		ae->set_noise_parameter(pt_noise_parameter[i-1]);
		ae->set_contraction_coefficient(pt_contraction_coefficient[i-1]);
-		ae->optimization_method = ENNOptimizationMethod(pt_optimization_method[i-1]);
-		ae->l2_coefficient = pt_l2_coefficient[i-1];
-		ae->l1_coefficient = pt_l1_coefficient[i-1];
-		ae->epsilon = pt_epsilon[i-1];
-		ae->max_num_epochs = pt_max_num_epochs[i-1];
-		ae->gd_learning_rate = pt_gd_learning_rate[i-1];
-		ae->gd_learning_rate_decay = pt_gd_learning_rate_decay[i-1];
-		ae->gd_momentum = pt_gd_momentum[i-1];
-		ae->gd_mini_batch_size = pt_gd_mini_batch_size[i-1];
-		ae->gd_error_damping_coeff = pt_gd_error_damping_coeff[i-1];
+		ae->set_optimization_method(ENNOptimizationMethod(pt_optimization_method[i-1]));
+		ae->set_l2_coefficient(pt_l2_coefficient[i-1]);
+		ae->set_l1_coefficient(pt_l1_coefficient[i-1]);
+		ae->set_epsilon(pt_epsilon[i-1]);
+		ae->set_max_num_epochs(pt_max_num_epochs[i-1]);
+		ae->set_gd_learning_rate(pt_gd_learning_rate[i-1]);
+		ae->set_gd_learning_rate_decay(pt_gd_learning_rate_decay[i-1]);
+		ae->set_gd_momentum(pt_gd_momentum[i-1]);
+		ae->set_gd_mini_batch_size(pt_gd_mini_batch_size[i-1]);
+		ae->set_gd_error_damping_coeff(pt_gd_error_damping_coeff[i-1]);

		// forward propagate the data to obtain the training data for the
		// current autoencoder
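A hedged sketch of how calling code might drive this loop, assuming the pt_* vectors are the per-layer pre-training parameters and are accessible to the caller (as their use above suggests); the helper name and the concrete values are illustrative:

#include <shogun/features/Features.h>
#include <shogun/neuralnets/DeepAutoencoder.h>

using namespace shogun;

// Illustrative helper (not part of the commit): configure every layer-wise
// autoencoder the same way before pre-training.
void pre_train_denoising(CDeepAutoencoder* dae, CFeatures* data)
{
	dae->pt_noise_type.set_const(AENT_DROPOUT); // one entry per pre-trained layer
	dae->pt_noise_parameter.set_const(0.5);     // dropout probability per layer
	dae->pt_max_num_epochs.set_const(50);       // cap each layer's pre-training
	dae->pre_train(data);                       // runs the loop shown above
}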
