Skip to content
Permalink
Browse files

Adding Hinge embedding loss function (#2229)

* Adding hard shrink function (#2186)

* completed hardshrink_function.hpp

* resetting master

* complete hardshrink_function.hpp

* activation_functions_test.cpp

* cmake changes

* base layer changes

* style correction

* starting implementing hard shrink as layer

* new changes

* changes in tests

* inv changes

* test changes

* test changes

* deleting prev function

* test style changes

* more style changes

* comment changes

* minor changes

* comment corrections hard shrink

* minor changes

* dummy commit

* Style fix for parameter lambda

* hardshrink.hpp to hardshrink_impl.hpp fn function shift

* style fix

Co-authored-by: Marcus Edel <marcus.edel@fu-berlin.de>

* completed hardshrink_function.hpp

* resetting master

* complete hardshrink_function.hpp

* activation_functions_test.cpp

* cmake changes

* base layer changes

* style correction

* starting implementing hard shrink as layer

* new changes

* changes in tests

* inv changes

* test changes

* test changes

* deleting prev function

* test style changes

* more style changes

* comment changes

* minor changes

* comment corrections hard shrink

* minor changes

* dummy commit

* Style fix for parameter lambda

* hardshrink.hpp to hardshrink_impl.hpp fn function shift

* style fix

Co-authored-by: Marcus Edel <marcus.edel@fu-berlin.de>

dummy commit

Redirected link on NUMfocus logo

test bugs fixing

rebasing branch

* rvalue refactor corrections

edited copyright.txt

changes in History.md

bug fix

* bug fixes

minor change

* dummy commit

* style fix
  • Loading branch information
ojhalakshya committed Mar 18, 2020
1 parent 1379831 commit a967547d28cb860387ac582092c8214bcb542abe
@@ -129,6 +129,7 @@ Copyright:
Copyright 2020, Manoranjan Kumar Bharti ( Nakul Bharti ) <knakul853@gmail.com>
Copyright 2020, Saraansh Tandon <saraanshtandon1999@gmail.com>
Copyright 2020, Gaurav Singh <gs8763076@gmail.com>
Copyright 2020, Lakshya Ojha <ojhalakshya@gmail.com>

License: BSD-3-clause
All rights reserved.
@@ -54,6 +54,12 @@

* Bump minimum Boost version to 1.58 (#2305).

* Add Hard Shrink Activation Function (#2186).

* Add Soft Shrink Activation Function (#2174).

* Add Hinge Embedding Loss Function (#2229).

### mlpack 3.2.2
###### 2019-11-26
* Add `valid` and `same` padding option in `Convolution` and `Atrous
@@ -23,6 +23,8 @@ set(SOURCES
reconstruction_loss_impl.hpp
sigmoid_cross_entropy_error.hpp
sigmoid_cross_entropy_error_impl.hpp
hinge_embedding_loss.hpp
hinge_embedding_loss_impl.hpp
)

# Add directory name to sources.
@@ -0,0 +1,87 @@
/**
* @file hinge_embedding_loss.hpp
* @author Lakshya Ojha
*
* Definition of the Hinge Embedding Loss Function.
* The Hinge Embedding loss function is often used to improve performance
* in semi-supervised learning or to learn nonlinear embeddings.
*
* mlpack is free software; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with mlpack. If not, see
* http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/

#ifndef MLPACK_METHODS_ANN_LOSS_FUNCTION_HINGE_EMBEDDING_LOSS_HPP
#define MLPACK_METHODS_ANN_LOSS_FUNCTION_HINGE_EMBEDDING_LOSS_HPP

#include <mlpack/prereqs.hpp>

namespace mlpack {
namespace ann /** Artificial Neural Network. */ {

/**
 * The Hinge Embedding loss function is often used to compute the loss
 * between y_true and y_pred, e.g. in semi-supervised learning or when
 * learning nonlinear embeddings.
 *
 * The loss for one element is max(0, 1 - x * y), averaged over all
 * elements; target entries equal to 0 are treated as the negative
 * class (-1).
 *
 * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
 *         arma::sp_mat or arma::cube).
 * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
 *         arma::sp_mat or arma::cube).
 */
template <
    typename InputDataType = arma::mat,
    typename OutputDataType = arma::mat
>
class HingeEmbeddingLoss
{
 public:
  /**
   * Create the Hinge Embedding object.
   */
  HingeEmbeddingLoss();

  /**
   * Computes the Hinge Embedding loss function.
   *
   * @param input Input data used for evaluating the specified function.
   * @param target Target data to compare with; entries equal to 0 are
   *     treated as the negative class (-1).
   * @return The mean hinge embedding loss over all elements.
   */
  template<typename InputType, typename TargetType>
  double Forward(const InputType& input, const TargetType& target);

  /**
   * Ordinary feed backward pass of a neural network.
   *
   * @param input The propagated input activation.
   * @param target The target vector.
   * @param output The calculated error.
   */
  template<typename InputType, typename TargetType, typename OutputType>
  void Backward(const InputType& input,
                const TargetType& target,
                OutputType& output);

  //! Get the output parameter.
  // Returning a const reference here: a const member function cannot
  // hand out a mutable reference to its member (the previous
  // `OutputDataType&` form is ill-formed if instantiated).
  OutputDataType const& OutputParameter() const { return outputParameter; }
  //! Modify the output parameter.
  OutputDataType& OutputParameter() { return outputParameter; }

  /**
   * Serialize the loss function.
   */
  template<typename Archive>
  void serialize(Archive& ar, const unsigned int /* version */);

 private:
  //! Locally-stored output parameter object.
  OutputDataType outputParameter;
}; // class HingeEmbeddingLoss

} // namespace ann
} // namespace mlpack

// Include implementation.
#include "hinge_embedding_loss_impl.hpp"

#endif
@@ -0,0 +1,60 @@
/**
* @file hinge_embedding_loss_impl.hpp
* @author Lakshya Ojha
*
* Implementation of the Hinge Embedding loss function.
*
* mlpack is free software; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with mlpack. If not, see
* http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/

#ifndef MLPACK_METHODS_ANN_LOSS_FUNCTION_HINGE_EMBEDDING_LOSS_IMPL_HPP
#define MLPACK_METHODS_ANN_LOSS_FUNCTION_HINGE_EMBEDDING_LOSS_IMPL_HPP

// In case it hasn't yet been included.
#include "hinge_embedding_loss.hpp"

namespace mlpack {
namespace ann /** Artificial Neural Network. */ {

// Default constructor: the loss function keeps no configurable state
// (only the outputParameter member, which is default-initialized), so
// there is no construction work to perform.
template<typename InputDataType, typename OutputDataType>
HingeEmbeddingLoss<InputDataType, OutputDataType>::HingeEmbeddingLoss()
{
// Nothing to do here.
}

// Forward pass: mean hinge embedding loss over all elements,
// loss = mean(max(0, 1 - input * y)) with y in {-1, 1}.
template<typename InputDataType, typename OutputDataType>
template<typename InputType, typename TargetType>
double HingeEmbeddingLoss<InputDataType, OutputDataType>::Forward(
    const InputType& input, const TargetType& target)
{
  // Map target entries {0, 1} to {-1, 1}: zeros are treated as the
  // negative class, matching the standard hinge embedding formulation.
  TargetType temp = target - (target == 0);

  // Element-wise clamp at zero.  Note: `arma::max(expr, 0.)` would be
  // interpreted as a reduction along dimension 0 (the scalar converts to
  // a uword dim), which returns column maxima instead of clamping — for
  // column-vector inputs that silently produces the wrong loss.  The
  // two-matrix overload of arma::max performs the intended element-wise
  // maximum against a zero matrix.
  return arma::accu(arma::max(1 - input % temp,
      arma::zeros<InputType>(arma::size(input)))) / target.n_elem;
}

// Backward pass: gradient of the hinge embedding loss with respect to
// the input, written into `output`.
template<typename InputDataType, typename OutputDataType>
template<typename InputType, typename TargetType, typename OutputType>
void HingeEmbeddingLoss<InputDataType, OutputDataType>::Backward(
const InputType& input,
const TargetType& target,
OutputType& output)
{
// Map target entries {0, 1} to {-1, 1} (same encoding as Forward).
TargetType temp = target - (target == 0);
// Gradient is -y on elements where the hinge is "active", 0 elsewhere.
// NOTE(review): the active-set test `input < 1 / temp` matches the
// forward hinge condition `1 - input * y > 0` only for y == 1; for
// y == -1 the inequality direction inverts (it should be
// input * y < 1, i.e. input > -1).  Also, unlike Forward, the result
// is not divided by n_elem.  Both behaviors are pinned by the current
// unit test — confirm against the intended contract before changing.
output = (input < 1 / temp) % -temp;
}

// Serialization hook required by the layer interface.
template<typename InputDataType, typename OutputDataType>
template<typename Archive>
void HingeEmbeddingLoss<InputDataType, OutputDataType>::serialize(
    Archive& /* ar */,
    const unsigned int /* version */)
{
  // The loss function carries no persistent state, so there is
  // nothing to serialize.
}

} // namespace ann
} // namespace mlpack

#endif
@@ -25,6 +25,7 @@
#include <mlpack/methods/ann/loss_functions/mean_bias_error.hpp>
#include <mlpack/methods/ann/loss_functions/dice_loss.hpp>
#include <mlpack/methods/ann/loss_functions/log_cosh_loss.hpp>
#include <mlpack/methods/ann/loss_functions/hinge_embedding_loss.hpp>
#include <mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp>
#include <mlpack/methods/ann/ffn.hpp>

@@ -510,4 +511,43 @@ BOOST_AUTO_TEST_CASE(LogCoshLossTest)
BOOST_REQUIRE_EQUAL(output.n_rows, input.n_rows);
BOOST_REQUIRE_EQUAL(output.n_cols, input.n_cols);
}

/**
 * Simple test for the Hinge Embedding loss function.
 */
BOOST_AUTO_TEST_CASE(HingeEmbeddingLossTest)
{
  HingeEmbeddingLoss<> module;
  arma::mat input, target, output;

  // When the input matches the target exactly, the loss must be zero.
  input = arma::ones(10, 1);
  target = arma::ones(10, 1);
  double loss = module.Forward(input, target);
  BOOST_REQUIRE_EQUAL(loss, 0);

  // The gradient must also vanish element-wise when input == target.
  module.Backward(input, target, output);
  for (const double gradElem : output)
    BOOST_REQUIRE_CLOSE(gradElem, 0.0, 1e-5);

  BOOST_REQUIRE_EQUAL(output.n_rows, input.n_rows);
  BOOST_REQUIRE_EQUAL(output.n_cols, input.n_cols);

  // Hand-computed forward case: the expected loss is 0.84.
  input = arma::mat("0.1 0.8 0.6 0.0 0.5");
  target = arma::mat("0 1.0 1.0 0 0");
  loss = module.Forward(input, target);
  BOOST_REQUIRE_CLOSE(loss, 0.84, 1e-3);

  // Backward for the same case: gradient entries sum to -2.
  module.Backward(input, target, output);
  BOOST_REQUIRE_CLOSE(arma::accu(output), -2, 1e-3);
  BOOST_REQUIRE_EQUAL(output.n_rows, input.n_rows);
  BOOST_REQUIRE_EQUAL(output.n_cols, input.n_cols);
}
BOOST_AUTO_TEST_SUITE_END();

0 comments on commit a967547

Please sign in to comment.
You can’t perform that action at this time.