diff --git a/src/shogun/metric/LMNN.cpp b/src/shogun/metric/LMNN.cpp
index 9fddd7274f8..95546f035f2 100644
--- a/src/shogun/metric/LMNN.cpp
+++ b/src/shogun/metric/LMNN.cpp
@@ -97,8 +97,7 @@ void CLMNN::train(SGMatrix<float64_t> init_transform)
 
 		// Compute objective
 		SG_DEBUG("Computing objective.\n")
-		obj[iter] = CLMNNImpl::compute_objective(x, L, outer_products, target_nn,
-				cur_impostors, m_regularization);
+		obj[iter] = CLMNNImpl::compute_objective(L, gradient);
 
 		// Correct step size
 		CLMNNImpl::correct_stepsize(stepsize, obj, iter);
diff --git a/src/shogun/metric/LMNNImpl.cpp b/src/shogun/metric/LMNNImpl.cpp
index 8881ecea5db..72222b5b47a 100644
--- a/src/shogun/metric/LMNNImpl.cpp
+++ b/src/shogun/metric/LMNNImpl.cpp
@@ -245,36 +245,12 @@ void CLMNNImpl::gradient_step(MatrixXd& L, const MatrixXd& G, float64_t stepsize
 	L -= stepsize*(2*L*G);
 }
 
-float64_t CLMNNImpl::compute_objective(const CDenseFeatures<float64_t>* x, const MatrixXd& L,
-		const OuterProductsMatrixType& C, const SGMatrix<index_t> target_nn,
-		const ImpostorsSetType& Nc, float64_t regularization)
+float64_t CLMNNImpl::compute_objective(const MatrixXd& L, const MatrixXd& G)
 {
-	// get the number of examples from data
-	int32_t n = x->get_num_vectors();
-	// get the number of target neighbors per example (k) from the arguments
-	int32_t k = target_nn.num_rows;
-	// initialize the objective
-	float64_t obj = 0;
 	// pre-compute the Mahalanobis distance matrix
 	MatrixXd M = L.transpose()*L;
-
-	// add pull contributions to the objective
-	for (int32_t i = 0; i < n; ++i)	// for each training example
-	{
-		for (int32_t j = 0; j < k; ++j)	// for each target neighbor
-			obj += (1-regularization)*TRACE(M,C[i][ target_nn(j,i) ]);
-	}
-
-	// add push contributions to the objective
-	for (ImpostorsSetType::iterator it = Nc.begin(); it != Nc.end(); ++it)	// for each possible impostor
-	{
-		double hinge = 1 + TRACE(M,C[it->example][it->target]) - TRACE(M,C[it->example][it->impostor]);
-
-		if (hinge > 0)
-			obj += regularization*hinge;
-	}
-
-	return obj;
+	// compute objective
+	return TRACE(M,G);
 }
 
 void CLMNNImpl::correct_stepsize(float64_t& stepsize, const SGVector<float64_t> obj, const uint32_t iter)
diff --git a/src/shogun/metric/LMNNImpl.h b/src/shogun/metric/LMNNImpl.h
index 2332894b8b7..26e487ba335 100644
--- a/src/shogun/metric/LMNNImpl.h
+++ b/src/shogun/metric/LMNNImpl.h
@@ -109,7 +109,7 @@ class CLMNNImpl
 		static void gradient_step(Eigen::MatrixXd& L, const Eigen::MatrixXd& G, float64_t stepsize);
 
 		/** compute LMNN objective */
-		static float64_t compute_objective(const CDenseFeatures<float64_t>* x, const Eigen::MatrixXd& L, const OuterProductsMatrixType& C, const SGMatrix<index_t> target_nn, const ImpostorsSetType& Nc, float64_t mu);
+		static float64_t compute_objective(const Eigen::MatrixXd& L, const Eigen::MatrixXd& G);
 
 		/** correct step size depending on the last fluctuation of the objective */
 		static void correct_stepsize(float64_t& stepsize, const SGVector<float64_t> obj, const uint32_t iter);
diff --git a/tests/unit/metric/LMNN_unittest.cc b/tests/unit/metric/LMNN_unittest.cc
index 386b7edc902..3973db7a9b8 100644
--- a/tests/unit/metric/LMNN_unittest.cc
+++ b/tests/unit/metric/LMNN_unittest.cc
@@ -56,7 +56,7 @@ TEST(LMNN,train)
 
 	// check linear transform solution
 	SGMatrix<float64_t> L=lmnn->get_linear_transform();
-	EXPECT_NEAR(L(0,0),0.983061830081943477,1e-5);
+	EXPECT_NEAR(L(0,0),0.991577280560543,1e-5);
 	EXPECT_NEAR(L(0,1),0,1e-5);
 	EXPECT_NEAR(L(1,0),0,1e-5);
 	EXPECT_NEAR(L(1,1),1.00000080000000002,1e-5);
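Note on the refactored objective: with the gradient accumulator G already available, compute_objective reduces to the Frobenius inner product of M = L^T*L with G. Below is a minimal, standalone Eigen sketch (illustrative only, not Shogun code) of that computation; it assumes the TRACE(m,n) macro in LMNNImpl.cpp expands to the element-wise product-and-sum (m.array()*n.array()).sum(), and it uses a random symmetric matrix as a stand-in for G.

#include <Eigen/Dense>
#include <iostream>

int main()
{
	const int dim = 3;

	// An arbitrary linear transform L and a symmetric stand-in for the
	// gradient accumulator G (in LMNN, G aggregates pull/push outer products).
	Eigen::MatrixXd L = Eigen::MatrixXd::Random(dim, dim);
	Eigen::MatrixXd A = Eigen::MatrixXd::Random(dim, dim);
	Eigen::MatrixXd G = 0.5*(A + A.transpose());

	// Pre-compute the Mahalanobis matrix, as compute_objective does.
	Eigen::MatrixXd M = L.transpose()*L;

	// Assumed TRACE expansion: Frobenius inner product <M, G> ...
	double obj_elementwise = (M.array()*G.array()).sum();
	// ... which equals trace(M*G) because M is symmetric.
	double obj_trace = (M*G).trace();

	std::cout << obj_elementwise << " vs " << obj_trace << std::endl;
	return 0;
}

Since G is already assembled for the gradient step, reusing it turns the per-iteration objective evaluation into a single d-by-d inner product instead of the old pass over all target-neighbor pairs and impostor triplets.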