Merge pull request #1322 from iglesias/feature/lmnn
Faster computation of LMNN objective
iglesias committed Jul 28, 2013
2 parents ce0aca3 + ce25859 commit cb21763
Showing 4 changed files with 6 additions and 31 deletions.
3 changes: 1 addition & 2 deletions src/shogun/metric/LMNN.cpp
@@ -97,8 +97,7 @@ void CLMNN::train(SGMatrix<float64_t> init_transform)
 
 		// Compute objective
 		SG_DEBUG("Computing objective.\n")
-		obj[iter] = CLMNNImpl::compute_objective(x, L, outer_products, target_nn,
-				cur_impostors, m_regularization);
+		obj[iter] = CLMNNImpl::compute_objective(L, gradient);
 
 		// Correct step size
 		CLMNNImpl::correct_stepsize(stepsize, obj, iter);
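
Why the call site no longer needs the features, outer products, target neighbors, impostor set, or regularization weight: the LMNN objective is linear in the Mahalanobis matrix M = LᵀL, so it can be read off the gradient accumulator the solver already maintains. The identity below is a hedged sketch; it assumes `gradient` holds G = ∂ε/∂M built from the same pull terms and the currently active impostors, with C_{ij} the outer products, μ the regularization weight, and N the impostor set of the deleted signature:

\varepsilon(M) = (1-\mu)\sum_{i}\sum_{j \rightsquigarrow i} \operatorname{tr}(M\,C_{ij})
  + \mu \sum_{(i,j,l) \in \mathcal{N}} \bigl[\, 1 + \operatorname{tr}(M\,C_{ij}) - \operatorname{tr}(M\,C_{il}) \,\bigr]_{+},
  \qquad M = L^{\top} L,

G = \frac{\partial \varepsilon}{\partial M}
  = (1-\mu)\sum_{i}\sum_{j \rightsquigarrow i} C_{ij}
  + \mu \sum_{(i,j,l)\,\text{active}} \bigl( C_{ij} - C_{il} \bigr)
  \;\Longrightarrow\;
  \operatorname{tr}(M\,G) = \varepsilon(M) - \mu\,\lvert \mathcal{N}_{\text{active}} \rvert .

By linearity of the trace, a single tr(MG) reproduces every L-dependent term of the old double loop; the value differs from the old objective only by the additive term μ·|N_active|, which does not depend on L for a fixed impostor set.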
30 changes: 3 additions & 27 deletions src/shogun/metric/LMNNImpl.cpp
@@ -245,36 +245,12 @@ void CLMNNImpl::gradient_step(MatrixXd& L, const MatrixXd& G, float64_t stepsize)
 	L -= stepsize*(2*L*G);
 }
 
-float64_t CLMNNImpl::compute_objective(const CDenseFeatures<float64_t>* x, const MatrixXd& L,
-		const OuterProductsMatrixType& C, const SGMatrix<index_t> target_nn,
-		const ImpostorsSetType& Nc, float64_t regularization)
+float64_t CLMNNImpl::compute_objective(const MatrixXd& L, const MatrixXd& G)
 {
-	// get the number of examples from data
-	int32_t n = x->get_num_vectors();
-	// get the number of target neighbors per example (k) from the arguments
-	int32_t k = target_nn.num_rows;
-	// initialize the objective
-	float64_t obj = 0;
 	// pre-compute the Mahalanobis distance matrix
 	MatrixXd M = L.transpose()*L;
-
-	// add pull contributions to the objective
-	for (int32_t i = 0; i < n; ++i) // for each training example
-	{
-		for (int32_t j = 0; j < k; ++j) // for each target neighbor
-			obj += (1-regularization)*TRACE(M,C[i][ target_nn(j,i) ]);
-	}
-
-	// add push contributions to the objective
-	for (ImpostorsSetType::iterator it = Nc.begin(); it != Nc.end(); ++it) // for each possible impostor
-	{
-		double hinge = 1 + TRACE(M,C[it->example][it->target]) - TRACE(M,C[it->example][it->impostor]);
-
-		if (hinge > 0)
-			obj += regularization*hinge;
-	}
-
-	return obj;
+	// compute objective
+	return TRACE(M,G);
 }
 
 void CLMNNImpl::correct_stepsize(float64_t& stepsize, const SGVector<float64_t> obj, const uint32_t iter)
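
For reference, a minimal standalone Eigen sketch of the new evaluation (not Shogun's API; it assumes the TRACE macro expands to the element-wise product sum, which equals tr(M·G) because M = LᵀL is symmetric):

#include <Eigen/Dense>

// Sketch only: evaluate tr((L^T L) * G) for the accumulated gradient matrix G.
double lmnn_objective_sketch(const Eigen::MatrixXd& L, const Eigen::MatrixXd& G)
{
	const Eigen::MatrixXd M = L.transpose() * L;  // Mahalanobis matrix induced by L
	return (M.array() * G.array()).sum();         // tr(M*G) without forming the product M*G
}

This also matches the gradient step kept above as context, L -= stepsize*(2*L*G): for symmetric G, the derivative of tr(LᵀL G) with respect to L is 2LG. Because the trace form drops the constant hinge offsets, the per-iteration objective values change, which presumably accounts for the updated expected transform in the unit test below.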
2 changes: 1 addition & 1 deletion src/shogun/metric/LMNNImpl.h
@@ -109,7 +109,7 @@ class CLMNNImpl
 		static void gradient_step(Eigen::MatrixXd& L, const Eigen::MatrixXd& G, float64_t stepsize);
 
 		/** compute LMNN objective */
-		static float64_t compute_objective(const CDenseFeatures<float64_t>* x, const Eigen::MatrixXd& L, const OuterProductsMatrixType& C, const SGMatrix<index_t> target_nn, const ImpostorsSetType& Nc, float64_t mu);
+		static float64_t compute_objective(const Eigen::MatrixXd& G, const Eigen::MatrixXd& L);
 
 		/** correct step size depending on the last fluctuation of the objective */
 		static void correct_stepsize(float64_t& stepsize, const SGVector<float64_t> obj, const uint32_t iter);
2 changes: 1 addition & 1 deletion tests/unit/metric/LMNN_unittest.cc
@@ -56,7 +56,7 @@ TEST(LMNN,train)
 
 	// check linear transform solution
 	SGMatrix<float64_t> L=lmnn->get_linear_transform();
-	EXPECT_NEAR(L(0,0),0.983061830081943477,1e-5);
+	EXPECT_NEAR(L(0,0),0.991577280560543,1e-5);
 	EXPECT_NEAR(L(0,1),0,1e-5);
 	EXPECT_NEAR(L(1,0),0,1e-5);
 	EXPECT_NEAR(L(1,1),1.00000080000000002,1e-5);
