using computation_controller macro (#4286)
shubham808 authored and karlnapf committed May 21, 2018
1 parent 41755fb commit ab4779c
Showing 14 changed files with 55 additions and 61 deletions.
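
Every hunk below follows the same pattern: the `cancel_computation()` test (and, in LPBoost and MKL, the max-train-time test) is removed from the loop condition and replaced by a single `COMPUTATION_CONTROLLERS` macro at the top of the loop body. The macro itself is defined elsewhere in the Shogun tree and is not part of this diff; the stand-alone sketch below only illustrates the shape of the pattern with a hypothetical expansion (the real macro may pause, continue, or bail out differently).

```cpp
// Illustration only: a hypothetical stand-in for Shogun's COMPUTATION_CONTROLLERS
// macro, showing the pattern this commit adopts. The real macro lives elsewhere
// in the Shogun sources and may expand differently.
#include <atomic>
#include <cstdio>

static std::atomic<bool> g_cancel{false}; // per-machine state in the real code

static bool cancel_computation() { return g_cancel.load(); }
static void pause_computation() { /* would block while a pause is requested */ }

// Hypothetical expansion: leave the current loop when cancelled, otherwise
// honour any pause request before doing the real work of the iteration.
#define COMPUTATION_CONTROLLERS                                                \
    if (cancel_computation())                                                  \
        break;                                                                 \
    pause_computation();

int main()
{
    bool converged = false;
    int iter = 0;
    const int max_iter = 5;

    // Before this commit the check sat in the loop condition itself, e.g.
    //   while (!cancel_computation() && !converged && iter < max_iter)
    while (!converged && iter < max_iter)
    {
        COMPUTATION_CONTROLLERS
        std::printf("training iteration %d\n", iter);
        ++iter; // converged stays false in this toy loop
    }
    return 0;
}
```

With the check centralised in one macro, the loop conditions in the hunks below read as plain convergence or iteration-count tests, while premature-stop handling stays consistent across the fourteen files.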
3 changes: 2 additions & 1 deletion src/shogun/classifier/AveragedPerceptron.cpp
@@ -71,8 +71,9 @@ bool CAveragedPerceptron::train_machine(CFeatures* data)

    //loop till we either get everything classified right or reach max_iter

-   while (!(cancel_computation()) && (!converged && iter < max_iter))
+   while (!converged && iter < max_iter)
    {
+       COMPUTATION_CONTROLLERS
        converged=true;
        SG_INFO("Iteration Number : %d of max %d\n", iter, max_iter);

7 changes: 3 additions & 4 deletions src/shogun/classifier/LPBoost.cpp
@@ -122,8 +122,10 @@ bool CLPBoost::train_machine(CFeatures* data)
    int32_t num_hypothesis=0;
    CTime time;

-   while (!(cancel_computation()))
+   while (get_max_train_time() <= 0 ||
+          time.cur_time_diff() <= get_max_train_time())
    {
+       COMPUTATION_CONTROLLERS
        int32_t max_dim=0;
        float64_t violator=find_max_violator(max_dim);
        SG_PRINT("iteration:%06d violator: %10.17f (>1.0) chosen: %d\n", num_hypothesis, violator, max_dim)
@@ -146,9 +148,6 @@ bool CLPBoost::train_machine(CFeatures* data)
        solver.optimize(u);
        //CMath::display_vector(u, num_vec, "u");
        num_hypothesis++;
-
-       if (get_max_train_time()>0 && time.cur_time_diff()>get_max_train_time())
-           break;
    }
    float64_t* lambda=SG_MALLOC(float64_t, num_hypothesis);
    solver.optimize(u, lambda);
3 changes: 2 additions & 1 deletion src/shogun/classifier/Perceptron.cpp
@@ -75,8 +75,9 @@ bool CPerceptron::train_machine(CFeatures* data)
    }

    //loop till we either get everything classified right or reach max_iter
-   while (!(cancel_computation()) && (!converged && iter < max_iter))
+   while (!converged && iter < max_iter)
    {
+       COMPUTATION_CONTROLLERS
        converged=true;
        auto iter_train_labels = train_labels.begin();
        auto iter_output = output.begin();
26 changes: 14 additions & 12 deletions src/shogun/classifier/mkl/MKL.cpp
@@ -459,27 +459,29 @@ bool CMKL::train_machine(CFeatures* data)
    {
        float64_t* sumw = SG_MALLOC(float64_t, num_kernels);

-
-
-       while (true)
+       while (get_max_train_time() <= 0 ||
+              training_time_clock.cur_time_diff() <= get_max_train_time())
        {
+           COMPUTATION_CONTROLLERS
            svm->train();

            float64_t suma=compute_sum_alpha();
            compute_sum_beta(sumw);

-           if((training_time_clock.cur_time_diff()>get_max_train_time ())&&(get_max_train_time ()>0))
-           {
-               SG_SWARNING("MKL Algorithm terminates PREMATURELY due to current training time exceeding get_max_train_time ()= %f . It may have not converged yet!\n",get_max_train_time ())
-               break;
-           }
-
-
            mkl_iterations++;
-           if (perform_mkl_step(sumw, suma) || cancel_computation())
+           if (perform_mkl_step(sumw, suma))
                break;
        }

+       if ((training_time_clock.cur_time_diff() > get_max_train_time()) &&
+           (get_max_train_time() > 0))
+       {
+           SG_SWARNING(
+               "MKL Algorithm terminates PREMATURELY due to current training "
+               "time exceeding get_max_train_time ()= %f . It may have not "
+               "converged yet!\n",
+               get_max_train_time())
+       }
+
        SG_FREE(sumw);
    }
#ifdef USE_CPLEX
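
The LPBoost.cpp and MKL.cpp hunks above also move the wall-clock budget out of an in-loop break and into the loop condition, with a value of zero or less meaning "no limit". A minimal, self-contained sketch of that shape, using std::chrono and local stand-ins rather than Shogun's CTime and getters:

```cpp
// Sketch of the time-budget pattern used in the LPBoost/MKL hunks above;
// the clock, the limit and the iteration body are stand-ins, not Shogun API.
#include <chrono>
#include <cstdio>

int main()
{
    using clock = std::chrono::steady_clock;
    const double max_train_time = 0.5; // seconds; <= 0 means "no limit"
    const auto start = clock::now();

    auto cur_time_diff = [&] {
        return std::chrono::duration<double>(clock::now() - start).count();
    };

    int num_hypothesis = 0;
    while (max_train_time <= 0 || cur_time_diff() <= max_train_time)
    {
        // COMPUTATION_CONTROLLERS would sit here in the Shogun loops.
        ++num_hypothesis; // one boosting / MKL iteration
        if (num_hypothesis >= 1000)
            break; // toy convergence criterion
    }
    std::printf("stopped after %d iterations\n", num_hypothesis);
    return 0;
}
```

MKL.cpp additionally moves its "terminates PREMATURELY" warning to after the loop, so the message is emitted once training has actually stopped rather than from inside the iteration.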
4 changes: 2 additions & 2 deletions src/shogun/classifier/mkl/MKLMulticlass.cpp
@@ -366,9 +366,9 @@ bool CMKLMulticlass::train_machine(CFeatures* data)
    int32_t numberofsilpiterations=0;
    bool final=false;

-   while (!(cancel_computation()) && !final)
+   while (!final)
    {
-
+       COMPUTATION_CONTROLLERS
        //curweights.clear();
        lpw->computeweights(curweights);
        weightshistory.push_back(curweights);
9 changes: 6 additions & 3 deletions src/shogun/classifier/svm/LibLinear.cpp
@@ -326,8 +326,9 @@ void CLibLinear::solve_l2r_l1l2_svc(

    auto pb = progress(range(10));
    CTime start_time;
-   while (iter < get_max_iterations() && !cancel_computation())
+   while (iter < get_max_iterations())
    {
+       COMPUTATION_CONTROLLERS
        if (m_max_train_time > 0 &&
            start_time.cur_time_diff() > m_max_train_time)
            break;
@@ -539,8 +540,9 @@ void CLibLinear::solve_l1r_l2_svc(

    auto pb = progress(range(10));
    CTime start_time;
-   while (iter < get_max_iterations() && !cancel_computation())
+   while (iter < get_max_iterations())
    {
+       COMPUTATION_CONTROLLERS
        if (m_max_train_time > 0 &&
            start_time.cur_time_diff() > m_max_train_time)
            break;
@@ -913,8 +915,9 @@ void CLibLinear::solve_l1r_lr(

    auto pb = progress(range(10));
    CTime start_time;
-   while (iter < get_max_iterations() && !cancel_computation())
+   while (iter < get_max_iterations())
    {
+       COMPUTATION_CONTROLLERS
        if (m_max_train_time > 0 &&
            start_time.cur_time_diff() > m_max_train_time)
            break;
3 changes: 2 additions & 1 deletion src/shogun/classifier/svm/MPDSVM.cpp
@@ -92,8 +92,9 @@ bool CMPDSVM::train_machine(CFeatures* data)
    }

    // go ...
-   while (niter++ < maxiter && !cancel_computation())
+   while (niter++ < maxiter)
    {
+       COMPUTATION_CONTROLLERS
        int32_t maxpidx=-1;
        float64_t maxpviol = -1;
        //float64_t maxdviol = CMath::abs(detas[0]);
14 changes: 7 additions & 7 deletions src/shogun/classifier/svm/NewtonSVM.cpp
@@ -80,16 +80,11 @@ bool CNewtonSVM::train_machine(CFeatures* data)
    float64_t obj, *grad=SG_MALLOC(float64_t, x_d+1);
    float64_t t;

-   while (!cancel_computation())
+   while (iter <= num_iter)
    {
+       COMPUTATION_CONTROLLERS
        iter++;

-       if (iter>num_iter)
-       {
-           SG_PRINT("Maximum number of Newton steps reached. Try larger lambda")
-           break;
-       }
-
        obj_fun_linear(weights, out, &obj, sv, &size_sv, grad);

#ifdef DEBUG_NEWTON
@@ -211,6 +206,11 @@ bool CNewtonSVM::train_machine(CFeatures* data)
            break;
    }

+   if (iter > num_iter)
+   {
+       SG_PRINT("Maximum number of Newton steps reached. Try larger lambda")
+   }
+
#ifdef V_NEWTON
    SG_PRINT("FINAL W AND BIAS Vector=\n\n")
    CMath::display_matrix(weights, x_d+1, 1);
3 changes: 2 additions & 1 deletion src/shogun/classifier/svm/OnlineSVMSGD.cpp
@@ -89,8 +89,9 @@ bool COnlineSVMSGD::train(CFeatures* data)
        is_log_loss = true;

    int32_t vec_count;
-   for (int32_t e = 0; e < epochs && (!cancel_computation()); e++)
+   for (int32_t e = 0; e < epochs; e++)
    {
+       COMPUTATION_CONTROLLERS
        vec_count=0;
        count = skip;
        while (features->get_next_example())
3 changes: 2 additions & 1 deletion src/shogun/classifier/svm/SGDQN.cpp
@@ -129,8 +129,9 @@ bool CSGDQN::train(CFeatures* data)
    if ((loss_type == L_LOGLOSS) || (loss_type == L_LOGLOSSMARGIN))
        is_log_loss = true;

-   for (int32_t e = 0; e < epochs && (!cancel_computation()); e++)
+   for (int32_t e = 0; e < epochs; e++)
    {
+       COMPUTATION_CONTROLLERS
        count = skip;
        bool updateB=false;
        for (int32_t i=0; i<num_vec; i++)
7 changes: 3 additions & 4 deletions src/shogun/classifier/svm/SVMLight.cpp
@@ -647,13 +647,12 @@ int32_t CSVMLight::optimize_to_convergence(int32_t* docs, int32_t* label, int32_
    for (;((iteration<100 || (!mkl_converged && callback) ) || (retrain && (!terminate))); iteration++){
#else

-   for (; ((!cancel_computation()) &&
-           ((iteration < 3 || (!mkl_converged && callback)) ||
-            (retrain && (!terminate))));
+   for (; (iteration < 3 || (!mkl_converged && callback)) ||
+          (retrain && (!terminate));
        iteration++)
    {
#endif
-
+       COMPUTATION_CONTROLLERS
        if(use_kernel_cache)
            kernel->set_time(iteration); /* for lru cache */

25 changes: 4 additions & 21 deletions src/shogun/machine/KernelMachine.cpp
@@ -357,13 +357,9 @@ SGVector<float64_t> CKernelMachine::apply_get_outputs(CFeatures* data)
                    ? num_vectors
                    : (thread_num + 1) * step;

-#ifdef WIN32
            for (int32_t vec = start; vec < end; vec++)
-#else
-           for (int32_t vec = start; vec < end && !cancel_computation();
-                vec++)
-#endif
            {
+               COMPUTATION_CONTROLLERS
                pb.print_progress();

                ASSERT(kernel)
@@ -386,11 +382,6 @@ SGVector<float64_t> CKernelMachine::apply_get_outputs(CFeatures* data)
            }
            pb.complete();
        }
-
-#ifndef WIN32
-       if (cancel_computation())
-           SG_INFO("prematurely stopped. \n")
-#endif
    }

    SG_DEBUG("leaving %s::apply_get_outputs(%s at %p)\n",
@@ -516,12 +507,10 @@ SGVector<float64_t> CKernelMachine::apply_locked_get_output(
        int32_t start = thread_num * step;
        int32_t end =
            (thread_num == num_threads) ? num_inds : (thread_num + 1) * step;
-#ifdef WIN32
+
        for (int32_t vec = start; vec < end; vec++)
-#else
-       for (int32_t vec = start; vec < end && !cancel_computation(); vec++)
-#endif
        {
+           COMPUTATION_CONTROLLERS
            pb.print_progress();
            index_t index = indices[vec];
            ASSERT(kernel)
@@ -542,13 +531,7 @@ SGVector<float64_t> CKernelMachine::apply_locked_get_output(
            }
        }
    }
-
-#ifndef WIN32
-   if (cancel_computation())
-       SG_INFO("prematurely stopped.\n")
-   else
-#endif
-       pb.complete();
+   pb.complete();

    return output;
}
6 changes: 4 additions & 2 deletions src/shogun/multiclass/KNN.cpp
@@ -128,8 +128,9 @@ SGMatrix<index_t> CKNN::nearest_neighbors()
    auto pb = progress(range(n), *this->io);

    //for each test example
-   for (int32_t i = 0; i < n && (!cancel_computation()); i++)
+   for (int32_t i = 0; i < n; i++)
    {
+       COMPUTATION_CONTROLLERS
        pb.print_progress();

        //lhs idx 0..num train examples-1 (i.e., all train examples) and rhs idx i
@@ -211,8 +212,9 @@ CMulticlassLabels* CKNN::classify_NN()
    auto pb = progress(range(num_lab), *this->io);

    // for each test example
-   for (int32_t i = 0; i < num_lab && (!cancel_computation()); i++)
+   for (int32_t i = 0; i < num_lab; i++)
    {
+       COMPUTATION_CONTROLLERS
        pb.print_progress();

        // get distances from i-th test example to 0..num_m_train_labels-1 train examples
3 changes: 2 additions & 1 deletion src/shogun/transfer/multitask/LibLinearMTL.cpp
@@ -251,8 +251,9 @@ void CLibLinearMTL::solve_l2r_l1l2_svc(const liblinear_problem *prob, double eps

    auto pb = progress(range(10));
    CTime start_time;
-   while (iter < max_iterations && !cancel_computation())
+   while (iter < max_iterations)
    {
+       COMPUTATION_CONTROLLERS
        if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time)
            break;

