Commit

[PrematureStopping] Convert CSignal::cancel_computations() to cancel_computation().

Remove CSignal::cancel_computations() and add a TODO when the class does not
inherit directly from CMachine.
geektoni authored and vigsterkr committed Jul 12, 2017
1 parent d0a58e8 commit f687607
Showing 36 changed files with 156 additions and 107 deletions.
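
The change follows one pattern throughout: training loops in classes that derive from CMachine switch from the static CSignal::cancel_computations() to the cancel_computation() member that, per the commit message, comes from CMachine, while routines outside that hierarchy keep running unconditionally and only retain the old check as a comment (with a TODO where noted). The following is a minimal, self-contained C++ sketch of that pattern; the CMachine, cancel_computation(), and request_stop() bodies here are illustrative stand-ins, not Shogun's actual implementation.

// Minimal, self-contained sketch of the pattern applied in this commit.
// CMachine, cancel_computation() and request_stop() are simplified stand-ins
// for illustration only; Shogun's real classes differ in detail.
#include <atomic>
#include <cstdint>
#include <cstdio>

class CMachine
{
public:
    virtual ~CMachine() = default;

    // Called from the outside (e.g. a signal handler) to request premature stopping.
    void request_stop() { m_cancel_computation.store(true); }

protected:
    // Member replacement for the old static CSignal::cancel_computations().
    bool cancel_computation() const { return m_cancel_computation.load(); }

private:
    std::atomic<bool> m_cancel_computation{false};
};

// Classes deriving from CMachine switch their training loops to the member:
class CToyPerceptron : public CMachine
{
public:
    void train_machine(int32_t max_iter)
    {
        bool converged = false;
        int32_t iter = 0;
        // old: while (!(CSignal::cancel_computations()) && (!converged && iter < max_iter))
        while (!(cancel_computation()) && (!converged && iter < max_iter))
        {
            // ... one training pass over the data would go here ...
            iter++;
            converged = (iter >= 3); // placeholder convergence criterion
        }
        std::printf("stopped after %d iterations\n", static_cast<int>(iter));
    }
};

// Code outside the CMachine hierarchy keeps running and only carries the old
// check as a comment, as in the GPL'd solvers in the diffs below:
void toy_solver(int32_t max_iter)
{
    bool done = false;
    int32_t iter = 0;
    // TODO: replace with the new signal
    // while (!done && iter <= max_iter && !CSignal::cancel_computations())
    while (!done && iter <= max_iter)
    {
        iter++;
        done = (iter >= 3); // placeholder stopping criterion
    }
}

int main()
{
    CToyPerceptron p;
    p.train_machine(100);
    toy_solver(100);
    return 0;
}

Keeping cancellation as per-machine state rather than a process-wide static flag is presumably why the TODO cases exist: routines such as QPproblem::gpdtsolve() and slep_solver() are not CMachine members, so their checks are commented out until the new signal mechanism reaches them.
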
4 changes: 3 additions & 1 deletion src/gpl/shogun/lib/external/gpdtsolve.cpp
@@ -1311,7 +1311,9 @@ float64_t QPproblem::gpdtsolve(float64_t *solution)
}
Cache->Iteration();
nit = nit+1;
} while (!optimal() && !(CSignal::cancel_computations()));
// TODO: replace with the new signal
//} while (!optimal() && !(CSignal::cancel_computations()));
} while (!optimal());
/* End of the problem resolution loop */
/***************************************************************************/

5 changes: 4 additions & 1 deletion src/gpl/shogun/lib/malsar/malsar_joint_feature_learning.cpp
@@ -51,7 +51,10 @@ malsar_result_t malsar_joint_feature_learning(

//internal::set_is_malloc_allowed(false);
bool done = false;
while (!done && iter <= options.max_iter && !CSignal::cancel_computations())
// TODO: replace with the new signal
// while (!done && iter <= options.max_iter &&
// !CSignal::cancel_computations())
while (!done && iter <= options.max_iter)
{
double alpha = double(t_old - 1)/t;

3 changes: 2 additions & 1 deletion src/gpl/shogun/lib/slep/slep_mc_plain_lr.cpp
@@ -91,7 +91,8 @@ slep_result_t slep_mc_plain_lr(
bool done = false;
CTime time;
//internal::set_is_malloc_allowed(false);
while ((!done) && (iter<options.max_iter) && (!CSignal::cancel_computations()))
// while ((!done) && (iter<options.max_iter) && (!cancel_computation()))
while ((!done) && (iter < options.max_iter))
{
double beta = (alphap-1)/alpha;
// compute search points
3 changes: 2 additions & 1 deletion src/gpl/shogun/lib/slep/slep_mc_tree_lr.cpp
@@ -93,7 +93,8 @@ slep_result_t slep_mc_tree_lr(
bool done = false;
CTime time;
//internal::set_is_malloc_allowed(false);
while ((!done) && (iter<options.max_iter) && (!CSignal::cancel_computations()))
// while ((!done) && (iter<options.max_iter) && (!cancel_computation()))
while ((!done) && (iter < options.max_iter))
{
double beta = (alphap-1)/alpha;
// compute search points
3 changes: 2 additions & 1 deletion src/gpl/shogun/lib/slep/slep_solver.cpp
@@ -524,7 +524,8 @@ slep_result_t slep_solver(
double alphap = 0.0, alpha = 1.0;
double fun_x = 0.0;

while (!done && iter <= options.max_iter && !CSignal::cancel_computations())
// while (!done && iter <= options.max_iter && !cancel_computation())
while (!done && iter <= options.max_iter)
{
beta = (alphap-1.0)/alpha;

2 changes: 1 addition & 1 deletion src/shogun/classifier/AveragedPerceptron.cpp
@@ -63,7 +63,7 @@ bool CAveragedPerceptron::train_machine(CFeatures* data)

//loop till we either get everything classified right or reach max_iter

while (!(CSignal::cancel_computations()) && (!converged && iter<max_iter))
while (!(cancel_computation()) && (!converged && iter < max_iter))
{
converged=true;
SG_INFO("Iteration Number : %d of max %d\n", iter, max_iter);
2 changes: 1 addition & 1 deletion src/shogun/classifier/LPBoost.cpp
@@ -125,7 +125,7 @@ bool CLPBoost::train_machine(CFeatures* data)
int32_t num_hypothesis=0;
CTime time;

while (!(CSignal::cancel_computations()))
while (!(cancel_computation()))
{
int32_t max_dim=0;
float64_t violator=find_max_violator(max_dim);
2 changes: 1 addition & 1 deletion src/shogun/classifier/Perceptron.cpp
@@ -67,7 +67,7 @@ bool CPerceptron::train_machine(CFeatures* data)


//loop till we either get everything classified right or reach max_iter
while (!(CSignal::cancel_computations()) && (!converged && iter<max_iter))
while (!(cancel_computation()) && (!converged && iter < max_iter))
{
converged=true;
for (auto example_idx : features->index_iterator())
2 changes: 1 addition & 1 deletion src/shogun/classifier/mkl/MKL.cpp
@@ -477,7 +477,7 @@ bool CMKL::train_machine(CFeatures* data)


mkl_iterations++;
if (perform_mkl_step(sumw, suma) || CSignal::cancel_computations())
if (perform_mkl_step(sumw, suma) || cancel_computation())
break;
}

2 changes: 1 addition & 1 deletion src/shogun/classifier/mkl/MKLMulticlass.cpp
@@ -371,7 +371,7 @@ bool CMKLMulticlass::train_machine(CFeatures* data)
int32_t numberofsilpiterations=0;
bool final=false;

while (!(CSignal::cancel_computations()) && !final)
while (!(cancel_computation()) && !final)
{

//curweights.clear();
6 changes: 3 additions & 3 deletions src/shogun/classifier/svm/LibLinear.cpp
@@ -316,7 +316,7 @@ void CLibLinear::solve_l2r_l1l2_svc(

auto pb = progress(range(10));
CTime start_time;
while (iter < max_iterations && !CSignal::cancel_computations())
while (iter < max_iterations && !cancel_computation())
{
if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time)
break;
@@ -526,7 +526,7 @@ void CLibLinear::solve_l1r_l2_svc(

auto pb = progress(range(10));
CTime start_time;
while (iter < max_iterations && !CSignal::cancel_computations())
while (iter < max_iterations && !cancel_computation())
{
if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time)
break;
@@ -898,7 +898,7 @@ void CLibLinear::solve_l1r_lr(

auto pb = progress(range(10));
CTime start_time;
while (iter < max_iterations && !CSignal::cancel_computations())
while (iter < max_iterations && !cancel_computation())
{
if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time)
break;
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/MPDSVM.cpp
@@ -95,7 +95,7 @@ bool CMPDSVM::train_machine(CFeatures* data)
}

// go ...
while (niter++ < maxiter && !CSignal::cancel_computations())
while (niter++ < maxiter && !cancel_computation())
{
int32_t maxpidx=-1;
float64_t maxpviol = -1;
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/NewtonSVM.cpp
@@ -82,7 +82,7 @@ bool CNewtonSVM::train_machine(CFeatures* data)
float64_t obj, *grad=SG_MALLOC(float64_t, x_d+1);
float64_t t;

while(!CSignal::cancel_computations())
while (!cancel_computation())
{
iter++;

2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/OnlineSVMSGD.cpp
@@ -106,7 +106,7 @@ bool COnlineSVMSGD::train(CFeatures* data)
is_log_loss = true;

int32_t vec_count;
for(int32_t e=0; e<epochs && (!CSignal::cancel_computations()); e++)
for (int32_t e = 0; e < epochs && (!cancel_computation()); e++)
{
vec_count=0;
count = skip;
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/SGDQN.cpp
@@ -143,7 +143,7 @@ bool CSGDQN::train(CFeatures* data)
if ((loss_type == L_LOGLOSS) || (loss_type == L_LOGLOSSMARGIN))
is_log_loss = true;

for(int32_t e=0; e<epochs && (!CSignal::cancel_computations()); e++)
for (int32_t e = 0; e < epochs && (!cancel_computation()); e++)
{
count = skip;
bool updateB=false;
6 changes: 5 additions & 1 deletion src/shogun/classifier/svm/SVMLight.cpp
@@ -647,7 +647,11 @@ int32_t CSVMLight::optimize_to_convergence(int32_t* docs, int32_t* label, int32_
for (;((iteration<100 || (!mkl_converged && callback) ) || (retrain && (!terminate))); iteration++){
#else

for (;((!CSignal::cancel_computations()) && ((iteration<3 || (!mkl_converged && callback) ) || (retrain && (!terminate)))); iteration++){
for (; ((!cancel_computation()) &&
((iteration < 3 || (!mkl_converged && callback)) ||
(retrain && (!terminate))));
iteration++)
{
#endif

if(use_kernel_cache)
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/SVMSGD.cpp
@@ -115,7 +115,7 @@ bool CSVMSGD::train_machine(CFeatures* data)
if ((loss_type == L_LOGLOSS) || (loss_type == L_LOGLOSSMARGIN))
is_log_loss = true;

for(int32_t e=0; e<epochs && (!CSignal::cancel_computations()); e++)
for (int32_t e = 0; e < epochs && (!cancel_computation()); e++)
{
count = skip;
for (int32_t i=0; i<num_vec; i++)
2 changes: 1 addition & 1 deletion src/shogun/classifier/vw/VowpalWabbit.cpp
@@ -164,7 +164,7 @@ bool CVowpalWabbit::train_machine(CFeatures* feat)
}

features->start_parser();
while (!(CSignal::cancel_computations()) && (env->passes_complete < env->num_passes))
while (!(cancel_computation()) && (env->passes_complete < env->num_passes))
{
while (features->get_next_example())
{
5 changes: 3 additions & 2 deletions src/shogun/distance/Distance.cpp
@@ -333,8 +333,9 @@ SGMatrix<T> CDistance::get_distance_matrix()

pb.print_progress();

if (CSignal::cancel_computations())
break;
// TODO: replace with new signal
// if (CSignal::cancel_computations())
// break;
}
}
}
4 changes: 3 additions & 1 deletion src/shogun/distributions/HMM.cpp
@@ -5586,7 +5586,9 @@ bool CHMM::baum_welch_viterbi_train(BaumWelchViterbiType type)
float64_t prob_train=CMath::ALMOST_NEG_INFTY;
iteration_count=iterations;

while (!converged(prob, prob_train) && (!CSignal::cancel_computations()))
// TODO: replace with the new signal
// while (!converged(prob, prob_train) && (!CSignal::cancel_computations()))
while (!converged(prob, prob_train))
{
CMath::swap(working, estimate);
prob=prob_train;
22 changes: 8 additions & 14 deletions src/shogun/features/DotFeatures.cpp
@@ -88,8 +88,10 @@ void CDotFeatures::dense_dot_range(float64_t* output, int32_t start, int32_t sto
#ifdef WIN32
for (int32_t i=t_start; i<t_stop; i++)
#else
for (int32_t i=t_start; i<t_stop &&
!CSignal::cancel_computations(); i++)
// TODO: replace with the new signal
// for (int32_t i=t_start; i<t_stop &&
// !CSignal::cancel_computations(); i++)
for (int32_t i = t_start; i < t_stop; i++)
#endif
{
if (alphas)
@@ -100,11 +102,6 @@ void CDotFeatures::dense_dot_range(float64_t* output, int32_t start, int32_t sto
}
}
pb.complete();

#ifndef WIN32
if ( CSignal::cancel_computations() )
SG_INFO("prematurely stopped. \n")
#endif
}

void CDotFeatures::dense_dot_range_subset(int32_t* sub_index, int32_t num, float64_t* output, float64_t* alphas, float64_t* vec, int32_t dim, float64_t b)
@@ -137,8 +134,10 @@ void CDotFeatures::dense_dot_range_subset(int32_t* sub_index, int32_t num, float
#ifdef WIN32
for (int32_t i=t_start; i<t_stop; i++)
#else
for (int32_t i=t_start; i<t_stop &&
!CSignal::cancel_computations(); i++)
// TODO: replace with the new signal
// for (int32_t i=t_start; i<t_stop &&
// !CSignal::cancel_computations(); i++)
for (int32_t i = t_start; i < t_stop; i++)
#endif
{
if (alphas)
@@ -149,11 +148,6 @@ void CDotFeatures::dense_dot_range_subset(int32_t* sub_index, int32_t num, float
}
}
pb.complete();

#ifndef WIN32
if ( CSignal::cancel_computations() )
SG_INFO("prematurely stopped. \n")
#endif
}

SGMatrix<float64_t> CDotFeatures::get_computed_dot_feature_matrix()
10 changes: 0 additions & 10 deletions src/shogun/features/hashed/HashedWDFeaturesTransposed.cpp
@@ -291,11 +291,6 @@ void CHashedWDFeaturesTransposed::dense_dot_range(float64_t* output, int32_t sta
}
#endif
SG_FREE(index);

#ifndef WIN32
if ( CSignal::cancel_computations() )
SG_INFO("prematurely stopped. \n")
#endif
}

void CHashedWDFeaturesTransposed::dense_dot_range_subset(int32_t* sub_index, int num, float64_t* output, float64_t* alphas, float64_t* vec, int32_t dim, float64_t b)
@@ -382,11 +377,6 @@ void CHashedWDFeaturesTransposed::dense_dot_range_subset(int32_t* sub_index, int
SG_FREE(index);
}
#endif

#ifndef WIN32
if ( CSignal::cancel_computations() )
SG_INFO("prematurely stopped. \n")
#endif
}

void* CHashedWDFeaturesTransposed::dense_dot_range_helper(void* p)
5 changes: 3 additions & 2 deletions src/shogun/kernel/Kernel.cpp
@@ -1302,8 +1302,9 @@ template <class T> void* CKernel::get_kernel_matrix_helper(void* p)

pb->print_progress();

if (CSignal::cancel_computations())
break;
// TODO: replace with the new signal
// if (CSignal::cancel_computations())
// break;
}
}

46 changes: 25 additions & 21 deletions src/shogun/kernel/string/WeightedDegreePositionStringKernel.cpp
@@ -1254,26 +1254,28 @@ void CWeightedDegreePositionStringKernel::compute_batch(
{

auto pb = progress(range(num_feat), *this->io);
for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
{
init_optimization(num_suppvec, IDX, alphas, j);
S_THREAD_PARAM_WDS<DNATrie> params;
params.vec=vec;
params.result=result;
params.weights=weights;
params.kernel=this;
params.tries=&tries;
params.factor=factor;
params.j=j;
params.start=0;
params.end=num_vec;
params.length=length;
params.max_shift=max_shift;
params.shift=shift;
params.vec_idx=vec_idx;
compute_batch_helper((void*) &params);

pb.print_progress();
// TODO: replace with the new signal
// for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
for (int32_t j = 0; j < num_feat; j++)
{
init_optimization(num_suppvec, IDX, alphas, j);
S_THREAD_PARAM_WDS<DNATrie> params;
params.vec = vec;
params.result = result;
params.weights = weights;
params.kernel = this;
params.tries = &tries;
params.factor = factor;
params.j = j;
params.start = 0;
params.end = num_vec;
params.length = length;
params.max_shift = max_shift;
params.shift = shift;
params.vec_idx = vec_idx;
compute_batch_helper((void*)&params);

pb.print_progress();
}
pb.complete();
}
@@ -1282,7 +1284,9 @@ void CWeightedDegreePositionStringKernel::compute_batch(
{

auto pb = progress(range(num_feat), *this->io);
for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
// TODO: replace with the new signal
// for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
for (int32_t j = 0; j < num_feat; j++)
{
init_optimization(num_suppvec, IDX, alphas, j);
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
10 changes: 6 additions & 4 deletions src/shogun/kernel/string/WeightedDegreeStringKernel.cpp
@@ -884,8 +884,9 @@ void CWeightedDegreeStringKernel::compute_batch(

if (num_threads < 2)
{

for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
// TODO: replace with the new signal
// for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
for (int32_t j = 0; j < num_feat; j++)
{
init_optimization(num_suppvec, IDX, alphas, j);
S_THREAD_PARAM_WD params;
@@ -909,8 +910,9 @@ void CWeightedDegreeStringKernel::compute_batch(
#ifdef HAVE_PTHREAD
else
{

for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
// TODO: replace with the new signal
// for (int32_t j=0; j<num_feat && !CSignal::cancel_computations(); j++)
for (int32_t j = 0; j < num_feat; j++)
{
init_optimization(num_suppvec, IDX, alphas, j);
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);