Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

progress bar in iterative algorithms #4305

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/gpl
Submodule gpl updated 1 file
+1 −1 shogun/multiclass/LaRank.cpp
29 changes: 24 additions & 5 deletions src/shogun/base/progress.h
Expand Up @@ -58,6 +58,12 @@

namespace shogun
{
#define SG_PROGRESS(...) \
progress( \
std::string(this->get_name()) + "::" + std::string(__FUNCTION__), \
*this->io, __VA_ARGS__)

#define SG_SPROGRESS(...) progress(__FUNCTION__, __VA_ARGS__)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

cool!
What does it look like with this? Can you post an example?


/** Possible print modes */
enum SG_PRG_MODE
Expand Down Expand Up @@ -712,7 +718,7 @@ namespace shogun
*/
template <typename T>
inline PRange<T> progress(
Range<T> range, const SGIO& io, std::string prefix = "PROGRESS: ",
std::string prefix, const SGIO& io, Range<T> range,
SG_PRG_MODE mode = UTF8,
std::function<bool()> condition = []() { return true; })
{
Expand All @@ -733,8 +739,7 @@ namespace shogun
*/
template <typename T>
inline PRange<T> progress(
Range<T> range, std::string prefix = "PROGRESS: ",
SG_PRG_MODE mode = UTF8,
std::string prefix, Range<T> range, SG_PRG_MODE mode = UTF8,
std::function<bool()> condition = []() { return true; })
{
return PRange<T>(range, *sg_io, prefix, mode, condition);
Expand All @@ -746,9 +751,23 @@ namespace shogun
* @param condition premature stopping condition
*/
template <typename T>
inline PRange<T> progress(Range<T> range, std::function<bool()> condition)
inline PRange<T> progress(
std::string prefix, Range<T> range, std::function<bool()> condition)
{
return PRange<T>(range, *sg_io, prefix, UTF8, condition);
}
/** Creates @ref PRange given a range and a stopping condition
*
* @param range range used
* @param io SGIO object
* @param condition premature stopping condition
*/
template <typename T>
inline PRange<T> progress(
std::string prefix, const SGIO& io, Range<T> range,
std::function<bool()> condition)
{
return PRange<T>(range, *sg_io, "PROGRESS: ", UTF8, condition);
return PRange<T>(range, io, prefix, UTF8, condition);
}
};
#endif /* __SG_PROGRESS_H__ */
12 changes: 7 additions & 5 deletions src/shogun/classifier/AveragedPerceptron.cpp
Expand Up @@ -5,11 +5,12 @@
* Michele Mazzoni
*/

#include <shogun/base/progress.h>
#include <shogun/classifier/AveragedPerceptron.h>
#include <shogun/labels/Labels.h>
#include <shogun/mathematics/Math.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/labels/Labels.h>
#include <shogun/lib/Signal.h>
#include <shogun/mathematics/Math.h>

using namespace shogun;

Expand Down Expand Up @@ -69,8 +70,8 @@ bool CAveragedPerceptron::train_machine(CFeatures* data)
for (int32_t i=0; i<num_feat; i++)
w[i]=1.0/num_feat;

//loop till we either get everything classified right or reach max_iter

auto pb = SG_PROGRESS(range(max_iter));
// loop till we either get everything classified right or reach max_iter
while (!converged && iter < max_iter)
{
COMPUTATION_CONTROLLERS
Expand All @@ -95,8 +96,9 @@ bool CAveragedPerceptron::train_machine(CFeatures* data)
tmp_bias+=bias;
}
iter++;
pb.print_progress();
}

pb.complete();
if (converged)
SG_INFO("Averaged Perceptron algorithm converged after %d iterations.\n", iter)
else
Expand Down
5 changes: 4 additions & 1 deletion src/shogun/classifier/Perceptron.cpp
Expand Up @@ -5,6 +5,7 @@
* Michele Mazzoni, Heiko Strathmann, Fernando Iglesias
*/

#include <shogun/base/progress.h>
#include <shogun/base/range.h>
#include <shogun/classifier/Perceptron.h>
#include <shogun/features/iterators/DotIterator.h>
Expand Down Expand Up @@ -81,6 +82,7 @@ bool CPerceptron::train_machine(CFeatures* data)
{
w = get_w();
}
auto pb = SG_PROGRESS(range(max_iter));
//loop till we either get everything classified right or reach max_iter
while (!converged && iter < max_iter)
{
Expand All @@ -105,8 +107,9 @@ bool CPerceptron::train_machine(CFeatures* data)
}

iter++;
pb.print_progress();
}

pb.complete();
if (converged)
SG_INFO("Perceptron algorithm converged after %d iterations.\n", iter)
else
Expand Down
8 changes: 4 additions & 4 deletions src/shogun/classifier/svm/LibLinear.cpp
Expand Up @@ -324,7 +324,7 @@ void CLibLinear::solve_l2r_l1l2_svc(
index[i] = i;
}

auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
CTime start_time;
while (iter < get_max_iterations())
{
Expand Down Expand Up @@ -538,7 +538,7 @@ void CLibLinear::solve_l1r_l2_svc(
}
}

auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
CTime start_time;
while (iter < get_max_iterations())
{
Expand Down Expand Up @@ -913,7 +913,7 @@ void CLibLinear::solve_l1r_lr(
}
}

auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
CTime start_time;
while (iter < get_max_iterations())
{
Expand Down Expand Up @@ -1270,7 +1270,7 @@ void CLibLinear::solve_l2r_lr_dual(
index[i] = i;
}

auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
while (iter < max_iter)
{
for (i = 0; i < l; i++)
Expand Down
6 changes: 5 additions & 1 deletion src/shogun/classifier/svm/MPDSVM.cpp
Expand Up @@ -5,11 +5,12 @@
* Evan Shelhamer, Sergey Lisitsyn
*/

#include <shogun/base/progress.h>
#include <shogun/classifier/svm/MPDSVM.h>
#include <shogun/io/SGIO.h>
#include <shogun/lib/Signal.h>
#include <shogun/lib/common.h>
#include <shogun/mathematics/Math.h>
#include <shogun/lib/Signal.h>

using namespace shogun;

Expand Down Expand Up @@ -91,6 +92,7 @@ bool CMPDSVM::train_machine(CFeatures* data)
dalphas[i]=-1; //CSVC
}

auto pb = SG_PROGRESS(range(maxiter));
// go ...
while (niter++ < maxiter)
{
Expand Down Expand Up @@ -222,8 +224,10 @@ bool CMPDSVM::train_machine(CFeatures* data)
dalphas[i]+= F[i] * etachange;
//dalphas[i]+= F[i] * etachange[0] + F[i+n] * etachange[1];
}
pb.print_progress();
}

pb.complete();
if (niter >= maxiter)
SG_WARNING("increase maxiter ... \n")

Expand Down
19 changes: 9 additions & 10 deletions src/shogun/classifier/svm/NewtonSVM.cpp
Expand Up @@ -7,15 +7,16 @@
#include <shogun/lib/config.h>

#ifdef HAVE_LAPACK
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/NewtonSVM.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
#include <shogun/machine/LinearMachine.h>
#include <shogun/features/DotFeatures.h>
#include <shogun/labels/Labels.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/mathematics/lapack.h>
#include <shogun/labels/Labels.h>
#include <shogun/lib/Signal.h>
#include <shogun/machine/LinearMachine.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/lapack.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>

//#define DEBUG_NEWTON
//#define V_NEWTON
Expand Down Expand Up @@ -80,6 +81,7 @@ bool CNewtonSVM::train_machine(CFeatures* data)
float64_t obj, *grad=SG_MALLOC(float64_t, x_d+1);
float64_t t;

auto pb = SG_PROGRESS(range(num_iter));
while (iter <= num_iter)
{
COMPUTATION_CONTROLLERS
Expand Down Expand Up @@ -204,13 +206,10 @@ bool CNewtonSVM::train_machine(CFeatures* data)

if (newton_decrement*2<prec*obj)
break;
}

if (iter > num_iter)
{
SG_PRINT("Maximum number of Newton steps reached. Try larger lambda")
pb.print_progress();
}

pb.complete();
#ifdef V_NEWTON
SG_PRINT("FINAL W AND BIAS Vector=\n\n")
CMath::display_matrix(weights, x_d+1, 1);
Expand Down
9 changes: 5 additions & 4 deletions src/shogun/classifier/svm/OnlineSVMSGD.cpp
Expand Up @@ -5,12 +5,13 @@
* Thoralf Klein, Viktor Gal, Evan Shelhamer, Bjoern Esser
*/

#include <shogun/classifier/svm/OnlineSVMSGD.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
#include <shogun/base/Parameter.h>
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/OnlineSVMSGD.h>
#include <shogun/lib/Signal.h>
#include <shogun/loss/HingeLoss.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>

using namespace shogun;

Expand Down Expand Up @@ -89,7 +90,7 @@ bool COnlineSVMSGD::train(CFeatures* data)
is_log_loss = true;

int32_t vec_count;
for (int32_t e = 0; e < epochs; e++)
for (auto e : SG_PROGRESS(range(epochs)))
{
COMPUTATION_CONTROLLERS
vec_count=0;
Expand Down
9 changes: 5 additions & 4 deletions src/shogun/classifier/svm/SGDQN.cpp
Expand Up @@ -5,12 +5,13 @@
* Sergey Lisitsyn, Thoralf Klein, Evan Shelhamer, Bjoern Esser
*/

#include <shogun/classifier/svm/SGDQN.h>
#include <shogun/base/Parameter.h>
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/SGDQN.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/lib/Signal.h>
#include <shogun/mathematics/Math.h>
#include <shogun/loss/HingeLoss.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/mathematics/Math.h>

using namespace shogun;

Expand Down Expand Up @@ -129,7 +130,7 @@ bool CSGDQN::train(CFeatures* data)
if ((loss_type == L_LOGLOSS) || (loss_type == L_LOGLOSSMARGIN))
is_log_loss = true;

for (int32_t e = 0; e < epochs; e++)
for (auto e : SG_PROGRESS(range(epochs)))
{
COMPUTATION_CONTROLLERS
count = skip;
Expand Down
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/SVMLight.cpp
Expand Up @@ -642,7 +642,7 @@ int32_t CSVMLight::optimize_to_convergence(int32_t* docs, int32_t* label, int32_
CTime start_time;
mkl_converged=false;

auto pb = progress(range(10), *this->io);
auto pb = SG_PROGRESS(range(10));
#ifdef CYGWIN
for (;((iteration<100 || (!mkl_converged && callback) ) || (retrain && (!terminate))); iteration++){
#else
Expand Down
14 changes: 8 additions & 6 deletions src/shogun/clustering/GMM.cpp
Expand Up @@ -7,8 +7,9 @@
*/
#include <shogun/lib/config.h>

#include <shogun/base/some.h>
#include <shogun/base/Parameter.h>
#include <shogun/base/progress.h>
#include <shogun/base/some.h>
#include <shogun/clustering/GMM.h>
#include <shogun/clustering/KMeans.h>
#include <shogun/distance/EuclideanDistance.h>
Expand Down Expand Up @@ -149,7 +150,7 @@ float64_t CGMM::train_em(float64_t min_cov, int32_t max_iter, float64_t min_chan
SGVector<float64_t> logPxy(num_vectors * m_components.size());
SGVector<float64_t> logPx(num_vectors);
//float64_t* logPost=SG_MALLOC(float64_t, num_vectors*m_components.vlen);

auto pb = SG_PROGRESS(range(max_iter));
while (iter<max_iter)
{
log_likelihood_prev=log_likelihood_cur;
Expand Down Expand Up @@ -181,12 +182,12 @@ float64_t CGMM::train_em(float64_t min_cov, int32_t max_iter, float64_t min_chan

if (iter>0 && log_likelihood_cur-log_likelihood_prev<min_change)
break;

pb.print_progress();
max_likelihood(alpha, min_cov);

iter++;
}

pb.complete();
return log_likelihood_cur;
}

Expand Down Expand Up @@ -218,6 +219,7 @@ float64_t CGMM::train_smem(int32_t max_iter, int32_t max_cand, float64_t min_cov
SGVector<int32_t> merge_ind(
m_components.size() * (m_components.size() - 1) / 2);

auto pb = SG_PROGRESS(range(max_iter));
while (iter<max_iter)
{
linalg::zero(logPostSum);
Expand Down Expand Up @@ -334,8 +336,9 @@ float64_t CGMM::train_smem(int32_t max_iter, int32_t max_cand, float64_t min_cov
if (!better_found)
break;
iter++;
pb.print_progress();
}

pb.complete();
return cur_likelihood;
}

Expand Down Expand Up @@ -826,4 +829,3 @@ void CGMM::register_params()
&m_coefficients, "m_coefficients", "Mixture coefficients.",
MS_NOT_AVAILABLE);
}

4 changes: 2 additions & 2 deletions src/shogun/clustering/Hierarchical.cpp
Expand Up @@ -109,7 +109,7 @@ bool CHierarchical::train_machine(CFeatures* data)
float64_t* distances=SG_MALLOC(float64_t, num_pairs);

int32_t offs=0;
for (auto i : progress(range(0, num), *this->io))
for (auto i : SG_PROGRESS(range(0, num)))
{
for (int32_t j=i+1; j<num; j++)
{
Expand All @@ -123,7 +123,7 @@ bool CHierarchical::train_machine(CFeatures* data)
CMath::qsort_index<float64_t,pair>(distances, index, (num-1)*num/2);
//CMath::display_vector(distances, (num-1)*num/2, "dists");

auto pb = progress(range(0, num_pairs - 1), *this->io);
auto pb = SG_PROGRESS(range(0, num_pairs - 1));
int32_t k=-1;
int32_t l=0;
for (; l<num && (num-l)>=merges && k<num_pairs-1; l++)
Expand Down