New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

progress bar in iterative algorithms #4305

Merged
merged 4 commits into from Jun 12, 2018
Jump to file or symbol
Failed to load files and symbols.
+218 −191
Diff settings

Always

Just for now

Submodule gpl updated 1 file
+1 −1 shogun/multiclass/LaRank.cpp
Copy path View file
@@ -58,6 +58,12 @@
namespace shogun
{
#define SG_PROGRESS(...) \
progress( \
std::string(this->get_name()) + "::" + std::string(__FUNCTION__), \
*this->io, __VA_ARGS__)
#define SG_SPROGRESS(...) progress(__FUNCTION__, __VA_ARGS__)

This comment has been minimized.

@karlnapf

karlnapf Jun 10, 2018

Member

cool!
How does it look with this? Can you post an example?

/** Possible print modes */
enum SG_PRG_MODE
@@ -712,7 +718,7 @@ namespace shogun
*/
template <typename T>
inline PRange<T> progress(
Range<T> range, const SGIO& io, std::string prefix = "PROGRESS: ",
std::string prefix, const SGIO& io, Range<T> range,
SG_PRG_MODE mode = UTF8,
std::function<bool()> condition = []() { return true; })
{
@@ -733,8 +739,7 @@ namespace shogun
*/
template <typename T>
inline PRange<T> progress(
Range<T> range, std::string prefix = "PROGRESS: ",
SG_PRG_MODE mode = UTF8,
std::string prefix, Range<T> range, SG_PRG_MODE mode = UTF8,
std::function<bool()> condition = []() { return true; })
{
return PRange<T>(range, *sg_io, prefix, mode, condition);
@@ -746,9 +751,23 @@ namespace shogun
* @param condition premature stopping condition
*/
template <typename T>
inline PRange<T> progress(Range<T> range, std::function<bool()> condition)
inline PRange<T> progress(
std::string prefix, Range<T> range, std::function<bool()> condition)
{
return PRange<T>(range, *sg_io, prefix, UTF8, condition);
}
/** Creates @ref PRange given a range and a stopping condition
*
* @param range range used
* @param io SGIO object
* @param condition premature stopping condition
*/
template <typename T>
inline PRange<T> progress(
std::string prefix, const SGIO& io, Range<T> range,
std::function<bool()> condition)
{
return PRange<T>(range, *sg_io, "PROGRESS: ", UTF8, condition);
return PRange<T>(range, io, prefix, UTF8, condition);
}
};
#endif /* __SG_PROGRESS_H__ */
@@ -5,11 +5,12 @@
* Michele Mazzoni
*/
#include <shogun/base/progress.h>
#include <shogun/classifier/AveragedPerceptron.h>
#include <shogun/labels/Labels.h>
#include <shogun/mathematics/Math.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/labels/Labels.h>
#include <shogun/lib/Signal.h>
#include <shogun/mathematics/Math.h>
using namespace shogun;
@@ -69,8 +70,8 @@ bool CAveragedPerceptron::train_machine(CFeatures* data)
for (int32_t i=0; i<num_feat; i++)
w[i]=1.0/num_feat;
//loop till we either get everything classified right or reach max_iter
auto pb = SG_PROGRESS(range(max_iter));
// loop till we either get everything classified right or reach max_iter
while (!converged && iter < max_iter)
{
COMPUTATION_CONTROLLERS
@@ -95,8 +96,9 @@ bool CAveragedPerceptron::train_machine(CFeatures* data)
tmp_bias+=bias;
}
iter++;
pb.print_progress();
}
pb.complete();
if (converged)
SG_INFO("Averaged Perceptron algorithm converged after %d iterations.\n", iter)
else
@@ -5,6 +5,7 @@
* Michele Mazzoni, Heiko Strathmann, Fernando Iglesias
*/
#include <shogun/base/progress.h>
#include <shogun/base/range.h>
#include <shogun/classifier/Perceptron.h>
#include <shogun/features/iterators/DotIterator.h>
@@ -81,6 +82,7 @@ bool CPerceptron::train_machine(CFeatures* data)
{
w = get_w();
}
auto pb = SG_PROGRESS(range(max_iter));
//loop till we either get everything classified right or reach max_iter
while (!converged && iter < max_iter)
{
@@ -105,8 +107,9 @@ bool CPerceptron::train_machine(CFeatures* data)
}
iter++;
pb.print_progress();
}
pb.complete();
if (converged)
SG_INFO("Perceptron algorithm converged after %d iterations.\n", iter)
else
@@ -324,7 +324,7 @@ void CLibLinear::solve_l2r_l1l2_svc(
index[i] = i;
}
auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
CTime start_time;
while (iter < get_max_iterations())
{
@@ -538,7 +538,7 @@ void CLibLinear::solve_l1r_l2_svc(
}
}
auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
CTime start_time;
while (iter < get_max_iterations())
{
@@ -913,7 +913,7 @@ void CLibLinear::solve_l1r_lr(
}
}
auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
CTime start_time;
while (iter < get_max_iterations())
{
@@ -1270,7 +1270,7 @@ void CLibLinear::solve_l2r_lr_dual(
index[i] = i;
}
auto pb = progress(range(10));
auto pb = SG_PROGRESS(range(10));
while (iter < max_iter)
{
for (i = 0; i < l; i++)
@@ -5,11 +5,12 @@
* Evan Shelhamer, Sergey Lisitsyn
*/
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/MPDSVM.h>
#include <shogun/io/SGIO.h>
#include <shogun/lib/Signal.h>
#include <shogun/lib/common.h>
#include <shogun/mathematics/Math.h>
#include <shogun/lib/Signal.h>
using namespace shogun;
@@ -91,6 +92,7 @@ bool CMPDSVM::train_machine(CFeatures* data)
dalphas[i]=-1; //CSVC
}
auto pb = SG_PROGRESS(range(maxiter));
// go ...
while (niter++ < maxiter)
{
@@ -222,8 +224,10 @@ bool CMPDSVM::train_machine(CFeatures* data)
dalphas[i]+= F[i] * etachange;
//dalphas[i]+= F[i] * etachange[0] + F[i+n] * etachange[1];
}
pb.print_progress();
}
pb.complete();
if (niter >= maxiter)
SG_WARNING("increase maxiter ... \n")
@@ -7,15 +7,16 @@
#include <shogun/lib/config.h>
#ifdef HAVE_LAPACK
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/NewtonSVM.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
#include <shogun/machine/LinearMachine.h>
#include <shogun/features/DotFeatures.h>
#include <shogun/labels/Labels.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/mathematics/lapack.h>
#include <shogun/labels/Labels.h>
#include <shogun/lib/Signal.h>
#include <shogun/machine/LinearMachine.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/lapack.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
//#define DEBUG_NEWTON
//#define V_NEWTON
@@ -80,6 +81,7 @@ bool CNewtonSVM::train_machine(CFeatures* data)
float64_t obj, *grad=SG_MALLOC(float64_t, x_d+1);
float64_t t;
auto pb = SG_PROGRESS(range(num_iter));
while (iter <= num_iter)
{
COMPUTATION_CONTROLLERS
@@ -204,13 +206,10 @@ bool CNewtonSVM::train_machine(CFeatures* data)
if (newton_decrement*2<prec*obj)
break;
}
if (iter > num_iter)
{
SG_PRINT("Maximum number of Newton steps reached. Try larger lambda")
pb.print_progress();
}
pb.complete();
#ifdef V_NEWTON
SG_PRINT("FINAL W AND BIAS Vector=\n\n")
CMath::display_matrix(weights, x_d+1, 1);
@@ -5,12 +5,13 @@
* Thoralf Klein, Viktor Gal, Evan Shelhamer, Bjoern Esser
*/
#include <shogun/classifier/svm/OnlineSVMSGD.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
#include <shogun/base/Parameter.h>
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/OnlineSVMSGD.h>
#include <shogun/lib/Signal.h>
#include <shogun/loss/HingeLoss.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
using namespace shogun;
@@ -89,7 +90,7 @@ bool COnlineSVMSGD::train(CFeatures* data)
is_log_loss = true;
int32_t vec_count;
for (int32_t e = 0; e < epochs; e++)
for (auto e : SG_PROGRESS(range(epochs)))
{
COMPUTATION_CONTROLLERS
vec_count=0;
@@ -5,12 +5,13 @@
* Sergey Lisitsyn, Thoralf Klein, Evan Shelhamer, Bjoern Esser
*/
#include <shogun/classifier/svm/SGDQN.h>
#include <shogun/base/Parameter.h>
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/SGDQN.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/lib/Signal.h>
#include <shogun/mathematics/Math.h>
#include <shogun/loss/HingeLoss.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/mathematics/Math.h>
using namespace shogun;
@@ -129,7 +130,7 @@ bool CSGDQN::train(CFeatures* data)
if ((loss_type == L_LOGLOSS) || (loss_type == L_LOGLOSSMARGIN))
is_log_loss = true;
for (int32_t e = 0; e < epochs; e++)
for (auto e : SG_PROGRESS(range(epochs)))
{
COMPUTATION_CONTROLLERS
count = skip;
@@ -642,7 +642,7 @@ int32_t CSVMLight::optimize_to_convergence(int32_t* docs, int32_t* label, int32_
CTime start_time;
mkl_converged=false;
auto pb = progress(range(10), *this->io);
auto pb = SG_PROGRESS(range(10));
#ifdef CYGWIN
for (;((iteration<100 || (!mkl_converged && callback) ) || (retrain && (!terminate))); iteration++){
#else
Copy path View file
@@ -7,8 +7,9 @@
*/
#include <shogun/lib/config.h>
#include <shogun/base/some.h>
#include <shogun/base/Parameter.h>
#include <shogun/base/progress.h>
#include <shogun/base/some.h>
#include <shogun/clustering/GMM.h>
#include <shogun/clustering/KMeans.h>
#include <shogun/distance/EuclideanDistance.h>
@@ -149,7 +150,7 @@ float64_t CGMM::train_em(float64_t min_cov, int32_t max_iter, float64_t min_chan
SGVector<float64_t> logPxy(num_vectors * m_components.size());
SGVector<float64_t> logPx(num_vectors);
//float64_t* logPost=SG_MALLOC(float64_t, num_vectors*m_components.vlen);
auto pb = SG_PROGRESS(range(max_iter));
while (iter<max_iter)
{
log_likelihood_prev=log_likelihood_cur;
@@ -181,12 +182,12 @@ float64_t CGMM::train_em(float64_t min_cov, int32_t max_iter, float64_t min_chan
if (iter>0 && log_likelihood_cur-log_likelihood_prev<min_change)
break;
pb.print_progress();
max_likelihood(alpha, min_cov);
iter++;
}
pb.complete();
return log_likelihood_cur;
}
@@ -218,6 +219,7 @@ float64_t CGMM::train_smem(int32_t max_iter, int32_t max_cand, float64_t min_cov
SGVector<int32_t> merge_ind(
m_components.size() * (m_components.size() - 1) / 2);
auto pb = SG_PROGRESS(range(max_iter));
while (iter<max_iter)
{
linalg::zero(logPostSum);
@@ -334,8 +336,9 @@ float64_t CGMM::train_smem(int32_t max_iter, int32_t max_cand, float64_t min_cov
if (!better_found)
break;
iter++;
pb.print_progress();
}
pb.complete();
return cur_likelihood;
}
@@ -826,4 +829,3 @@ void CGMM::register_params()
&m_coefficients, "m_coefficients", "Mixture coefficients.",
MS_NOT_AVAILABLE);
}
@@ -109,7 +109,7 @@ bool CHierarchical::train_machine(CFeatures* data)
float64_t* distances=SG_MALLOC(float64_t, num_pairs);
int32_t offs=0;
for (auto i : progress(range(0, num), *this->io))
for (auto i : SG_PROGRESS(range(0, num)))
{
for (int32_t j=i+1; j<num; j++)
{
@@ -123,7 +123,7 @@ bool CHierarchical::train_machine(CFeatures* data)
CMath::qsort_index<float64_t,pair>(distances, index, (num-1)*num/2);
//CMath::display_vector(distances, (num-1)*num/2, "dists");
auto pb = progress(range(0, num_pairs - 1), *this->io);
auto pb = SG_PROGRESS(range(0, num_pairs - 1));
int32_t k=-1;
int32_t l=0;
for (; l<num && (num-l)>=merges && k<num_pairs-1; l++)
Oops, something went wrong.
ProTip! Use n and p to navigate between commits in a pull request.