Skip to content

Commit

Permalink
move optimizing steps to the optimizer class
Browse files Browse the repository at this point in the history
  • Loading branch information
schalkdaniel committed Nov 15, 2019
1 parent 1d4f8c3 commit 7a0b528
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 26 deletions.
31 changes: 7 additions & 24 deletions src/compboost.cpp
Expand Up @@ -57,47 +57,30 @@ void Compboost::train (const unsigned int& trace, std::shared_ptr<loggerlist::Lo
Rcpp::stop("Could not train without any registered base-learner.");
}

arma::mat blearner_pred_temp;

bool stop_the_algorithm = false;
// Bool to indicate whether the stop criteria (stopc) is reached or not:
bool is_stopc_reached = false;
unsigned int k = 1;

// Main Algorithm. While the stop criteria isn't fulfilled, run the
// algorithm:
while (! stop_the_algorithm) {
while (! is_stopc_reached) {

actual_iteration = blearner_track.getBaselearnerVector().size() + 1;

sh_ptr_response->setActualIteration(actual_iteration);
sh_ptr_response->updatePseudoResiduals(sh_ptr_loss);

// Cast integer k to string for baselearner identifier:
std::string temp_string = std::to_string(k);
std::shared_ptr<blearner::Baselearner> sh_ptr_blearner_selected = sh_ptr_optimizer->findBestBaselearner(temp_string,
sh_ptr_response, used_baselearner_list.getMap());

// Prediction is needed more often, use a temp vector to avoid multiple computations:
blearner_pred_temp = sh_ptr_blearner_selected->predict();

sh_ptr_optimizer->calculateStepSize(sh_ptr_loss, sh_ptr_response, blearner_pred_temp);

// Insert the new base-learner into the vector of selected base-learners. The parameters are estimated here, hence
// the contribution to the old parameter is the estimated parameter times the learning rate times
// the step size. Therefore we have to pass the step size, which changes in each iteration:
blearner_track.insertBaselearner(sh_ptr_blearner_selected, sh_ptr_optimizer->getStepSize(actual_iteration));
sh_ptr_response->updatePrediction(learning_rate, sh_ptr_optimizer->getStepSize(actual_iteration), blearner_pred_temp);
sh_ptr_optimizer->optimize(actual_iteration, learning_rate, sh_ptr_loss, sh_ptr_response, blearner_track,
used_baselearner_list);

// Log the current step:
// The last term has to be the prediction or anything like that. This is
// important to track the risk (inbag or oob)!!!!
logger_list->logCurrent(actual_iteration, sh_ptr_response, sh_ptr_blearner_selected, learning_rate, sh_ptr_optimizer->getStepSize(actual_iteration));
logger_list->logCurrent(actual_iteration, sh_ptr_response, blearner_track.getBaselearnerVector().back(), learning_rate, sh_ptr_optimizer->getStepSize(actual_iteration));

// Calculate and log risk:
risk.push_back(sh_ptr_response->calculateEmpiricalRisk(sh_ptr_loss));

// Get status of the algorithm (is the stopping criteria reached?). The negation here
// seems a bit weird, but it makes the while loop easier to read:
stop_the_algorithm = ! logger_list->getStopperStatus(stop_if_all_stopper_fulfilled);
is_stopc_reached = ! logger_list->getStopperStatus(stop_if_all_stopper_fulfilled);

if (helper::checkTracePrinter(actual_iteration, trace)) logger_list->printLoggerStatus(risk.back());
k += 1;
Expand Down
19 changes: 19 additions & 0 deletions src/optimizer.cpp
Expand Up @@ -148,6 +148,25 @@ std::shared_ptr<blearner::Baselearner> OptimizerCoordinateDescent::findBestBasel
}
}

void OptimizerCoordinateDescent::optimize (const unsigned int& actual_iteration, const double& learning_rate, const std::shared_ptr<loss::Loss> sh_ptr_loss, const std::shared_ptr<response::Response> sh_ptr_response,
  blearnertrack::BaselearnerTrack& blearner_track, const blearnerlist::BaselearnerFactoryList& blearner_list)
{
  // Perform one coordinate-descent boosting step: pick the best base-learner
  // for the current pseudo residuals, determine the step size, and update the
  // model state (tracked base-learners and the response's prediction).

  // The iteration number doubles as the identifier of the new base-learner:
  const std::string blearner_id = std::to_string(actual_iteration);
  std::shared_ptr<blearner::Baselearner> best_blearner = findBestBaselearner(blearner_id,
    sh_ptr_response, blearner_list.getMap());

  // The prediction of the selected base-learner is used twice below, so
  // compute it just once:
  const arma::mat best_blearner_pred = best_blearner->predict();

  calculateStepSize(sh_ptr_loss, sh_ptr_response, best_blearner_pred);

  // Register the selected base-learner. The parameters are estimated here, so
  // its contribution to the old parameter is the estimated parameter scaled by
  // the learning rate and the step size, which changes in each iteration --
  // hence the step size is passed along:
  blearner_track.insertBaselearner(best_blearner, getStepSize(actual_iteration));
  sh_ptr_response->updatePrediction(learning_rate, getStepSize(actual_iteration), best_blearner_pred);
}

void OptimizerCoordinateDescent::calculateStepSize (std::shared_ptr<loss::Loss> sh_ptr_loss, std::shared_ptr<response::Response> sh_ptr_response,
const arma::vec& baselearner_prediction)
{
Expand Down
7 changes: 5 additions & 2 deletions src/optimizer.h
Expand Up @@ -30,6 +30,7 @@

#include "baselearner.h"
#include "baselearner_factory_list.h"
#include "baselearner_track.h"
#include "loss.h"
#include "line_search.h"
#include "helper.h"
Expand All @@ -50,6 +51,8 @@ class Optimizer

virtual std::shared_ptr<blearner::Baselearner> findBestBaselearner (const std::string&,
std::shared_ptr<response::Response>, const blearner_factory_map&) const = 0;
virtual void optimize (const unsigned int&, const double&, const std::shared_ptr<loss::Loss>, const std::shared_ptr<response::Response>,
blearnertrack::BaselearnerTrack&, const blearnerlist::BaselearnerFactoryList&) = 0;

// loss, target, model_prediction, base_learner_prediction (prediction of newly selected base-learner)
virtual void calculateStepSize (std::shared_ptr<loss::Loss>, std::shared_ptr<response::Response>, const arma::vec&) = 0;
Expand Down Expand Up @@ -82,14 +85,14 @@ class OptimizerCoordinateDescent : public Optimizer

std::shared_ptr<blearner::Baselearner> findBestBaselearner (const std::string&, std::shared_ptr<response::Response>,
const blearner_factory_map&) const;
void optimize (const unsigned int&, const double&, const std::shared_ptr<loss::Loss>, const std::shared_ptr<response::Response>,
blearnertrack::BaselearnerTrack&, const blearnerlist::BaselearnerFactoryList&);

void calculateStepSize (std::shared_ptr<loss::Loss>, std::shared_ptr<response::Response>, const arma::vec&);
std::vector<double> getStepSize () const;
double getStepSize (const unsigned int&) const;
};

// Coordinate Descent with line search:
// -------------------------------------------
class OptimizerCoordinateDescentLineSearch : public OptimizerCoordinateDescent
{
public:
Expand Down

0 comments on commit 7a0b528

Please sign in to comment.