Renamed liblinear structures to avoid name clashes in future
lisitsyn committed Jun 8, 2013
1 parent 5aed350 commit eca7941
Showing 9 changed files with 38 additions and 38 deletions.
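
The structs touched here are declared inside an extern "C" block in shogun_liblinear.h, so the old names problem, parameter and model sat in the global namespace and could collide with identically named types pulled in by other headers in the same build (upstream LIBLINEAR's own public linear.h declares a struct problem, for example). Below is a minimal standalone sketch of the situation the liblinear_ prefix avoids; the second struct, the field selection, and all values are hypothetical and not part of this commit.

#include <cstdint>

// Abbreviated, approximate copy of the renamed Shogun-side struct
// (see the shogun_liblinear.h diff below); fields trimmed for brevity.
extern "C" {
struct liblinear_problem
{
	int32_t l;   // number of training vectors
	int32_t n;   // feature dimension (+1 when a bias column is appended)
	double* y;   // labels
};
}

// Hypothetical third-party header contents: a global "problem" type that
// would have clashed with the old unprefixed struct name.
struct problem
{
	int rows;
	int cols;
};

int main()
{
	liblinear_problem prob{};  // Shogun/liblinear training problem
	problem other{};           // unrelated type; no name clash after the rename
	prob.l = other.rows;       // placeholder use of both types
	return 0;
}
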
10 changes: 5 additions & 5 deletions src/shogun/classifier/svm/LibLinear.cpp
@@ -120,7 +120,7 @@ bool CLibLinear::train_machine(CFeatures* data)
else
w=SGVector<float64_t>(num_feat);

-problem prob;
+liblinear_problem prob;
if (use_bias)
{
prob.n=w.vlen+1;
@@ -250,7 +250,7 @@ bool CLibLinear::train_machine(CFeatures* data)
// To support weights for instances, use GETI(i) (i)

void CLibLinear::solve_l2r_l1l2_svc(
-const problem *prob, double eps, double Cp, double Cn, LIBLINEAR_SOLVER_TYPE st)
+const liblinear_problem *prob, double eps, double Cp, double Cn, LIBLINEAR_SOLVER_TYPE st)
{
int l = prob->l;
int w_size = prob->n;
@@ -450,7 +450,7 @@ void CLibLinear::solve_l2r_l1l2_svc(
// To support weights for instances, use GETI(i) (i)

void CLibLinear::solve_l1r_l2_svc(
-problem *prob_col, double eps, double Cp, double Cn)
+liblinear_problem *prob_col, double eps, double Cp, double Cn)
{
int l = prob_col->l;
int w_size = prob_col->n;
@@ -796,7 +796,7 @@ void CLibLinear::solve_l1r_l2_svc(
// To support weights for instances, use GETI(i) (i)

void CLibLinear::solve_l1r_lr(
-const problem *prob_col, double eps,
+const liblinear_problem *prob_col, double eps,
double Cp, double Cn)
{
int l = prob_col->l;
@@ -1167,7 +1167,7 @@ void CLibLinear::solve_l1r_lr(
#define GETI(i) (y[i]+1)
// To support weights for instances, use GETI(i) (i)

-void CLibLinear::solve_l2r_lr_dual(const problem *prob, double eps, double Cp, double Cn)
+void CLibLinear::solve_l2r_lr_dual(const liblinear_problem *prob, double eps, double Cp, double Cn)
{
int l = prob->l;
int w_size = prob->n;
10 changes: 5 additions & 5 deletions src/shogun/classifier/svm/LibLinear.h
@@ -174,13 +174,13 @@ class CLibLinear : public CLinearMachine
/** set up parameters */
void init();

-void train_one(const problem *prob, const parameter *param, double Cp, double Cn);
+void train_one(const liblinear_problem *prob, const liblinear_parameter *param, double Cp, double Cn);
void solve_l2r_l1l2_svc(
-const problem *prob, double eps, double Cp, double Cn, LIBLINEAR_SOLVER_TYPE st);
+const liblinear_problem *prob, double eps, double Cp, double Cn, LIBLINEAR_SOLVER_TYPE st);

-void solve_l1r_l2_svc(problem *prob_col, double eps, double Cp, double Cn);
-void solve_l1r_lr(const problem *prob_col, double eps, double Cp, double Cn);
-void solve_l2r_lr_dual(const problem *prob, double eps, double Cp, double Cn);
+void solve_l1r_l2_svc(liblinear_problem *prob_col, double eps, double Cp, double Cn);
+void solve_l1r_lr(const liblinear_problem *prob_col, double eps, double Cp, double Cn);
+void solve_l2r_lr_dual(const liblinear_problem *prob, double eps, double Cp, double Cn);


protected:
2 changes: 1 addition & 1 deletion src/shogun/multiclass/MulticlassLibLinear.cpp
@@ -102,7 +102,7 @@ bool CMulticlassLibLinear::train_machine(CFeatures* data)
int32_t num_classes = ((CMulticlassLabels*) m_labels)->get_num_classes();
int32_t bias_n = m_use_bias ? 1 : 0;

-problem mc_problem;
+liblinear_problem mc_problem;
mc_problem.l = num_vectors;
mc_problem.n = m_features->get_dim_feature_space() + bias_n;
mc_problem.y = SG_MALLOC(float64_t, mc_problem.l);
12 changes: 6 additions & 6 deletions src/shogun/optimization/liblinear/shogun_liblinear.cpp
@@ -46,7 +46,7 @@

using namespace shogun;

-l2r_lr_fun::l2r_lr_fun(const problem *p, float64_t* Cs)
+l2r_lr_fun::l2r_lr_fun(const liblinear_problem *p, float64_t* Cs)
{
int l=p->l;

@@ -161,7 +161,7 @@ void l2r_lr_fun::XTv(double *v, double *res_XTv)
}
}

-l2r_l2_svc_fun::l2r_l2_svc_fun(const problem *p, double* Cs)
+l2r_l2_svc_fun::l2r_l2_svc_fun(const liblinear_problem *p, double* Cs)
{
int l=p->l;

@@ -299,7 +299,7 @@ void l2r_l2_svc_fun::subXTv(double *v, double *XTv)
}
}

-l2r_l2_svr_fun::l2r_l2_svr_fun(const problem *prob, double *Cs, double p):
+l2r_l2_svr_fun::l2r_l2_svr_fun(const liblinear_problem *prob, double *Cs, double p):
l2r_l2_svc_fun(prob, Cs)
{
m_p = p;
@@ -387,7 +387,7 @@ void l2r_l2_svr_fun::grad(double *w, double *g)
#define GETI(i) (prob->y[i])
// To support weights for instances, use GETI(i) (i)

-Solver_MCSVM_CS::Solver_MCSVM_CS(const problem *p, int n_class,
+Solver_MCSVM_CS::Solver_MCSVM_CS(const liblinear_problem *p, int n_class,
double *weighted_C, double *w0_reg,
double epsilon, int max_it, double max_time,
mcsvm_state* given_state)
@@ -695,14 +695,14 @@ void Solver_MCSVM_CS::solve()
// Interface functions
//

-void destroy_model(struct model *model_)
+void destroy_model(struct liblinear_model *model_)
{
SG_FREE(model_->w);
SG_FREE(model_->label);
SG_FREE(model_);
}

-void destroy_param(parameter* param)
+void destroy_param(liblinear_parameter* param)
{
SG_FREE(param->weight_label);
SG_FREE(param->weight);
30 changes: 15 additions & 15 deletions src/shogun/optimization/liblinear/shogun_liblinear.h
@@ -49,7 +49,7 @@ extern "C" {
#endif

/** problem */
-struct problem
+struct liblinear_problem
{
/** l */
int32_t l;
@@ -64,7 +64,7 @@ struct problem
};

/** parameter */
-struct parameter
+struct liblinear_parameter
{
/** solver type */
int32_t solver_type;
@@ -83,10 +83,10 @@ struct parameter
};

/** model */
-struct model
+struct liblinear_model
{
/** parameter */
-struct parameter param;
+struct liblinear_parameter param;
/** number of classes */
int32_t nr_class;
/** number of features */
@@ -99,8 +99,8 @@ struct model
float64_t bias;
};

-void destroy_model(struct model *model_);
-void destroy_param(struct parameter *param);
+void destroy_model(struct liblinear_model *model_);
+void destroy_param(struct liblinear_parameter *param);
#ifdef __cplusplus
}
#endif
@@ -115,7 +115,7 @@ class l2loss_svm_fun : public function
* @param Cp Cp
* @param Cn Cn
*/
-l2loss_svm_fun(const problem *prob, float64_t Cp, float64_t Cn);
+l2loss_svm_fun(const liblinear_problem *prob, float64_t Cp, float64_t Cn);
~l2loss_svm_fun();

/** fun
@@ -155,7 +155,7 @@ class l2loss_svm_fun : public function
float64_t *D;
int32_t *I;
int32_t sizeI;
-const problem *prob;
+const liblinear_problem *prob;
};

/** class l2r_lr_fun */
@@ -168,7 +168,7 @@ class l2r_lr_fun : public function
* @param Cp Cp
* @param Cn Cn
*/
-l2r_lr_fun(const problem *prob, float64_t* C);
+l2r_lr_fun(const liblinear_problem *prob, float64_t* C);
~l2r_lr_fun();

/** fun
@@ -201,13 +201,13 @@ class l2r_lr_fun : public function
float64_t *C;
float64_t *z;
float64_t *D;
-const problem *m_prob;
+const liblinear_problem *m_prob;
};

class l2r_l2_svc_fun : public function
{
public:
-l2r_l2_svc_fun(const problem *prob, float64_t* Cs);
+l2r_l2_svc_fun(const liblinear_problem *prob, float64_t* Cs);
~l2r_l2_svc_fun();

double fun(double *w);
@@ -226,13 +226,13 @@ class l2r_l2_svc_fun : public function
double *D;
int *I;
int sizeI;
-const problem *m_prob;
+const liblinear_problem *m_prob;
};

class l2r_l2_svr_fun: public l2r_l2_svc_fun
{
public:
-l2r_l2_svr_fun(const problem *prob, double *Cs, double p);
+l2r_l2_svr_fun(const liblinear_problem *prob, double *Cs, double p);

double fun(double *w);
void grad(double *w, double *g);
@@ -296,7 +296,7 @@ struct mcsvm_state
class Solver_MCSVM_CS
{
public:
-Solver_MCSVM_CS(const problem *prob, int nr_class, double *C,
+Solver_MCSVM_CS(const liblinear_problem *prob, int nr_class, double *C,
double *w0, double eps, int max_iter,
double train_time, mcsvm_state* given_state);
~Solver_MCSVM_CS();
@@ -311,7 +311,7 @@ class Solver_MCSVM_CS
double eps;
double max_train_time;
double* w0;
-const problem *prob;
+const liblinear_problem *prob;
mcsvm_state* state;
};

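The remaining files in the commit are call sites that only pick up the new type names; the way they fill in the struct is unchanged. A condensed standalone sketch of that pattern follows, with a stand-in typedef and placeholder values instead of Shogun's float64_t, SG_MALLOC and feature objects, which are not repeated here.

#include <cstdint>
#include <vector>

typedef double float64_t;  // stand-in for Shogun's typedef so the sketch compiles on its own

// Abbreviated layout of the renamed struct (see the header diff above).
struct liblinear_problem
{
	int32_t l;      // number of training vectors
	int32_t n;      // feature dimension, +1 when a bias term is used
	float64_t* y;   // labels
};

int main()
{
	const int32_t num_vectors = 100;   // placeholder sizes
	const int32_t num_feat = 20;
	const bool use_bias = true;

	std::vector<float64_t> labels(num_vectors, 1.0);

	// Mirrors the setup in the train_machine() call sites of this diff:
	// prob.l / prob.n / prob.y are filled in exactly as before the rename.
	liblinear_problem prob;
	prob.l = num_vectors;
	prob.n = use_bias ? num_feat + 1 : num_feat;
	prob.y = labels.data();

	return (prob.l > 0 && prob.n > 0) ? 0 : 1;
}
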
4 changes: 2 additions & 2 deletions src/shogun/regression/svr/LibLinearRegression.cpp
@@ -81,7 +81,7 @@ bool CLibLinearRegression::train_machine(CFeatures* data)
else
w=SGVector<float64_t>(num_feat);

-problem prob;
+liblinear_problem prob;
if (m_use_bias)
{
prob.n=w.vlen+1;
@@ -154,7 +154,7 @@ bool CLibLinearRegression::train_machine(CFeatures* data)
#define GETI(i) (0)
// To support weights for instances, use GETI(i) (i)

-void CLibLinearRegression::solve_l2r_l1l2_svr(const problem *prob)
+void CLibLinearRegression::solve_l2r_l1l2_svr(const liblinear_problem *prob)
{
int l = prob->l;
double C = m_C;
2 changes: 1 addition & 1 deletion src/shogun/regression/svr/LibLinearRegression.h
@@ -143,7 +143,7 @@ class CLibLinearRegression : public CLinearMachine

private:
/** solve svr with l1 or l2 loss */
-void solve_l2r_l1l2_svr(const problem *prob);
+void solve_l2r_l1l2_svr(const liblinear_problem *prob);

/** init defaults */
void init_defaults();
4 changes: 2 additions & 2 deletions src/shogun/transfer/multitask/LibLinearMTL.cpp
@@ -103,7 +103,7 @@ bool CLibLinearMTL::train_machine(CFeatures* data)
else
training_w=SG_MALLOC(float64_t, num_feat+0);

-problem prob;
+liblinear_problem prob;
if (use_bias)
{
prob.n=num_feat+1;
@@ -181,7 +181,7 @@ bool CLibLinearMTL::train_machine(CFeatures* data)
// To support weights for instances, use GETI(i) (i)


-void CLibLinearMTL::solve_l2r_l1l2_svc(const problem *prob, double eps, double Cp, double Cn)
+void CLibLinearMTL::solve_l2r_l1l2_svc(const liblinear_problem *prob, double eps, double Cp, double Cn)
{


2 changes: 1 addition & 1 deletion src/shogun/transfer/multitask/LibLinearMTL.h
@@ -304,7 +304,7 @@ class CLibLinearMTL : public CLinearMachine
void init();

void solve_l2r_l1l2_svc(
-const problem *prob, double eps, double Cp, double Cn);
+const liblinear_problem *prob, double eps, double Cp, double Cn);


protected:
