-
Notifications
You must be signed in to change notification settings - Fork 28.1k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Spark-7780][MLLIB] Intercept in logisticregressionwith lbfgs should not be regularized #6386
Changes from all commits
a529c01
f9e2635
7ebbd56
ef2a9b0
407491e
e02bf3a
8517539
a619d42
4febcc3
e8e03a1
38a024b
478b8c5
08589f5
f40c401
f35a16a
4d431a3
7e41928
ed351ff
3ac02d7
d1ce12b
8ca0fa9
6f66f2c
0cedd50
2bf289b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -21,13 +21,15 @@ import org.apache.spark.SparkContext | |
import org.apache.spark.annotation.Since | ||
import org.apache.spark.mllib.classification.impl.GLMClassificationModel | ||
import org.apache.spark.mllib.linalg.BLAS.dot | ||
import org.apache.spark.mllib.linalg.{DenseVector, Vector} | ||
import org.apache.spark.mllib.linalg.{DenseVector, Vector, Vectors} | ||
import org.apache.spark.mllib.optimization._ | ||
import org.apache.spark.mllib.pmml.PMMLExportable | ||
import org.apache.spark.mllib.regression._ | ||
import org.apache.spark.mllib.util.{DataValidators, Saveable, Loader} | ||
import org.apache.spark.mllib.util.MLUtils.appendBias | ||
import org.apache.spark.rdd.RDD | ||
|
||
import org.apache.spark.sql.SQLContext | ||
import org.apache.spark.storage.StorageLevel | ||
|
||
/** | ||
* Classification model trained using Multinomial/Binary Logistic Regression. | ||
|
@@ -332,6 +334,13 @@ object LogisticRegressionWithSGD { | |
* Limited-memory BFGS. Standard feature scaling and L2 regularization are used by default. | ||
* NOTE: Labels used in Logistic Regression should be {0, 1, ..., k - 1} | ||
* for k classes multi-label classification problem. | ||
* | ||
* Earlier implementations of LogisticRegressionWithLBFGS applied a regularization | ||
* penalty to all elements including the intercept. If this is called with one of the | ||
* standard updaters (L1Updater or SquaredL2Updater), this is translated | ||
* into a call to ml.LogisticRegression; otherwise this will use the existing mllib | ||
* GeneralizedLinearAlgorithm trainer, resulting in a regularization penalty to the | ||
* intercept. | ||
*/ | ||
@Since("1.1.0") | ||
class LogisticRegressionWithLBFGS | ||
|
@@ -374,4 +383,83 @@ class LogisticRegressionWithLBFGS | |
new LogisticRegressionModel(weights, intercept, numFeatures, numOfLinearPredictor + 1) | ||
} | ||
} | ||
|
||
|
||
/** | ||
* Run the algorithm with the configured parameters on an input RDD | ||
* of LabeledPoint entries starting from the initial weights provided. | ||
* If a known updater is used, calls the ml implementation to avoid | ||
* applying a regularization penalty to the intercept; otherwise | ||
* defaults to the mllib implementation. If there are more than two classes | ||
* or feature scaling is disabled, always uses the mllib implementation. | ||
* If using ml implementation, uses ml code to generate initial weights. | ||
*/ | ||
override def run(input: RDD[LabeledPoint]): LogisticRegressionModel = { | ||
run(input, generateInitialWeights(input), false) | ||
} | ||
|
||
/** | ||
* Run the algorithm with the configured parameters on an input RDD | ||
* of LabeledPoint entries starting from the initial weights provided. | ||
* If a known updater is used, calls the ml implementation to avoid | ||
* applying a regularization penalty to the intercept; otherwise | ||
* defaults to the mllib implementation. If there are more than two classes | ||
* or feature scaling is disabled, always uses the mllib implementation. | ||
* Uses user-provided weights. | ||
*/ | ||
override def run(input: RDD[LabeledPoint], initialWeights: Vector): LogisticRegressionModel = { | ||
run(input, initialWeights, true) | ||
} | ||
|
||
private def run(input: RDD[LabeledPoint], initialWeights: Vector, userSuppliedWeights: Boolean): | ||
LogisticRegressionModel = { | ||
// ml's Logistic regression only supports binary classification currently. | ||
if (numOfLinearPredictor == 1 && useFeatureScaling) { | ||
def runWithMlLogisitcRegression(elasticNetParam: Double) = { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. How about? Then we don't have to change the other side. Thanks.
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We can do that, the only downside is that on the other side it's ripped back out right away. This would also lose the initial weights but I could either modify the signature on the other side to take initial weights or require that the initial weights be zero (which do you think is better)? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. When people train LoR/LiR with multiple lambdas of regularization for cross-validation, the training algorithm will start from the largest lambda and return the model. The model will be used as the initial condition for the second largest lambda. The process will be repeated until all the lambdas are trained. By using the previous model as initial weights, the convergence rate will be way faster. http://www.jstatsoft.org/v33/i01/paper As a result, in order to do so, we need to have the ability to specify initial weights. Feel free to add private API to set weights. If the dim of weights is different from the data, then we can use the default one as initial condition. PS, once this private API is added, we can hook it up with CrossValidation API to train multiple lambdas efficiently. Currently, with multiple lambda, we train from scratch without using the information from previous results. No JIRA now, you can open one if you are interested in this. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I don't see the passed There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. There was a problem hiding this comment. 
Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think initial weights can be part of params, but we can do it in next iteration when we work on CrossValidation stuff. This regularization path idea will be the same for Liner Regression with elastic net. |
||
// Prepare the ml LogisticRegression based on our settings | ||
val lr = new org.apache.spark.ml.classification.LogisticRegression() | ||
lr.setRegParam(optimizer.getRegParam()) | ||
lr.setElasticNetParam(elasticNetParam) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. add lr.setFitIntercept(addIntercept)
lr.setMaxIter(optimizer.getMaxIterParam())
lr.setTol(optimizer.getTolParam()) and add the corresponding api in LBFGS as private. |
||
if (userSuppliedWeights) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
val initialWeightsWithIntercept = if(addIntercept) {
appendBias(initialWeights)
} else {
initialWeights
} we add the last element as |
||
val initialWeightsWithIntercept = if (addIntercept) { | ||
appendBias(initialWeights) | ||
} else { | ||
initialWeights | ||
} | ||
lr.setInitialWeights(initialWeightsWithIntercept) | ||
} | ||
lr.setFitIntercept(addIntercept) | ||
lr.setMaxIter(optimizer.getNumIterations()) | ||
lr.setTol(optimizer.getConvergenceTol()) | ||
// Convert our input into a DataFrame | ||
val sqlContext = new SQLContext(input.context) | ||
import sqlContext.implicits._ | ||
val df = input.toDF() | ||
// Determine if we should cache the DF | ||
val handlePersistence = input.getStorageLevel == StorageLevel.NONE | ||
if (handlePersistence) { | ||
df.persist(StorageLevel.MEMORY_AND_DISK) | ||
} | ||
// Train our model | ||
val mlLogisticRegresionModel = lr.train(df) | ||
// unpersist if we persisted | ||
if (handlePersistence) { | ||
df.unpersist() | ||
} | ||
// convert the model | ||
val weights = mlLogisticRegresionModel.weights match { | ||
case x: DenseVector => x | ||
case y: Vector => Vectors.dense(y.toArray) | ||
} | ||
createModel(weights, mlLogisticRegresionModel.intercept) | ||
} | ||
optimizer.getUpdater() match { | ||
case x: SquaredL2Updater => runWithMlLogisitcRegression(1.0) | ||
case x: L1Updater => runWithMlLogisitcRegression(0.0) | ||
case _ => super.run(input, initialWeights) | ||
} | ||
} else { | ||
super.run(input, initialWeights) | ||
} | ||
} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
ditto. revert