-
Notifications
You must be signed in to change notification settings - Fork 29.1k
[SPARK-7780][MLLIB] intercept in logisticregressionwith lbfgs should not be regularized #10788
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
a529c01
f9e2635
7ebbd56
ef2a9b0
407491e
e02bf3a
8517539
a619d42
4febcc3
e8e03a1
38a024b
478b8c5
08589f5
f40c401
f35a16a
4d431a3
7e41928
ed351ff
3ac02d7
d1ce12b
8ca0fa9
6f66f2c
0cedd50
2bf289b
d7a2631
b0fe1e6
827dcde
ba99ce9
4b62629
58ad58a
4caab8c
daf17a5
72692f8
cec610a
e8b3ce8
0e2ea49
6fc1c23
05205b3
1c7a687
699997f
e4d6d14
43a3a32
e1b0389
a0bc58a
d695ec0
7501b4b
46ae406
e6b797a
8016ad8
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -247,15 +247,27 @@ class LogisticRegression @Since("1.2.0") ( | |
| @Since("1.5.0") | ||
| override def getThresholds: Array[Double] = super.getThresholds | ||
|
|
||
| override protected def train(dataset: DataFrame): LogisticRegressionModel = { | ||
| // Extract columns from data. If dataset is persisted, do not persist oldDataset. | ||
| private var optInitialModel: Option[LogisticRegressionModel] = None | ||
|
|
||
| /** @group setParam */ | ||
| private[spark] def setInitialModel(model: LogisticRegressionModel): this.type = { | ||
| this.optInitialModel = Some(model) | ||
| this | ||
| } | ||
|
|
||
| override protected[spark] def train(dataset: DataFrame): LogisticRegressionModel = { | ||
| val handlePersistence = dataset.rdd.getStorageLevel == StorageLevel.NONE | ||
| train(dataset, handlePersistence) | ||
| } | ||
|
|
||
| protected[spark] def train(dataset: DataFrame, handlePersistence: Boolean): | ||
| LogisticRegressionModel = { | ||
| val w = if ($(weightCol).isEmpty) lit(1.0) else col($(weightCol)) | ||
| val instances: RDD[Instance] = dataset.select(col($(labelCol)), w, col($(featuresCol))).map { | ||
| case Row(label: Double, weight: Double, features: Vector) => | ||
| Instance(label, weight, features) | ||
| } | ||
|
|
||
| val handlePersistence = dataset.rdd.getStorageLevel == StorageLevel.NONE | ||
| if (handlePersistence) instances.persist(StorageLevel.MEMORY_AND_DISK) | ||
|
|
||
| val (summarizer, labelSummarizer) = { | ||
|
|
@@ -343,7 +355,21 @@ class LogisticRegression @Since("1.2.0") ( | |
| val initialCoefficientsWithIntercept = | ||
| Vectors.zeros(if ($(fitIntercept)) numFeatures + 1 else numFeatures) | ||
|
|
||
| if ($(fitIntercept)) { | ||
| if (optInitialModel.isDefined && optInitialModel.get.coefficients.size != numFeatures) { | ||
| val vec = optInitialModel.get.coefficients | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It's used on L348 in the log warning |
||
| logWarning( | ||
| s"Initial coefficients provided ${vec} did not match the expected size ${numFeatures}") | ||
| } | ||
|
|
||
| if (optInitialModel.isDefined && optInitialModel.get.coefficients.size == numFeatures) { | ||
| val initialCoefficientsWithInterceptArray = initialCoefficientsWithIntercept.toArray | ||
| optInitialModel.get.coefficients.foreachActive { case (index, value) => | ||
| initialCoefficientsWithInterceptArray(index) = value | ||
| } | ||
| if ($(fitIntercept)) { | ||
| initialCoefficientsWithInterceptArray(numFeatures) == optInitialModel.get.intercept | ||
| } | ||
| } else if ($(fitIntercept)) { | ||
| /* | ||
| For binary logistic regression, when we initialize the coefficients as zeros, | ||
| it will converge faster if we initialize the intercept such that | ||
|
|
@@ -434,7 +460,7 @@ object LogisticRegression extends DefaultParamsReadable[LogisticRegression] { | |
| */ | ||
| @Since("1.4.0") | ||
| @Experimental | ||
| class LogisticRegressionModel private[ml] ( | ||
| class LogisticRegressionModel private[spark] ( | ||
| @Since("1.4.0") override val uid: String, | ||
| @Since("1.6.0") val coefficients: Vector, | ||
| @Since("1.3.0") val intercept: Double) | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -19,15 +19,18 @@ package org.apache.spark.mllib.classification | |
|
|
||
| import org.apache.spark.SparkContext | ||
| import org.apache.spark.annotation.Since | ||
| import org.apache.spark.ml.util.Identifiable | ||
| import org.apache.spark.mllib.classification.impl.GLMClassificationModel | ||
| import org.apache.spark.mllib.linalg.{DenseVector, Vector} | ||
| import org.apache.spark.mllib.linalg.{DenseVector, Vector, Vectors} | ||
| import org.apache.spark.mllib.linalg.BLAS.dot | ||
| import org.apache.spark.mllib.optimization._ | ||
| import org.apache.spark.mllib.pmml.PMMLExportable | ||
| import org.apache.spark.mllib.regression._ | ||
| import org.apache.spark.mllib.util.{DataValidators, Loader, Saveable} | ||
| import org.apache.spark.mllib.util.MLUtils.appendBias | ||
| import org.apache.spark.rdd.RDD | ||
|
|
||
| import org.apache.spark.sql.SQLContext | ||
| import org.apache.spark.storage.StorageLevel | ||
|
|
||
| /** | ||
| * Classification model trained using Multinomial/Binary Logistic Regression. | ||
|
|
@@ -332,6 +335,13 @@ object LogisticRegressionWithSGD { | |
| * Limited-memory BFGS. Standard feature scaling and L2 regularization are used by default. | ||
| * NOTE: Labels used in Logistic Regression should be {0, 1, ..., k - 1} | ||
| * for k classes multi-label classification problem. | ||
| * | ||
| * Earlier implementations of LogisticRegressionWithLBFGS applied a regularization | ||
| * penalty to all elements including the intercept. If this is called with one of | ||
| * the standard updaters (L1Updater or SquaredL2Updater), this is translated | ||
| * into a call to ml.LogisticRegression, otherwise this will use the existing mllib | ||
| * GeneralizedLinearAlgorithm trainer, resulting in a regularization penalty to the | ||
| * intercept. | ||
| */ | ||
| @Since("1.1.0") | ||
| class LogisticRegressionWithLBFGS | ||
|
|
@@ -374,4 +384,72 @@ class LogisticRegressionWithLBFGS | |
| new LogisticRegressionModel(weights, intercept, numFeatures, numOfLinearPredictor + 1) | ||
| } | ||
| } | ||
|
|
||
| /** | ||
| * Run Logistic Regression with the configured parameters on an input RDD | ||
| * of LabeledPoint entries. | ||
| * | ||
| * If a known updater is used, this calls the ml implementation, to avoid | ||
| * applying a regularization penalty to the intercept, otherwise | ||
| * defaults to the mllib implementation. If more than two classes | ||
| * or feature scaling is disabled, always uses mllib implementation. | ||
| * If using ml implementation, uses ml code to generate initial weights. | ||
| */ | ||
| override def run(input: RDD[LabeledPoint]): LogisticRegressionModel = { | ||
| run(input, generateInitialWeights(input), userSuppliedWeights = false) | ||
| } | ||
|
|
||
| /** | ||
| * Run Logistic Regression with the configured parameters on an input RDD | ||
| * of LabeledPoint entries starting from the initial weights provided. | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add extra new line before |
||
| * | ||
| * If a known updater is used, this calls the ml implementation, to avoid | ||
| * applying a regularization penalty to the intercept, otherwise | ||
| * defaults to the mllib implementation. If more than two classes | ||
| * or feature scaling is disabled, always uses mllib implementation. | ||
| * Uses user provided weights. | ||
| */ | ||
| override def run(input: RDD[LabeledPoint], initialWeights: Vector): LogisticRegressionModel = { | ||
| run(input, initialWeights, userSuppliedWeights = true) | ||
| } | ||
|
|
||
| private def run(input: RDD[LabeledPoint], initialWeights: Vector, userSuppliedWeights: Boolean): | ||
| LogisticRegressionModel = { | ||
| // ml's Logistic regression only supports binary classification currently. | ||
| if (numOfLinearPredictor == 1) { | ||
| def runWithMlLogisitcRegression(elasticNetParam: Double) = { | ||
| // Prepare the ml LogisticRegression based on our settings | ||
| val lr = new org.apache.spark.ml.classification.LogisticRegression() | ||
| lr.setRegParam(optimizer.getRegParam()) | ||
| lr.setElasticNetParam(elasticNetParam) | ||
| lr.setStandardization(useFeatureScaling) | ||
| if (userSuppliedWeights) { | ||
| val uid = Identifiable.randomUID("logreg-static") | ||
| lr.setInitialModel(new org.apache.spark.ml.classification.LogisticRegressionModel( | ||
| uid, initialWeights, 1.0)) | ||
| } | ||
| lr.setFitIntercept(addIntercept) | ||
| lr.setMaxIter(optimizer.getNumIterations()) | ||
| lr.setTol(optimizer.getConvergenceTol()) | ||
| // Convert our input into a DataFrame | ||
| val sqlContext = new SQLContext(input.context) | ||
| import sqlContext.implicits._ | ||
| val df = input.toDF() | ||
| // Determine if we should cache the DF | ||
| val handlePersistence = input.getStorageLevel == StorageLevel.NONE | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Will this cause double caching? Let's say input RDD is cached, so
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Good point, in a previous version of the code we passed handlePersistence down through to avoid this. I've updated it to do the same here. |
||
| // Train our model | ||
| val mlLogisticRegresionModel = lr.train(df, handlePersistence) | ||
| // convert the model | ||
| val weights = Vectors.dense(mlLogisticRegresionModel.coefficients.toArray) | ||
| createModel(weights, mlLogisticRegresionModel.intercept) | ||
| } | ||
| optimizer.getUpdater() match { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. when
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Okay, this will make the test harder to write. I don't care about this one now. |
||
| case x: SquaredL2Updater => runWithMlLogisitcRegression(1.0) | ||
| case x: L1Updater => runWithMlLogisitcRegression(0.0) | ||
| case _ => super.run(input, initialWeights) | ||
| } | ||
| } else { | ||
| super.run(input, initialWeights) | ||
| } | ||
| } | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
How about we follow #8972, and have the following code. We can create another separate JIRA for moving
setInitialModel to public with a sharedParam. There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
So we have setInitialWeights on StreamingLogisticRegressionWithSGD - would it be better to have it match StreamingLogisticRegressionWithSGD ?