[SPARK-10194] [MLlib] [PySpark] SGD algorithms need convergenceTol parameter in Python #8457
Changes from 1 commit
@@ -241,7 +241,7 @@ class LogisticRegressionWithSGD(object):
     @classmethod
     def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
               initialWeights=None, regParam=0.01, regType="l2", intercept=False,
-              validateData=True):
+              validateData=True, convergenceTol=0.001):
         """
         Train a logistic regression model on the given data.
@@ -274,11 +274,13 @@ def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
         :param validateData:    Boolean parameter which indicates if
                                 the algorithm should validate data
                                 before training. (default: True)
+        :param convergenceTol:  A condition which decides iteration termination.
+                                (default: 0.001)
         """
         def train(rdd, i):
             return callMLlibFunc("trainLogisticRegressionModelWithSGD", rdd, int(iterations),
                                  float(step), float(miniBatchFraction), i, float(regParam), regType,
-                                 bool(intercept), bool(validateData))
+                                 bool(intercept), bool(validateData), float(convergenceTol))

         return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
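For context (not part of this diff), a minimal usage sketch of the new keyword. The SparkContext setup and toy data below are illustrative assumptions, not taken from the PR:

from pyspark import SparkContext
from pyspark.mllib.classification import LogisticRegressionWithSGD
from pyspark.mllib.regression import LabeledPoint

sc = SparkContext("local[2]", "convergenceTolDemo")  # hypothetical local context
# Tiny made-up dataset, purely for illustration.
points = sc.parallelize([LabeledPoint(0.0, [0.0, 1.0]),
                         LabeledPoint(1.0, [1.0, 0.0])])
# convergenceTol controls when iteration terminates; a smaller tolerance keeps
# SGD running closer to the full iteration budget before it stops.
model = LogisticRegressionWithSGD.train(points, iterations=200, convergenceTol=1e-4)
print(model.predict([1.0, 0.0]))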
@@ -439,7 +441,7 @@ class SVMWithSGD(object):
     @classmethod
     def train(cls, data, iterations=100, step=1.0, regParam=0.01,
               miniBatchFraction=1.0, initialWeights=None, regType="l2",
-              intercept=False, validateData=True):
+              intercept=False, validateData=True, convergenceTol=0.001):
         """
         Train a support vector machine on the given data.
@@ -472,11 +474,13 @@ def train(cls, data, iterations=100, step=1.0, regParam=0.01,
         :param validateData:    Boolean parameter which indicates if
                                 the algorithm should validate data
                                 before training. (default: True)
+        :param convergenceTol:  A condition which decides iteration termination.
+                                (default: 0.001)
         """
         def train(rdd, i):
             return callMLlibFunc("trainSVMModelWithSGD", rdd, int(iterations), float(step),
                                  float(regParam), float(miniBatchFraction), i, regType,
-                                 bool(intercept), bool(validateData))
+                                 bool(intercept), bool(validateData), float(convergenceTol))

         return _regression_train_wrapper(train, SVMModel, data, initialWeights)
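The SVM path follows the same pattern; this sketch reuses the sc and points RDD assumed in the earlier example:

from pyspark.mllib.classification import SVMWithSGD

# convergenceTol is forwarded to the JVM helper alongside the existing arguments.
svm_model = SVMWithSGD.train(points, iterations=200, regParam=0.01, convergenceTol=1e-3)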
@@ -600,12 +604,15 @@ class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
     :param miniBatchFraction: Fraction of data on which SGD is run for each
                               iteration.
     :param regParam: L2 Regularization parameter.
+    :param convergenceTol: A condition which decides iteration termination.
     """
-    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, regParam=0.01):
+    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, regParam=0.01,
+                 convergenceTol=0.001):
         self.stepSize = stepSize
         self.numIterations = numIterations
         self.regParam = regParam
         self.miniBatchFraction = miniBatchFraction
+        self.convergenceTol = convergenceTol
         self._model = None
         super(StreamingLogisticRegressionWithSGD, self).__init__(
             model=self._model)

Review comment (on the new :param convergenceTol: line): Not part of this PR, but do you mind documenting default values?

Author reply: Yes, I think I can do these in the follow-up PR because I found that there is much room to improve for
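A hedged sketch of the streaming estimator with the new constructor argument; the StreamingContext wiring is omitted and trainingStream is an assumed DStream of LabeledPoint, not something defined in this PR:

from pyspark.mllib.classification import StreamingLogisticRegressionWithSGD

# The tolerance is stored on the estimator and handed to each per-batch training call.
slr = StreamingLogisticRegressionWithSGD(stepSize=0.1, numIterations=50,
                                         regParam=0.01, convergenceTol=0.001)
slr.setInitialWeights([0.0, 0.0])
slr.trainOn(trainingStream)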
@@ -28,7 +28,8 @@
            'LinearRegressionModel', 'LinearRegressionWithSGD',
            'RidgeRegressionModel', 'RidgeRegressionWithSGD',
            'LassoModel', 'LassoWithSGD', 'IsotonicRegressionModel',
-           'IsotonicRegression']
+           'IsotonicRegression', 'StreamingLinearAlgorithm',
+           'StreamingLinearRegressionWithSGD']


 class LabeledPoint(object):
@@ -202,7 +203,7 @@ class LinearRegressionWithSGD(object):
     @classmethod
     def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
               initialWeights=None, regParam=0.0, regType=None, intercept=False,
-              validateData=True):
+              validateData=True, convergenceTol=0.001):
         """
         Train a linear regression model using Stochastic Gradient
         Descent (SGD).
@@ -244,11 +245,14 @@ def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
         :param validateData:    Boolean parameter which indicates if
                                 the algorithm should validate data
                                 before training. (default: True)
+        :param convergenceTol:  A condition which decides iteration termination.
+                                (default: 0.001)
         """
         def train(rdd, i):
             return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations),
                                  float(step), float(miniBatchFraction), i, float(regParam),
-                                 regType, bool(intercept), bool(validateData))
+                                 regType, bool(intercept), bool(validateData),
+                                 float(convergenceTol))

         return _regression_train_wrapper(train, LinearRegressionModel, data, initialWeights)
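A usage sketch for the regression side, again assuming the sc from the earlier example and made-up data:

from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD

# Roughly linear toy data, illustration only.
reg_points = sc.parallelize([LabeledPoint(y, [x])
                             for x, y in [(1.0, 2.0), (2.0, 4.1), (3.0, 5.9)]])
lin_model = LinearRegressionWithSGD.train(reg_points, iterations=500, step=0.1,
                                          intercept=True, convergenceTol=1e-4)
print(lin_model.weights, lin_model.intercept)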
@@ -330,7 +334,7 @@ class LassoWithSGD(object):
     @classmethod
     def train(cls, data, iterations=100, step=1.0, regParam=0.01,
               miniBatchFraction=1.0, initialWeights=None, intercept=False,
-              validateData=True):
+              validateData=True, convergenceTol=0.001):
         """
         Train a regression model with L1-regularization using
         Stochastic Gradient Descent.
@@ -362,11 +366,13 @@ def train(cls, data, iterations=100, step=1.0, regParam=0.01,
         :param validateData:    Boolean parameter which indicates if
                                 the algorithm should validate data
                                 before training. (default: True)
+        :param convergenceTol:  A condition which decides iteration termination.
+                                (default: 0.001)
         """
         def train(rdd, i):
             return callMLlibFunc("trainLassoModelWithSGD", rdd, int(iterations), float(step),
                                  float(regParam), float(miniBatchFraction), i, bool(intercept),
-                                 bool(validateData))
+                                 bool(validateData), float(convergenceTol))

         return _regression_train_wrapper(train, LassoModel, data, initialWeights)
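The Lasso call takes the same keyword; a short sketch reusing the reg_points RDD assumed above:

from pyspark.mllib.regression import LassoWithSGD

# L1-regularized training with the new stopping tolerance.
lasso_model = LassoWithSGD.train(reg_points, iterations=500, step=0.1,
                                 regParam=0.01, convergenceTol=1e-4)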
@@ -449,7 +455,7 @@ class RidgeRegressionWithSGD(object):
     @classmethod
     def train(cls, data, iterations=100, step=1.0, regParam=0.01,
               miniBatchFraction=1.0, initialWeights=None, intercept=False,
-              validateData=True):
+              validateData=True, convergenceTol=0.001):
         """
         Train a regression model with L2-regularization using
         Stochastic Gradient Descent.
@@ -481,11 +487,13 @@ def train(cls, data, iterations=100, step=1.0, regParam=0.01,
         :param validateData:    Boolean parameter which indicates if
                                 the algorithm should validate data
                                 before training. (default: True)
+        :param convergenceTol:  A condition which decides iteration termination.
+                                (default: 0.001)
         """
         def train(rdd, i):
             return callMLlibFunc("trainRidgeModelWithSGD", rdd, int(iterations), float(step),
                                  float(regParam), float(miniBatchFraction), i, bool(intercept),
-                                 bool(validateData))
+                                 bool(validateData), float(convergenceTol))

         return _regression_train_wrapper(train, RidgeRegressionModel, data, initialWeights)
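And the L2-regularized counterpart, under the same assumptions:

from pyspark.mllib.regression import RidgeRegressionWithSGD

ridge_model = RidgeRegressionWithSGD.train(reg_points, iterations=500, step=0.1,
                                           regParam=0.01, convergenceTol=1e-4)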
@@ -636,15 +644,17 @@ class StreamingLinearRegressionWithSGD(StreamingLinearAlgorithm):
     After training on a batch of data, the weights obtained at the end of
     training are used as initial weights for the next batch.

-    :param: stepSize Step size for each iteration of gradient descent.
-    :param: numIterations Total number of iterations run.
-    :param: miniBatchFraction Fraction of data on which SGD is run for each
+    :param stepSize: Step size for each iteration of gradient descent.
+    :param numIterations: Total number of iterations run.
+    :param miniBatchFraction: Fraction of data on which SGD is run for each
                               iteration.
+    :param convergenceTol: A condition which decides iteration termination.
     """
-    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0):
+    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, convergenceTol=0.001):
         self.stepSize = stepSize
         self.numIterations = numIterations
         self.miniBatchFraction = miniBatchFraction
+        self.convergenceTol = convergenceTol
         self._model = None
         super(StreamingLinearRegressionWithSGD, self).__init__(
             model=self._model)

Review comment (on the :param miniBatchFraction: line): Not your fault, but if you make any additional changes, can you add a "." at the end of this sentence? Also, the default values should be documented.

Review comment (on the new __init__ signature): chop down args
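For completeness, a sketch of the streaming regression estimator with the new constructor argument; trainingStream is again an assumed DStream of LabeledPoint, not part of this PR:

from pyspark.mllib.regression import StreamingLinearRegressionWithSGD

slrr = StreamingLinearRegressionWithSGD(stepSize=0.1, numIterations=50, convergenceTol=0.001)
slrr.setInitialWeights([0.0])
slrr.trainOn(trainingStream)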