#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

from abc import ABCMeta

from pyspark import keyword_only, since
from pyspark.ml import Predictor, PredictionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.param.shared import HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol, \
    Param, Params, TypeConverters, HasMaxIter, HasTol, HasFitIntercept, HasAggregationDepth, \
    HasMaxBlockSizeInMB, HasRegParam, HasSolver, HasStepSize, HasSeed, HasElasticNetParam, \
    HasStandardization, HasLoss, HasVarianceCol
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
    _TreeEnsembleModel, _RandomForestParams, _GBTParams, _TreeRegressorParams
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, HasTrainingSummary, \
    GeneralJavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, \
    JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame


__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
           'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
           'GBTRegressor', 'GBTRegressionModel',
           'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
           'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
           'IsotonicRegression', 'IsotonicRegressionModel',
           'LinearRegression', 'LinearRegressionModel',
           'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
           'RandomForestRegressor', 'RandomForestRegressionModel',
           'FMRegressor', 'FMRegressionModel']
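
# A minimal end-to-end sketch (not part of this module's doctests). It assumes an
# active SparkSession named `spark` and a DataFrame `train_df` with a "features"
# vector column and a "label" column; those names are illustrative only.
#
#   from pyspark.ml.regression import LinearRegression
#
#   lr = LinearRegression(maxIter=10, regParam=0.1, elasticNetParam=0.0)
#   model = lr.fit(train_df)                  # returns a LinearRegressionModel
#   predictions = model.transform(train_df)   # adds a "prediction" column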


class Regressor(Predictor, _PredictorParams, metaclass=ABCMeta):
    """
    Regressor for regression tasks.

    .. versionadded:: 3.0.0
    """
    pass


class RegressionModel(PredictionModel, _PredictorParams, metaclass=ABCMeta):
    """
    Model produced by a ``Regressor``.

    .. versionadded:: 3.0.0
    """
    pass


class _JavaRegressor(Regressor, JavaPredictor, metaclass=ABCMeta):
    """
    Java Regressor for regression tasks.

    .. versionadded:: 3.0.0
    """
    pass


class _JavaRegressionModel(RegressionModel, JavaPredictionModel, metaclass=ABCMeta):
    """
    Java Model produced by a ``_JavaRegressor``.
    To be mixed in with :class:`pyspark.ml.JavaModel`

    .. versionadded:: 3.0.0
    """
    pass
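
# The four classes above are the shared bases for this module: ``Regressor`` and
# ``RegressionModel`` are the backend-agnostic abstractions, while
# ``_JavaRegressor`` and ``_JavaRegressionModel`` bind them to the JVM
# implementations used by the concrete estimators defined below.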


class _LinearRegressionParams(_PredictorParams, HasRegParam, HasElasticNetParam, HasMaxIter,
                              HasTol, HasFitIntercept, HasStandardization, HasWeightCol, HasSolver,
                              HasAggregationDepth, HasLoss, HasMaxBlockSizeInMB):
    """
    Params for :py:class:`LinearRegression` and :py:class:`LinearRegressionModel`.

    .. versionadded:: 3.0.0
    """

    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString)

    loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " +
                 "options: squaredError, huber.", typeConverter=TypeConverters.toString)

    epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " +
                    "robustness. Must be > 1.0. Only valid when loss is huber",
                    typeConverter=TypeConverters.toFloat)

    def __init__(self, *args):
        super(_LinearRegressionParams, self).__init__(*args)
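        # Default Param values shared by LinearRegression and LinearRegressionModel.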
        self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35,
                         maxBlockSizeInMB=0.0)

    @since("2.3.0")
    def getEpsilon(self):
        """
        Gets the value of epsilon or its default value.
        """
        return self.getOrDefault(self.epsilon)


@inherit_doc
class LinearRegression(_JavaRegressor, _LinearRegressionParams, JavaMLWritable, JavaMLReadable):
    """
    Linear regression.

    The learning objective is to minimize the specified loss function, with regularization.
    This supports two kinds of loss:

    * squaredError (a.k.a. squared loss)
    * huber (a hybrid of squared error for relatively small errors and absolute error for \
    relatively large ones, and we estimate the scale parameter from training data)

    This supports multiple types of regularization:

    * none (a.k.a. ordinary least squares)
    * L2 (ridge regression)
    * L1 (Lasso)
    * L2 + L1 (elastic net)

    .. versionadded:: 1.4.0

    Notes
    -----
    Fitting with huber loss only supports none and L2 regularization.

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, 2.0, Vectors.dense(1.0)),
    ...     (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> lr = LinearRegression(regParam=0.0, solver="normal", weightCol="weight")
    >>> lr.setMaxIter(5)
    LinearRegression...
    >>> lr.getMaxIter()
    5
    >>> lr.setRegParam(0.1)
    LinearRegression...
    >>> lr.getRegParam()
    0.1
    >>> lr.setRegParam(0.0)
    LinearRegression...
    >>> model = lr.fit(df)
    >>> model.setFeaturesCol("features")
    LinearRegressionModel...
    >>> model.setPredictionCol("newPrediction")
    LinearRegressionModel...
    >>> model.getMaxIter()
    5
    >>> model.getMaxBlockSizeInMB()
    0.0
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> abs(model.predict(test0.head().features) - (-1.0)) < 0.001
    True
    >>> abs(model.transform(test0).head().newPrediction - (-1.0)) < 0.001
    True
    >>> abs(model.coefficients[0] - 1.0) < 0.001
    True
    >>> abs(model.intercept - 0.0) < 0.001
    True
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> abs(model.transform(test1).head().newPrediction - 1.0) < 0.001
    True
    >>> lr.setParams("vector")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> lr_path = temp_path + "/lr"
    >>> lr.save(lr_path)
    >>> lr2 = LinearRegression.load(lr_path)
    >>> lr2.getMaxIter()
    5
    >>> model_path = temp_path + "/lr_model"
    >>> model.save(model_path)
    >>> model2 = LinearRegressionModel.load(model_path)
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> model.numFeatures
    1
    >>> model.write().format("pmml").save(model_path + "_2")
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
                 loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
                 loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0)
        """
        super(LinearRegression, self).__init__()
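        # Create the companion Java LinearRegression estimator on the JVM (via the
        # Py4J gateway); it backs this Python wrapper.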
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.LinearRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
|
2020-09-22 20:28:33 -04:00
|
|
|
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-09-11 11:50:35 -04:00
|
|
|
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
|
2017-12-20 20:51:42 -05:00
|
|
|
standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
|
2020-11-18 10:02:31 -05:00
|
|
|
loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2020-09-22 20:28:33 -04:00
|
|
|
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-09-11 11:50:35 -04:00
|
|
|
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
|
2017-12-20 20:51:42 -05:00
|
|
|
standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
|
2020-11-18 10:02:31 -05:00
|
|
|
loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0)
|
2015-05-12 15:17:05 -04:00
|
|
|
Sets params for linear regression.
|
|
|
|
"""
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-05-12 15:17:05 -04:00
|
|
|
return self._set(**kwargs)
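A brief follow-on sketch: `setParams` reconfigures an existing estimator in place before refitting; only the Params that are passed are changed.
```python
# Assuming the LinearRegression instance `lr` from the earlier sketch.
lr.setParams(regParam=0.1, elasticNetParam=0.5, maxIter=20)
print(lr.getRegParam(), lr.getElasticNetParam(), lr.getMaxIter())
```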
|
|
|
|
|
|
|
|
def _create_model(self, java_model):
|
|
|
|
return LinearRegressionModel(java_model)
|
|
|
|
|
2017-12-20 20:51:42 -05:00
|
|
|
@since("2.3.0")
|
|
|
|
def setEpsilon(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`epsilon`.
|
|
|
|
"""
|
|
|
|
return self._set(epsilon=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
def setMaxIter(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxIter`.
|
|
|
|
"""
|
|
|
|
return self._set(maxIter=value)
|
|
|
|
|
|
|
|
def setRegParam(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`regParam`.
|
|
|
|
"""
|
|
|
|
return self._set(regParam=value)
|
|
|
|
|
|
|
|
def setTol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`tol`.
|
|
|
|
"""
|
|
|
|
return self._set(tol=value)
|
|
|
|
|
|
|
|
def setElasticNetParam(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`elasticNetParam`.
|
|
|
|
"""
|
|
|
|
return self._set(elasticNetParam=value)
|
|
|
|
|
|
|
|
def setFitIntercept(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`fitIntercept`.
|
|
|
|
"""
|
|
|
|
return self._set(fitIntercept=value)
|
|
|
|
|
|
|
|
def setStandardization(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`standardization`.
|
|
|
|
"""
|
|
|
|
return self._set(standardization=value)
|
|
|
|
|
|
|
|
def setWeightCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`weightCol`.
|
|
|
|
"""
|
|
|
|
return self._set(weightCol=value)
|
|
|
|
|
|
|
|
def setSolver(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`solver`.
|
|
|
|
"""
|
|
|
|
return self._set(solver=value)
|
|
|
|
|
|
|
|
def setAggregationDepth(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`aggregationDepth`.
|
|
|
|
"""
|
|
|
|
return self._set(aggregationDepth=value)
|
|
|
|
|
|
|
|
def setLoss(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`loss`.
|
|
|
|
"""
|
|
|
|
return self._set(loss=value)
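A hedged sketch of switching to the robust huber objective via the setters above; `epsilon` only takes effect when `loss="huber"`, and huber training goes through the l-bfgs path (solver "auto" handles that).
```python
# Assuming `lr` and `df` from the earlier sketches; setters return self and can be chained.
lr.setSolver("auto").setLoss("huber").setEpsilon(1.35)
huber_model = lr.fit(df)
print(huber_model.scale)  # scaling factor used by the huber objective
```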
|
|
|
|
|
[SPARK-30660][ML][PYSPARK] LinearRegression blockify input vectors
### What changes were proposed in this pull request?
1, add new param blockSize;
2, add a new class InstanceBlock;
3, if blockSize==1, keep original behavior; if blockSize>1, stack input vectors to blocks (like ALS/MLP);
4, if blockSize>1, standardize the input outside of optimization procedure;
### Why are the changes needed?
it will obtain performance gain on dense datasets, such as `epsilon`
1, reduce RAM to persist training dataset; (save about 40% RAM)
2, use Level-2 BLAS routines; (up to 6X(squaredError)~12X(huber) speedup)
### Does this PR introduce _any_ user-facing change?
Yes, a new param is added
### How was this patch tested?
existing and added testsuites
Closes #28471 from zhengruifeng/blockify_lir_II.
Authored-by: zhengruifeng <ruifengz@foxmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-05-07 22:52:01 -04:00
|
|
|
@since("3.1.0")
|
2020-11-18 10:02:31 -05:00
|
|
|
def setMaxBlockSizeInMB(self, value):
|
2020-05-07 22:52:01 -04:00
|
|
|
"""
|
2020-11-18 10:02:31 -05:00
|
|
|
Sets the value of :py:attr:`maxBlockSizeInMB`.
|
2020-05-07 22:52:01 -04:00
|
|
|
"""
|
2020-11-18 10:02:31 -05:00
|
|
|
return self._set(maxBlockSizeInMB=value)
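A short sketch of the block-size hint discussed in the commit message above: 0.0 means "choose automatically", and a positive value (in MB) controls how input vectors are stacked into matrix blocks.
```python
# Assuming the LinearRegression instance `lr` from the earlier sketches.
lr.setMaxBlockSizeInMB(0.25)
print(lr.getMaxBlockSizeInMB())
```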
|
2020-05-07 22:52:01 -04:00
|
|
|
|
2017-12-20 20:51:42 -05:00
|
|
|
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public (as opposed to `Java*` variants, which are commonly described in docstrings as private) hierarchy which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and / or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally "private" `Java*` classes in`ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is for discussion if the same should be done to equivalent classes from `ml.wrapper`.
If we take `JavaClassifier` as an example, type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
class LinearRegressionModel(_JavaRegressionModel, _LinearRegressionParams, GeneralJavaMLWritable,
|
2019-10-18 05:26:54 -04:00
|
|
|
JavaMLReadable, HasTrainingSummary):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2016-05-25 01:20:00 -04:00
|
|
|
Model fitted by :class:`LinearRegression`.
|
2015-09-17 11:45:20 -04:00
|
|
|
|
|
|
|
.. versionadded:: 1.4.0
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
|
2015-11-02 19:12:04 -05:00
|
|
|
@property
|
2016-06-22 13:05:25 -04:00
|
|
|
@since("2.0.0")
|
2015-11-02 19:12:04 -05:00
|
|
|
def coefficients(self):
|
|
|
|
"""
|
|
|
|
Model coefficients.
|
|
|
|
"""
|
|
|
|
return self._call_java("coefficients")
|
|
|
|
|
2015-05-14 21:13:58 -04:00
|
|
|
@property
|
2015-09-17 11:45:20 -04:00
|
|
|
@since("1.4.0")
|
2015-05-14 21:13:58 -04:00
|
|
|
def intercept(self):
|
|
|
|
"""
|
|
|
|
Model intercept.
|
|
|
|
"""
|
|
|
|
return self._call_java("intercept")
|
|
|
|
|
2017-12-20 20:51:42 -05:00
|
|
|
@property
|
|
|
|
@since("2.3.0")
|
|
|
|
def scale(self):
|
2018-09-12 23:19:43 -04:00
|
|
|
r"""
|
|
|
|
The value by which :math:`\|y - X'w\|` is scaled down when loss is "huber", otherwise 1.0.
|
2017-12-20 20:51:42 -05:00
|
|
|
"""
|
|
|
|
return self._call_java("scale")
|
|
|
|
|
2016-04-06 15:07:47 -04:00
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def summary(self):
|
|
|
|
"""
|
|
|
|
Gets summary (e.g. residuals, mse, r-squared) of the model on the
|
|
|
|
training set. An exception is thrown if
|
|
|
|
`trainingSummary is None`.
|
|
|
|
"""
|
2016-11-21 08:36:49 -05:00
|
|
|
if self.hasSummary:
|
2019-02-01 18:29:58 -05:00
|
|
|
return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary)
|
2016-11-21 08:36:49 -05:00
|
|
|
else:
|
|
|
|
raise RuntimeError("No training summary available for this %s" %
|
|
|
|
self.__class__.__name__)
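A hedged sketch of reading the training summary; `hasSummary` guards against the RuntimeError raised above (for example, a model loaded from disk carries no summary).
```python
# Assuming the fitted LinearRegressionModel `model` from the earlier sketches.
if model.hasSummary:
    s = model.summary
    print(s.numInstances, s.rootMeanSquaredError, s.r2)
```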
|
2016-04-06 15:07:47 -04:00
|
|
|
|
|
|
|
def evaluate(self, dataset):
|
|
|
|
"""
|
|
|
|
Evaluates the model on a test dataset.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
dataset : :py:class:`pyspark.sql.DataFrame`
|
|
|
|
Test dataset to evaluate model on, where dataset is an
|
|
|
|
instance of :py:class:`pyspark.sql.DataFrame`
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
if not isinstance(dataset, DataFrame):
|
|
|
|
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
|
|
|
|
java_lr_summary = self._call_java("evaluate", dataset)
|
|
|
|
return LinearRegressionSummary(java_lr_summary)
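A small sketch of out-of-sample evaluation with the method above; the returned LinearRegressionSummary exposes the same metric properties as the training summary.
```python
# Assuming `spark` and the fitted `model` from the earlier sketches.
from pyspark.ml.linalg import Vectors

test = spark.createDataFrame(
    [(1.0, Vectors.dense(2.0)), (0.0, Vectors.dense(0.0))], ["label", "features"])
test_summary = model.evaluate(test)
print(test_summary.meanAbsoluteError, test_summary.rootMeanSquaredError)
```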
|
|
|
|
|
|
|
|
|
2016-04-13 17:08:57 -04:00
|
|
|
class LinearRegressionSummary(JavaWrapper):
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
Linear regression results evaluated on a dataset.
|
|
|
|
|
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def predictions(self):
|
|
|
|
"""
|
|
|
|
DataFrame produced by the model's `transform` method.
|
|
|
|
"""
|
|
|
|
return self._call_java("predictions")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def predictionCol(self):
|
|
|
|
"""
|
|
|
|
Field in "predictions" which gives the predicted value of
|
|
|
|
the label at each instance.
|
|
|
|
"""
|
|
|
|
return self._call_java("predictionCol")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def labelCol(self):
|
|
|
|
"""
|
|
|
|
Field in "predictions" which gives the true label of each
|
|
|
|
instance.
|
|
|
|
"""
|
|
|
|
return self._call_java("labelCol")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def featuresCol(self):
|
|
|
|
"""
|
|
|
|
Field in "predictions" which gives the features of each instance
|
|
|
|
as a vector.
|
|
|
|
"""
|
|
|
|
return self._call_java("featuresCol")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def explainedVariance(self):
|
2018-09-12 23:19:43 -04:00
|
|
|
r"""
|
2016-04-06 15:07:47 -04:00
|
|
|
Returns the explained variance regression score.
|
2018-09-12 23:19:43 -04:00
|
|
|
explainedVariance = :math:`1 - \frac{variance(y - \hat{y})}{variance(y)}`
|
2016-05-09 04:11:17 -04:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
This ignores instance weights (setting all to 1.0) from
|
|
|
|
`LinearRegression.weightCol`. This will change in later Spark
|
|
|
|
versions.
|
2016-04-06 15:07:47 -04:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
For additional information see
|
|
|
|
`Explained variation on Wikipedia \
|
|
|
|
<http://en.wikipedia.org/wiki/Explained_variation>`_
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("explainedVariance")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def meanAbsoluteError(self):
|
|
|
|
"""
|
|
|
|
Returns the mean absolute error, which is a risk function
|
|
|
|
corresponding to the expected value of the absolute error
|
|
|
|
loss or l1-norm loss.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
This ignores instance weights (setting all to 1.0) from
|
|
|
|
`LinearRegression.weightCol`. This will change in later Spark
|
|
|
|
versions.
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("meanAbsoluteError")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def meanSquaredError(self):
|
|
|
|
"""
|
|
|
|
Returns the mean squared error, which is a risk function
|
|
|
|
corresponding to the expected value of the squared error
|
|
|
|
loss or quadratic loss.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
This ignores instance weights (setting all to 1.0) from
|
|
|
|
`LinearRegression.weightCol`. This will change in later Spark
|
|
|
|
versions.
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("meanSquaredError")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def rootMeanSquaredError(self):
|
|
|
|
"""
|
|
|
|
Returns the root mean squared error, which is defined as the
|
|
|
|
square root of the mean squared error.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
This ignores instance weights (setting all to 1.0) from
|
|
|
|
`LinearRegression.weightCol`. This will change in later Spark
|
|
|
|
versions.
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("rootMeanSquaredError")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def r2(self):
|
|
|
|
"""
|
2018-03-26 18:45:27 -04:00
|
|
|
Returns R^2, the coefficient of determination.
|
2016-05-09 04:11:17 -04:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
This ignores instance weights (setting all to 1.0) from
|
|
|
|
`LinearRegression.weightCol`. This will change in later Spark
|
|
|
|
versions.
|
2016-04-06 15:07:47 -04:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
See also `Wikipedia coefficient of determination \
|
|
|
|
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("r2")
|
|
|
|
|
2018-03-26 18:45:27 -04:00
|
|
|
@property
|
|
|
|
@since("2.4.0")
|
|
|
|
def r2adj(self):
|
|
|
|
"""
|
|
|
|
Returns Adjusted R^2, the adjusted coefficient of determination.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
This ignores instance weights (setting all to 1.0) from
|
|
|
|
`LinearRegression.weightCol`. This will change in later Spark versions.
|
2018-03-26 18:45:27 -04:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
`Wikipedia coefficient of determination, Adjusted R^2 \
|
|
|
|
<https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
|
2018-03-26 18:45:27 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("r2adj")
|
|
|
|
|
2016-04-06 15:07:47 -04:00
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def residuals(self):
|
|
|
|
"""
|
|
|
|
Residuals (label - predicted value)
|
|
|
|
"""
|
|
|
|
return self._call_java("residuals")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def numInstances(self):
|
|
|
|
"""
|
|
|
|
Number of instances in the `predictions` DataFrame.
|
|
|
|
"""
|
|
|
|
return self._call_java("numInstances")
|
|
|
|
|
2017-05-22 10:42:37 -04:00
|
|
|
@property
|
|
|
|
@since("2.2.0")
|
|
|
|
def degreesOfFreedom(self):
|
|
|
|
"""
|
|
|
|
Degrees of freedom.
|
|
|
|
"""
|
|
|
|
return self._call_java("degreesOfFreedom")
|
|
|
|
|
2016-04-06 15:07:47 -04:00
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def devianceResiduals(self):
|
|
|
|
"""
|
|
|
|
The weighted residuals, the usual residuals rescaled by the
|
|
|
|
square root of the instance weights.
|
|
|
|
"""
|
|
|
|
return self._call_java("devianceResiduals")
|
|
|
|
|
|
|
|
@property
|
|
|
|
def coefficientStandardErrors(self):
|
|
|
|
"""
|
|
|
|
Standard error of estimated coefficients and intercept.
|
|
|
|
This value is only available when using the "normal" solver.
|
|
|
|
|
2016-04-08 23:15:44 -04:00
|
|
|
If :py:attr:`LinearRegression.fitIntercept` is set to True,
|
|
|
|
then the last element returned corresponds to the intercept.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
See Also
|
|
|
|
--------
|
|
|
|
LinearRegression.solver
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("coefficientStandardErrors")
|
|
|
|
|
|
|
|
@property
|
|
|
|
def tValues(self):
|
|
|
|
"""
|
|
|
|
T-statistic of estimated coefficients and intercept.
|
|
|
|
This value is only available when using the "normal" solver.
|
|
|
|
|
2016-04-08 23:15:44 -04:00
|
|
|
If :py:attr:`LinearRegression.fitIntercept` is set to True,
|
|
|
|
then the last element returned corresponds to the intercept.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
See Also
|
|
|
|
--------
|
|
|
|
LinearRegression.solver
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("tValues")
|
|
|
|
|
|
|
|
@property
|
|
|
|
def pValues(self):
|
|
|
|
"""
|
|
|
|
Two-sided p-value of estimated coefficients and intercept.
|
|
|
|
This value is only available when using the "normal" solver.
|
|
|
|
|
2016-04-08 23:15:44 -04:00
|
|
|
If :py:attr:`LinearRegression.fitIntercept` is set to True,
|
|
|
|
then the last element returned corresponds to the intercept.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
See Also
|
|
|
|
--------
|
|
|
|
LinearRegression.solver
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("pValues")
|
|
|
|
|
|
|
|
|
|
|
|
@inherit_doc
|
|
|
|
class LinearRegressionTrainingSummary(LinearRegressionSummary):
|
|
|
|
"""
|
|
|
|
Linear regression training results. Currently, the training summary ignores the
|
|
|
|
training weights except for the objective trace.
|
|
|
|
|
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
@property
|
|
|
|
def objectiveHistory(self):
|
|
|
|
"""
|
|
|
|
Objective function (scaled loss + regularization) at each
|
|
|
|
iteration.
|
|
|
|
This value is only available when using the "l-bfgs" solver.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
See Also
|
|
|
|
--------
|
|
|
|
LinearRegression.solver
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("objectiveHistory")
|
|
|
|
|
|
|
|
@property
|
|
|
|
def totalIterations(self):
|
|
|
|
"""
|
|
|
|
Number of training iterations until termination.
|
|
|
|
This value is only available when using the "l-bfgs" solver.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
See Also
|
|
|
|
--------
|
|
|
|
LinearRegression.solver
|
2016-04-06 15:07:47 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("totalIterations")
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
2019-10-18 05:26:54 -04:00
|
|
|
class _IsotonicRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol):
|
[SPARK-28985][PYTHON][ML][FOLLOW-UP] Add _IsotonicRegressionBase
### What changes were proposed in this pull request?
Adds
```python
class _IsotonicRegressionBase(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol): ...
```
with related `Params` and uses it to replace `JavaPredictor` and `HasWeightCol` in `IsotonicRegression` base classes and `JavaPredictionModel,` in `IsotonicRegressionModel` base classes.
### Why are the changes needed?
Previous work (#25776) on [SPARK-28985](https://issues.apache.org/jira/browse/SPARK-28985) replaced `JavaEstimator`, `HasFeaturesCol`, `HasLabelCol`, `HasPredictionCol` in `IsotonicRegression` and `JavaModel` in `IsotonicRegressionModel` with newly added `JavaPredictor`:
https://github.com/apache/spark/blob/e97b55d32285052a1f76cca35377c4b21eb2e7d7/python/pyspark/ml/wrapper.py#L377
and `JavaPredictionModel`
https://github.com/apache/spark/blob/e97b55d32285052a1f76cca35377c4b21eb2e7d7/python/pyspark/ml/wrapper.py#L405
respectively.
This however is inconsistent with Scala counterpart where both classes extend private `IsotonicRegressionBase`
https://github.com/apache/spark/blob/3cb1b57809d0b4a93223669f5c10cea8fc53eff6/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala#L42-L43
This preserves some of the existing inconsistencies (`model` as defined in [the official example](https://github.com/apache/spark/blob/master/examples/src/main/python/ml/isotonic_regression_example.py)), i.e.
```python
from pyspark.ml.regression import IsotonicRegressionModel
from pyspark.ml.param.shared import HasWeightCol
issubclass(IsotonicRegressionModel, HasWeightCol)
# False
hasattr(model, "weightCol")
# True
```
as well as introduces a bug, by adding unsupported `predict` method:
```python
import inspect
hasattr(model, "predict")
# True
inspect.getfullargspec(IsotonicRegressionModel.predict)
# FullArgSpec(args=['self', 'value'], varargs=None, varkw=None, defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={})
IsotonicRegressionModel.predict.__doc__
# Predict label for the given features.\n\n .. versionadded:: 3.0.0'
model.predict(dataset.first().features)
# Py4JError: An error occurred while calling o49.predict. Trace:
# py4j.Py4JException: Method predict([class org.apache.spark.ml.linalg.SparseVector]) does not exist
# ...
```
Furthermore existing implementation can cause further problems in the future, if `Predictor` / `PredictionModel` API changes.
### Does this PR introduce any user-facing change?
Yes. It:
- Removes invalid `IsotonicRegressionModel.predict` method.
- Adds `HasWeightCol` to `IsotonicRegressionModel`.
however the faulty implementation hasn't been released yet, and proposed additions have negligible potential for breaking existing code (and none, compared to changes already made in #25776).
### How was this patch tested?
- Existing unit tests.
- Manual testing.
CC huaxingao, zhengruifeng
Closes #26023 from zero323/SPARK-28985-FOLLOW-UP-isotonic-regression.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-10-04 19:06:10 -04:00
|
|
|
"""
|
|
|
|
Params for :py:class:`IsotonicRegression` and :py:class:`IsotonicRegressionModel`.
|
|
|
|
|
|
|
|
.. versionadded:: 3.0.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
isotonic = Param(
|
|
|
|
Params._dummy(), "isotonic",
|
|
|
|
"whether the output sequence should be isotonic/increasing (true) or" +
|
|
|
|
"antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
|
|
|
|
featureIndex = Param(
|
|
|
|
Params._dummy(), "featureIndex",
|
|
|
|
"The index of the feature if featuresCol is a vector column, no effect otherwise.",
|
|
|
|
typeConverter=TypeConverters.toInt)
|
|
|
|
|
2020-08-03 11:50:34 -04:00
|
|
|
def __init__(self, *args):
|
|
|
|
super(_IsotonicRegressionParams, self).__init__(*args)
|
2020-07-16 14:12:29 -04:00
|
|
|
self._setDefault(isotonic=True, featureIndex=0)
|
|
|
|
|
2019-10-04 19:06:10 -04:00
|
|
|
def getIsotonic(self):
|
|
|
|
"""
|
|
|
|
Gets the value of isotonic or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.isotonic)
|
|
|
|
|
|
|
|
def getFeatureIndex(self):
|
|
|
|
"""
|
|
|
|
Gets the value of featureIndex or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.featureIndex)
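A small sketch of the two Params defined above and their defaults (isotonic=True, featureIndex=0), using the getters just shown.
```python
from pyspark.ml.regression import IsotonicRegression

ir = IsotonicRegression()
print(ir.getIsotonic())      # True: fit a non-decreasing sequence by default
print(ir.getFeatureIndex())  # 0: only relevant when featuresCol is a vector column
```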
|
|
|
|
|
|
|
|
|
2015-10-07 20:50:35 -04:00
|
|
|
@inherit_doc
|
2019-10-18 05:26:54 -04:00
|
|
|
class IsotonicRegression(JavaEstimator, _IsotonicRegressionParams, HasWeightCol,
|
2019-10-04 19:06:10 -04:00
|
|
|
JavaMLWritable, JavaMLReadable):
|
2015-10-07 20:50:35 -04:00
|
|
|
"""
|
|
|
|
Currently implemented using a parallelized pool adjacent violators algorithm.
|
|
|
|
Only the univariate (single feature) algorithm is supported.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 1.6.0
|
|
|
|
|
|
|
|
Examples
|
|
|
|
--------
|
2016-05-17 15:51:07 -04:00
|
|
|
>>> from pyspark.ml.linalg import Vectors
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> df = spark.createDataFrame([
|
2015-10-07 20:50:35 -04:00
|
|
|
... (1.0, Vectors.dense(1.0)),
|
|
|
|
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
|
|
|
|
>>> ir = IsotonicRegression()
|
|
|
|
>>> model = ir.fit(df)
|
[SPARK-28985][PYTHON][ML] Add common classes (JavaPredictor/JavaClassificationModel/JavaProbabilisticClassifier) in PYTHON
### What changes were proposed in this pull request?
Add some common classes in Python to make it have the same structure as Scala
1. Scala has ClassifierParams/Classifier/ClassificationModel:
```
trait ClassifierParams
extends PredictorParams with HasRawPredictionCol
abstract class Classifier
extends Predictor with ClassifierParams {
def setRawPredictionCol
}
abstract class ClassificationModel
extends PredictionModel with ClassifierParams {
def setRawPredictionCol
}
```
This PR makes Python has the following:
```
class JavaClassifierParams(HasRawPredictionCol, JavaPredictorParams):
pass
class JavaClassifier(JavaPredictor, JavaClassifierParams):
def setRawPredictionCol
class JavaClassificationModel(JavaPredictionModel, JavaClassifierParams):
def setRawPredictionCol
```
2. Scala has ProbabilisticClassifierParams/ProbabilisticClassifier/ProbabilisticClassificationModel:
```
trait ProbabilisticClassifierParams
extends ClassifierParams with HasProbabilityCol with HasThresholds
abstract class ProbabilisticClassifier
extends Classifier with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
abstract class ProbabilisticClassificationModel
extends ClassificationModel with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
```
This PR makes Python have the following:
```
class JavaProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, JavaClassifierParams):
pass
class JavaProbabilisticClassifier(JavaClassifier, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
class JavaProbabilisticClassificationModel(JavaClassificationModel, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
```
3. Scala has PredictorParams/Predictor/PredictionModel:
```
trait PredictorParams extends Params
with HasLabelCol with HasFeaturesCol with HasPredictionCol
abstract class Predictor
extends Estimator with PredictorParams {
def setLabelCol
def setFeaturesCol
def setPredictionCol
}
abstract class PredictionModel
extends Model with PredictorParams {
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
}
```
This PR makes Python have the following:
```
class JavaPredictorParams(HasLabelCol, HasFeaturesCol, HasPredictionCol):
pass
class JavaPredictor(JavaEstimator, JavaPredictorParams):
def setLabelCol
def setFeaturesCol
def setPredictionCol
class JavaPredictionModel(JavaModel, JavaPredictorParams):
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
```
### Why are the changes needed?
Have parity between Python and Scala ML
### Does this PR introduce any user-facing change?
Yes. Add the following changes:
```
LinearSVCModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- predict
```
```
LogisticRegressionModel
DecisionTreeClassificationModel
RandomForestClassificationModel
GBTClassificationModel
NaiveBayesModel
MultilayerPerceptronClassificationModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- get/setProbabilityCol
- predict
```
```
LinearRegressionModel
IsotonicRegressionModel
DecisionTreeRegressionModel
RandomForestRegressionModel
GBTRegressionModel
AFTSurvivalRegressionModel
GeneralizedLinearRegressionModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- predict
```
### How was this patch tested?
Add a few doc tests.
Closes #25776 from huaxingao/spark-28985.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-09-19 09:17:25 -04:00
|
|
|
>>> model.setFeaturesCol("features")
|
[SPARK-29867][ML][PYTHON] Add __repr__ in Python ML Models
### What changes were proposed in this pull request?
Add ```__repr__``` in Python ML Models
### Why are the changes needed?
In Python ML Models, some of them have ```__repr__```, others don't. In the doctest, when calling Model.setXXX, some of the Models print out the xxxModel... correctly, some of them can't because of lacking the ```__repr__``` method. For example:
```
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> model = gm.fit(df)
>>> model.setPredictionCol("newPrediction")
GaussianMixture...
```
After the change, the above code will become the following:
```
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> model = gm.fit(df)
>>> model.setPredictionCol("newPrediction")
GaussianMixtureModel...
```
### Does this PR introduce any user-facing change?
Yes.
### How was this patch tested?
doctest
Closes #26489 from huaxingao/spark-29876.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
2019-11-16 00:44:39 -05:00
|
|
|
IsotonicRegressionModel...
|
2020-01-14 23:29:23 -05:00
|
|
|
>>> model.numFeatures
|
2020-01-09 10:23:10 -05:00
|
|
|
1
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
|
2015-10-07 20:50:35 -04:00
|
|
|
>>> model.transform(test0).head().prediction
|
|
|
|
0.0
|
2020-01-09 10:23:10 -05:00
|
|
|
>>> model.predict(test0.head().features[model.getFeatureIndex()])
|
|
|
|
0.0
|
2015-10-07 20:50:35 -04:00
|
|
|
>>> model.boundaries
|
|
|
|
DenseVector([0.0, 1.0])
|
2016-02-26 00:09:02 -05:00
|
|
|
>>> ir_path = temp_path + "/ir"
|
|
|
|
>>> ir.save(ir_path)
|
|
|
|
>>> ir2 = IsotonicRegression.load(ir_path)
|
|
|
|
>>> ir2.getIsotonic()
|
|
|
|
True
|
|
|
|
>>> model_path = temp_path + "/ir_model"
|
|
|
|
>>> model.save(model_path)
|
|
|
|
>>> model2 = IsotonicRegressionModel.load(model_path)
|
|
|
|
>>> model.boundaries == model2.boundaries
|
|
|
|
True
|
|
|
|
>>> model.predictions == model2.predictions
|
|
|
|
True
|
2020-08-03 11:50:34 -04:00
|
|
|
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
|
|
|
|
True
|
2015-10-07 20:50:35 -04:00
|
|
|
"""
|
|
|
|
@keyword_only
|
2020-09-22 20:28:33 -04:00
|
|
|
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-10-07 20:50:35 -04:00
|
|
|
weightCol=None, isotonic=True, featureIndex=0):
|
|
|
|
"""
|
2020-09-22 20:28:33 -04:00
|
|
|
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-10-07 20:50:35 -04:00
|
|
|
weightCol=None, isotonic=True, featureIndex=0):
|
|
|
|
"""
|
|
|
|
super(IsotonicRegression, self).__init__()
|
|
|
|
self._java_obj = self._new_java_obj(
|
|
|
|
"org.apache.spark.ml.regression.IsotonicRegression", self.uid)
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-10-07 20:50:35 -04:00
|
|
|
self.setParams(**kwargs)
|
|
|
|
|
|
|
|
@keyword_only
|
2020-09-22 20:28:33 -04:00
|
|
|
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-10-07 20:50:35 -04:00
|
|
|
weightCol=None, isotonic=True, featureIndex=0):
|
|
|
|
"""
|
2020-09-22 20:28:33 -04:00
|
|
|
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-10-07 20:50:35 -04:00
|
|
|
weightCol=None, isotonic=True, featureIndex=0):
|
|
|
|
Set the params for IsotonicRegression.
|
|
|
|
"""
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-10-07 20:50:35 -04:00
|
|
|
return self._set(**kwargs)
|
|
|
|
|
|
|
|
def _create_model(self, java_model):
|
|
|
|
return IsotonicRegressionModel(java_model)
|
|
|
|
|
|
|
|
def setIsotonic(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`isotonic`.
|
|
|
|
"""
|
2016-05-03 10:46:13 -04:00
|
|
|
return self._set(isotonic=value)
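A hedged sketch of the antitonic case: with isotonic=False the fitted sequence is constrained to be non-increasing. Assumes a running SparkSession; the data is illustrative.
```python
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import IsotonicRegression

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(3.0, Vectors.dense(0.0)), (2.0, Vectors.dense(1.0)),
     (2.5, Vectors.dense(2.0)), (1.0, Vectors.dense(3.0))],
    ["label", "features"])
model = IsotonicRegression().setIsotonic(False).fit(df)
print(model.boundaries, model.predictions)
```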
|
2015-10-07 20:50:35 -04:00
|
|
|
|
|
|
|
def setFeatureIndex(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`featureIndex`.
|
|
|
|
"""
|
2016-05-03 10:46:13 -04:00
|
|
|
return self._set(featureIndex=value)
|
2015-10-07 20:50:35 -04:00
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.6.0")
|
|
|
|
def setFeaturesCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`featuresCol`.
|
|
|
|
"""
|
|
|
|
return self._set(featuresCol=value)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setPredictionCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`predictionCol`.
|
|
|
|
"""
|
|
|
|
return self._set(predictionCol=value)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setLabelCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`labelCol`.
|
|
|
|
"""
|
|
|
|
return self._set(labelCol=value)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setWeightCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`weightCol`.
|
|
|
|
"""
|
|
|
|
return self._set(weightCol=value)
|
|
|
|
|
2015-10-07 20:50:35 -04:00
|
|
|
|
2019-10-18 05:26:54 -04:00
|
|
|
class IsotonicRegressionModel(JavaModel, _IsotonicRegressionParams, JavaMLWritable,
|
|
|
|
JavaMLReadable):
|
2015-10-07 20:50:35 -04:00
|
|
|
"""
|
2016-05-25 01:20:00 -04:00
|
|
|
Model fitted by :class:`IsotonicRegression`.
|
[SPARK-14812][ML][MLLIB][PYTHON] Experimental, DeveloperApi annotation audit for ML
## What changes were proposed in this pull request?
General decisions to follow, except where noted:
* spark.mllib, pyspark.mllib: Remove all Experimental annotations. Leave DeveloperApi annotations alone.
* spark.ml, pyspark.ml
** Annotate Estimator-Model pairs of classes and companion objects the same way.
** For all algorithms marked Experimental with Since tag <= 1.6, remove Experimental annotation.
** For all algorithms marked Experimental with Since tag = 2.0, leave Experimental annotation.
* DeveloperApi annotations are left alone, except where noted.
* No changes to which types are sealed.
Exceptions where I am leaving items Experimental in spark.ml, pyspark.ml, mainly because the items are new:
* Model Summary classes
* MLWriter, MLReader, MLWritable, MLReadable
* Evaluator and subclasses: There is discussion of changes around evaluating multiple metrics at once for efficiency.
* RFormula: Its behavior may need to change slightly to match R in edge cases.
* AFTSurvivalRegression
* MultilayerPerceptronClassifier
DeveloperApi changes:
* ml.tree.Node, ml.tree.Split, and subclasses should no longer be DeveloperApi
## How was this patch tested?
N/A
Note to reviewers:
* spark.ml.clustering.LDA underwent significant changes (additional methods), so let me know if you want me to leave it Experimental.
* Be careful to check for cases where a class should no longer be Experimental but has an Experimental method, val, or other feature. I did not find such cases, but please verify.
Author: Joseph K. Bradley <joseph@databricks.com>
Closes #14147 from jkbradley/experimental-audit.
2016-07-13 15:33:39 -04:00
|
|
|
|
|
|
|
.. versionadded:: 1.6.0
|
2015-10-07 20:50:35 -04:00
|
|
|
"""
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("3.0.0")
|
|
|
|
def setFeaturesCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`featuresCol`.
|
|
|
|
"""
|
|
|
|
return self._set(featuresCol=value)
|
|
|
|
|
|
|
|
@since("3.0.0")
|
|
|
|
def setPredictionCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`predictionCol`.
|
|
|
|
"""
|
|
|
|
return self._set(predictionCol=value)
|
|
|
|
|
|
|
|
def setFeatureIndex(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`featureIndex`.
|
|
|
|
"""
|
|
|
|
return self._set(featureIndex=value)
|
|
|
|
|
2015-10-07 20:50:35 -04:00
|
|
|
@property
|
[SPARK-14812][ML][MLLIB][PYTHON] Experimental, DeveloperApi annotation audit for ML
## What changes were proposed in this pull request?
General decisions to follow, except where noted:
* spark.mllib, pyspark.mllib: Remove all Experimental annotations. Leave DeveloperApi annotations alone.
* spark.ml, pyspark.ml
** Annotate Estimator-Model pairs of classes and companion objects the same way.
** For all algorithms marked Experimental with Since tag <= 1.6, remove Experimental annotation.
** For all algorithms marked Experimental with Since tag = 2.0, leave Experimental annotation.
* DeveloperApi annotations are left alone, except where noted.
* No changes to which types are sealed.
Exceptions where I am leaving items Experimental in spark.ml, pyspark.ml, mainly because the items are new:
* Model Summary classes
* MLWriter, MLReader, MLWritable, MLReadable
* Evaluator and subclasses: There is discussion of changes around evaluating multiple metrics at once for efficiency.
* RFormula: Its behavior may need to change slightly to match R in edge cases.
* AFTSurvivalRegression
* MultilayerPerceptronClassifier
DeveloperApi changes:
* ml.tree.Node, ml.tree.Split, and subclasses should no longer be DeveloperApi
## How was this patch tested?
N/A
Note to reviewers:
* spark.ml.clustering.LDA underwent significant changes (additional methods), so let me know if you want me to leave it Experimental.
* Be careful to check for cases where a class should no longer be Experimental but has an Experimental method, val, or other feature. I did not find such cases, but please verify.
Author: Joseph K. Bradley <joseph@databricks.com>
Closes #14147 from jkbradley/experimental-audit.
2016-07-13 15:33:39 -04:00
|
|
|
@since("1.6.0")
|
2015-10-07 20:50:35 -04:00
|
|
|
def boundaries(self):
|
|
|
|
"""
|
2016-05-25 01:20:00 -04:00
|
|
|
Boundaries in increasing order for which predictions are known.
|
2015-10-07 20:50:35 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("boundaries")
|
|
|
|
|
|
|
|
@property
|
[SPARK-14812][ML][MLLIB][PYTHON] Experimental, DeveloperApi annotation audit for ML
## What changes were proposed in this pull request?
General decisions to follow, except where noted:
* spark.mllib, pyspark.mllib: Remove all Experimental annotations. Leave DeveloperApi annotations alone.
* spark.ml, pyspark.ml
** Annotate Estimator-Model pairs of classes and companion objects the same way.
** For all algorithms marked Experimental with Since tag <= 1.6, remove Experimental annotation.
** For all algorithms marked Experimental with Since tag = 2.0, leave Experimental annotation.
* DeveloperApi annotations are left alone, except where noted.
* No changes to which types are sealed.
Exceptions where I am leaving items Experimental in spark.ml, pyspark.ml, mainly because the items are new:
* Model Summary classes
* MLWriter, MLReader, MLWritable, MLReadable
* Evaluator and subclasses: There is discussion of changes around evaluating multiple metrics at once for efficiency.
* RFormula: Its behavior may need to change slightly to match R in edge cases.
* AFTSurvivalRegression
* MultilayerPerceptronClassifier
DeveloperApi changes:
* ml.tree.Node, ml.tree.Split, and subclasses should no longer be DeveloperApi
## How was this patch tested?
N/A
Note to reviewers:
* spark.ml.clustering.LDA underwent significant changes (additional methods), so let me know if you want me to leave it Experimental.
* Be careful to check for cases where a class should no longer be Experimental but has an Experimental method, val, or other feature. I did not find such cases, but please verify.
Author: Joseph K. Bradley <joseph@databricks.com>
Closes #14147 from jkbradley/experimental-audit.
2016-07-13 15:33:39 -04:00
|
|
|
@since("1.6.0")
|
2015-10-07 20:50:35 -04:00
|
|
|
def predictions(self):
|
|
|
|
"""
|
|
|
|
Predictions associated with the boundaries at the same index, monotone because of isotonic
|
|
|
|
regression.
|
|
|
|
"""
|
|
|
|
return self._call_java("predictions")
|
|
|
|
|
2020-01-14 23:29:23 -05:00
|
|
|
@property
|
2020-01-09 10:23:10 -05:00
|
|
|
@since("3.0.0")
|
|
|
|
def numFeatures(self):
|
|
|
|
"""
|
|
|
|
Returns the number of features the model was trained on. If unknown, returns -1
|
|
|
|
"""
|
|
|
|
return self._call_java("numFeatures")
|
|
|
|
|
|
|
|
@since("3.0.0")
|
|
|
|
def predict(self, value):
|
|
|
|
"""
|
|
|
|
Predict label for the given features.
|
|
|
|
"""
|
|
|
|
return self._call_java("predict", value)
|
|
|
|
|
2015-10-07 20:50:35 -04:00
|
|
|
|
2019-10-12 10:13:50 -04:00
|
|
|
class _DecisionTreeRegressorParams(_DecisionTreeParams, _TreeRegressorParams, HasVarianceCol):
|
2019-08-15 11:21:26 -04:00
|
|
|
"""
|
2019-10-12 10:13:50 -04:00
|
|
|
Params for :py:class:`DecisionTreeRegressor` and :py:class:`DecisionTreeRegressionModel`.
|
2018-12-07 16:53:35 -05:00
|
|
|
|
|
|
|
.. versionadded:: 3.0.0
|
|
|
|
"""
|
|
|
|
|
2020-08-03 11:50:34 -04:00
|
|
|
def __init__(self, *args):
|
|
|
|
super(_DecisionTreeRegressorParams, self).__init__(*args)
|
2020-07-16 14:12:29 -04:00
|
|
|
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
|
|
|
|
impurity="variance", leafCol="", minWeightFractionPerNode=0.0)
|
2018-12-07 16:53:35 -05:00
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
|
|
|
@inherit_doc
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public (as opposed to `Java*` variants, which are commonly described in docstrings as private) hierarchy which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and/or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally, "private" `Java*` classes in `ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is open for discussion whether the same should be done to the equivalent classes in `ml.wrapper`.
If we take `JavaClassifier` as an example, the type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
class DecisionTreeRegressor(_JavaRegressor, _DecisionTreeRegressorParams, JavaMLWritable,
|
2019-10-12 10:13:50 -04:00
|
|
|
JavaMLReadable):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2016-05-09 04:11:17 -04:00
|
|
|
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
|
2015-05-12 15:17:05 -04:00
|
|
|
learning algorithm for regression.
|
|
|
|
It supports both continuous and categorical features.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 1.4.0
|
|
|
|
|
|
|
|
Examples
|
|
|
|
--------
|
2016-05-17 15:51:07 -04:00
|
|
|
>>> from pyspark.ml.linalg import Vectors
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> df = spark.createDataFrame([
|
2015-05-12 15:17:05 -04:00
|
|
|
... (1.0, Vectors.dense(1.0)),
|
|
|
|
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
|
2019-10-27 23:36:10 -04:00
|
|
|
>>> dt = DecisionTreeRegressor(maxDepth=2)
|
|
|
|
>>> dt.setVarianceCol("variance")
|
|
|
|
DecisionTreeRegressor...
|
2015-05-12 15:17:05 -04:00
|
|
|
>>> model = dt.fit(df)
|
2019-10-12 10:13:50 -04:00
|
|
|
>>> model.getVarianceCol()
|
|
|
|
'variance'
|
|
|
|
>>> model.setLeafCol("leafId")
|
|
|
|
DecisionTreeRegressionModel...
|
2015-07-07 11:58:08 -04:00
|
|
|
>>> model.depth
|
|
|
|
1
|
|
|
|
>>> model.numNodes
|
|
|
|
3
|
2016-03-11 02:54:23 -05:00
|
|
|
>>> model.featureImportances
|
|
|
|
SparseVector(1, {0: 1.0})
|
2016-08-22 06:21:22 -04:00
|
|
|
>>> model.numFeatures
|
|
|
|
1
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
|
[SPARK-28985][PYTHON][ML] Add common classes (JavaPredictor/JavaClassificationModel/JavaProbabilisticClassifier) in PYTHON
### What changes were proposed in this pull request?
Add some common classes in Python to make it have the same structure as Scala
1. Scala has ClassifierParams/Classifier/ClassificationModel:
```
trait ClassifierParams
extends PredictorParams with HasRawPredictionCol
abstract class Classifier
extends Predictor with ClassifierParams {
def setRawPredictionCol
}
abstract class ClassificationModel
extends PredictionModel with ClassifierParams {
def setRawPredictionCol
}
```
This PR makes Python have the following:
```
class JavaClassifierParams(HasRawPredictionCol, JavaPredictorParams):
pass
class JavaClassifier(JavaPredictor, JavaClassifierParams):
def setRawPredictionCol
class JavaClassificationModel(JavaPredictionModel, JavaClassifierParams):
def setRawPredictionCol
```
2. Scala has ProbabilisticClassifierParams/ProbabilisticClassifier/ProbabilisticClassificationModel:
```
trait ProbabilisticClassifierParams
extends ClassifierParams with HasProbabilityCol with HasThresholds
abstract class ProbabilisticClassifier
extends Classifier with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
abstract class ProbabilisticClassificationModel
extends ClassificationModel with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
```
This PR makes Python have the following:
```
class JavaProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, JavaClassifierParams):
pass
class JavaProbabilisticClassifier(JavaClassifier, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
class JavaProbabilisticClassificationModel(JavaClassificationModel, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
```
3. Scala has PredictorParams/Predictor/PredictionModel:
```
trait PredictorParams extends Params
with HasLabelCol with HasFeaturesCol with HasPredictionCol
abstract class Predictor
extends Estimator with PredictorParams {
def setLabelCol
def setFeaturesCol
def setPredictionCol
}
abstract class PredictionModel
extends Model with PredictorParams {
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
}
```
This PR makes Python have the following:
```
class JavaPredictorParams(HasLabelCol, HasFeaturesCol, HasPredictionCol):
pass
class JavaPredictor(JavaEstimator, JavaPredictorParams):
def setLabelCol
def setFeaturesCol
def setPredictionCol
class JavaPredictionModel(JavaModel, JavaPredictorParams):
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
```
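As a quick, hedged illustration (base-class names vary between Spark versions, so this only shows the idea, not exact output), the shared hierarchy can be inspected via the method resolution order:
```python
import inspect
from pyspark.ml.regression import DecisionTreeRegressionModel

# The common predictor/model base classes described above should appear
# somewhere in this list on versions that include this change.
for cls in inspect.getmro(DecisionTreeRegressionModel):
    print(cls.__name__)
```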
### Why are the changes needed?
Have parity between Python and Scala ML
### Does this PR introduce any user-facing change?
Yes. Add the following changes:
```
LinearSVCModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- predict
```
```
LogisticRegressionModel
DecisionTreeClassificationModel
RandomForestClassificationModel
GBTClassificationModel
NaiveBayesModel
MultilayerPerceptronClassificationModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- get/setProbabilityCol
- predict
```
```
LinearRegressionModel
IsotonicRegressionModel
DecisionTreeRegressionModel
RandomForestRegressionModel
GBTRegressionModel
AFTSurvivalRegressionModel
GeneralizedLinearRegressionModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- predict
```
### How was this patch tested?
Add a few doc tests.
Closes #25776 from huaxingao/spark-28985.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-09-19 09:17:25 -04:00
|
|
|
>>> model.predict(test0.head().features)
|
|
|
|
0.0
|
2019-08-23 18:18:35 -04:00
|
|
|
>>> result = model.transform(test0).head()
|
|
|
|
>>> result.prediction
|
|
|
|
0.0
|
2019-10-12 10:13:50 -04:00
|
|
|
>>> model.predictLeaf(test0.head().features)
|
|
|
|
0.0
|
2019-08-23 18:18:35 -04:00
|
|
|
>>> result.leafId
|
2015-05-12 15:17:05 -04:00
|
|
|
0.0
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
|
2015-05-12 15:17:05 -04:00
|
|
|
>>> model.transform(test1).head().prediction
|
|
|
|
1.0
|
2016-03-24 22:20:49 -04:00
|
|
|
>>> dtr_path = temp_path + "/dtr"
|
|
|
|
>>> dt.save(dtr_path)
|
|
|
|
>>> dt2 = DecisionTreeRegressor.load(dtr_path)
|
|
|
|
>>> dt2.getMaxDepth()
|
|
|
|
2
|
|
|
|
>>> model_path = temp_path + "/dtr_model"
|
|
|
|
>>> model.save(model_path)
|
|
|
|
>>> model2 = DecisionTreeRegressionModel.load(model_path)
|
|
|
|
>>> model.numNodes == model2.numNodes
|
|
|
|
True
|
|
|
|
>>> model.depth == model2.depth
|
|
|
|
True
|
2016-04-08 13:47:05 -04:00
|
|
|
>>> model.transform(test1).head().variance
|
|
|
|
0.0
|
2020-08-03 11:50:34 -04:00
|
|
|
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
|
|
|
|
True
|
2019-02-27 22:11:30 -05:00
|
|
|
>>> df3 = spark.createDataFrame([
|
|
|
|
... (1.0, 0.2, Vectors.dense(1.0)),
|
|
|
|
... (1.0, 0.8, Vectors.dense(1.0)),
|
|
|
|
... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
|
|
|
|
>>> dt3 = DecisionTreeRegressor(maxDepth=2, weightCol="weight", varianceCol="variance")
|
|
|
|
>>> model3 = dt3.fit(df3)
|
|
|
|
>>> print(model3.toDebugString)
|
2019-11-11 14:03:26 -05:00
|
|
|
DecisionTreeRegressionModel...depth=1, numNodes=3...
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
|
|
|
|
@keyword_only
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the `LinearSVC.__init__` signature will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-05-12 15:17:05 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
2016-01-06 13:52:25 -05:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
|
2019-10-12 10:13:50 -04:00
|
|
|
seed=None, varianceCol=None, weightCol=None, leafCol="",
|
|
|
|
minWeightFractionPerNode=0.0):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the `LinearSVC.__init__` signature will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-05-14 21:16:22 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
|
2016-01-06 13:52:25 -05:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
|
2019-08-23 18:18:35 -04:00
|
|
|
impurity="variance", seed=None, varianceCol=None, weightCol=None, \
|
2019-10-12 10:13:50 -04:00
|
|
|
leafCol="", minWeightFractionPerNode=0.0)
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
super(DecisionTreeRegressor, self).__init__()
|
2015-05-18 15:02:18 -04:00
|
|
|
self._java_obj = self._new_java_obj(
|
|
|
|
"org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-05-12 15:17:05 -04:00
|
|
|
self.setParams(**kwargs)
|
|
|
|
|
|
|
|
@keyword_only
|
2015-09-17 11:45:20 -04:00
|
|
|
@since("1.4.0")
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the `LinearSVC.__init__` signature will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-05-12 15:17:05 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
|
2019-08-23 18:18:35 -04:00
|
|
|
impurity="variance", seed=None, varianceCol=None, weightCol=None,
|
2019-10-12 10:13:50 -04:00
|
|
|
leafCol="", minWeightFractionPerNode=0.0):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the `LinearSVC.__init__` signature will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-05-14 21:16:22 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
|
2016-01-06 13:52:25 -05:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
|
2019-08-23 18:18:35 -04:00
|
|
|
impurity="variance", seed=None, varianceCol=None, weightCol=None, \
|
2019-10-12 10:13:50 -04:00
|
|
|
leafCol="", minWeightFractionPerNode=0.0)
|
2015-05-12 15:17:05 -04:00
|
|
|
Sets params for the DecisionTreeRegressor.
|
|
|
|
"""
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-05-12 15:17:05 -04:00
|
|
|
return self._set(**kwargs)
|
|
|
|
|
|
|
|
def _create_model(self, java_model):
|
|
|
|
return DecisionTreeRegressionModel(java_model)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMaxDepth(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxDepth`.
|
|
|
|
"""
|
|
|
|
return self._set(maxDepth=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMaxBins(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxBins`.
|
|
|
|
"""
|
|
|
|
return self._set(maxBins=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMinInstancesPerNode(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minInstancesPerNode`.
|
|
|
|
"""
|
|
|
|
return self._set(minInstancesPerNode=value)
|
|
|
|
|
2019-10-12 10:13:50 -04:00
|
|
|
@since("3.0.0")
|
|
|
|
def setMinWeightFractionPerNode(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minWeightFractionPerNode`.
|
|
|
|
"""
|
|
|
|
return self._set(minWeightFractionPerNode=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMinInfoGain(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minInfoGain`.
|
|
|
|
"""
|
|
|
|
return self._set(minInfoGain=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMaxMemoryInMB(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxMemoryInMB`.
|
|
|
|
"""
|
|
|
|
return self._set(maxMemoryInMB=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setCacheNodeIds(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`cacheNodeIds`.
|
|
|
|
"""
|
|
|
|
return self._set(cacheNodeIds=value)
|
|
|
|
|
|
|
|
@since("1.4.0")
|
|
|
|
def setImpurity(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`impurity`.
|
|
|
|
"""
|
|
|
|
return self._set(impurity=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
|
|
|
def setCheckpointInterval(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`checkpointInterval`.
|
|
|
|
"""
|
|
|
|
return self._set(checkpointInterval=value)
|
|
|
|
|
|
|
|
def setSeed(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`seed`.
|
|
|
|
"""
|
|
|
|
return self._set(seed=value)
|
|
|
|
|
|
|
|
@since("3.0.0")
|
|
|
|
def setWeightCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`weightCol`.
|
|
|
|
"""
|
|
|
|
return self._set(weightCol=value)
|
|
|
|
|
|
|
|
@since("2.0.0")
|
|
|
|
def setVarianceCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`varianceCol`.
|
|
|
|
"""
|
|
|
|
return self._set(varianceCol=value)
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
2015-07-07 11:58:08 -04:00
|
|
|
@inherit_doc
|
2020-01-17 20:34:30 -05:00
|
|
|
class DecisionTreeRegressionModel(
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public (as opposed to `Java*` variants, which are commonly described in docstrings as private) hierarchy which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and/or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally, "private" `Java*` classes in `ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is open for discussion whether the same should be done to the equivalent classes in `ml.wrapper`.
If we take `JavaClassifier` as an example, the type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
_JavaRegressionModel, _DecisionTreeModel, _DecisionTreeRegressorParams,
|
2020-01-17 20:34:30 -05:00
|
|
|
JavaMLWritable, JavaMLReadable
|
|
|
|
):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2016-05-25 01:20:00 -04:00
|
|
|
Model fitted by :class:`DecisionTreeRegressor`.
|
2015-09-17 11:45:20 -04:00
|
|
|
|
|
|
|
.. versionadded:: 1.4.0
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("3.0.0")
|
|
|
|
def setVarianceCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`varianceCol`.
|
|
|
|
"""
|
|
|
|
return self._set(varianceCol=value)
|
|
|
|
|
2016-03-11 02:54:23 -05:00
|
|
|
@property
|
|
|
|
def featureImportances(self):
|
|
|
|
"""
|
|
|
|
Estimate of the importance of each feature.
|
|
|
|
|
|
|
|
This generalizes the idea of "Gini" importance to other losses,
|
|
|
|
following the explanation of Gini importance from "Random Forests" documentation
|
|
|
|
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
|
|
|
|
|
|
|
|
This feature importance is calculated as follows:
|
|
|
|
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
|
|
|
|
where gain is scaled by the number of instances passing through node
|
|
|
|
- Normalize importances for tree to sum to 1.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
Feature importance for single decision trees can have high variance due to
|
|
|
|
correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
|
|
|
|
to determine feature importance instead.
|
2016-03-11 02:54:23 -05:00
|
|
|
"""
|
|
|
|
return self._call_java("featureImportances")
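# Standalone sketch of the normalization described in the docstring above
# (the per-feature gains are made-up, hypothetical numbers; this only
# illustrates the arithmetic and is not code executed by the model):
#
#   >>> raw_gain = {0: 3.0, 1: 1.0}   # instance-weighted gains summed per splitting feature
#   >>> total = sum(raw_gain.values())
#   >>> {j: g / total for j, g in raw_gain.items()}   # normalized so the importances sum to 1
#   {0: 0.75, 1: 0.25}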
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
2019-10-12 10:13:50 -04:00
|
|
|
class _RandomForestRegressorParams(_RandomForestParams, _TreeRegressorParams):
|
|
|
|
"""
|
|
|
|
Params for :py:class:`RandomForestRegressor` and :py:class:`RandomForestRegressionModel`.
|
|
|
|
|
|
|
|
.. versionadded:: 3.0.0
|
|
|
|
"""
|
2020-07-16 14:12:29 -04:00
|
|
|
|
2020-08-03 11:50:34 -04:00
|
|
|
def __init__(self, *args):
|
|
|
|
super(_RandomForestRegressorParams, self).__init__(*args)
|
2020-07-16 14:12:29 -04:00
|
|
|
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
|
|
|
|
impurity="variance", subsamplingRate=1.0, numTrees=20,
|
|
|
|
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
|
|
|
|
bootstrap=True)
|
2019-10-12 10:13:50 -04:00
|
|
|
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
@inherit_doc
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public (as opposed to `Java*` variants, which are commonly described in docstrings as private) hierarchy which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and/or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally, "private" `Java*` classes in `ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is open for discussion whether the same should be done to the equivalent classes in `ml.wrapper`.
If we take `JavaClassifier` as an example, the type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
class RandomForestRegressor(_JavaRegressor, _RandomForestRegressorParams, JavaMLWritable,
|
2019-10-12 10:13:50 -04:00
|
|
|
JavaMLReadable):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2016-05-09 04:11:17 -04:00
|
|
|
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
|
2015-05-12 15:17:05 -04:00
|
|
|
learning algorithm for regression.
|
|
|
|
It supports both continuous and categorical features.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 1.4.0
|
|
|
|
|
|
|
|
Examples
|
|
|
|
--------
|
2015-07-07 11:58:08 -04:00
|
|
|
>>> from numpy import allclose
|
2016-05-17 15:51:07 -04:00
|
|
|
>>> from pyspark.ml.linalg import Vectors
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> df = spark.createDataFrame([
|
2015-05-12 15:17:05 -04:00
|
|
|
... (1.0, Vectors.dense(1.0)),
|
|
|
|
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
|
2019-10-27 23:36:10 -04:00
|
|
|
>>> rf = RandomForestRegressor(numTrees=2, maxDepth=2)
|
2020-01-14 09:25:51 -05:00
|
|
|
>>> rf.getMinWeightFractionPerNode()
|
|
|
|
0.0
|
2019-10-27 23:36:10 -04:00
|
|
|
>>> rf.setSeed(42)
|
|
|
|
RandomForestRegressor...
|
2015-05-12 15:17:05 -04:00
|
|
|
>>> model = rf.fit(df)
|
2020-01-23 03:44:13 -05:00
|
|
|
>>> model.getBootstrap()
|
|
|
|
True
|
2019-10-12 10:13:50 -04:00
|
|
|
>>> model.getSeed()
|
|
|
|
42
|
|
|
|
>>> model.setLeafCol("leafId")
|
|
|
|
RandomForestRegressionModel...
|
2016-03-11 02:54:23 -05:00
|
|
|
>>> model.featureImportances
|
|
|
|
SparseVector(1, {0: 1.0})
|
2015-07-07 11:58:08 -04:00
|
|
|
>>> allclose(model.treeWeights, [1.0, 1.0])
|
|
|
|
True
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
|
[SPARK-28985][PYTHON][ML] Add common classes (JavaPredictor/JavaClassificationModel/JavaProbabilisticClassifier) in PYTHON
### What changes were proposed in this pull request?
Add some common classes in Python to make it have the same structure as Scala
1. Scala has ClassifierParams/Classifier/ClassificationModel:
```
trait ClassifierParams
extends PredictorParams with HasRawPredictionCol
abstract class Classifier
extends Predictor with ClassifierParams {
def setRawPredictionCol
}
abstract class ClassificationModel
extends PredictionModel with ClassifierParams {
def setRawPredictionCol
}
```
This PR makes Python have the following:
```
class JavaClassifierParams(HasRawPredictionCol, JavaPredictorParams):
pass
class JavaClassifier(JavaPredictor, JavaClassifierParams):
def setRawPredictionCol
class JavaClassificationModel(JavaPredictionModel, JavaClassifierParams):
def setRawPredictionCol
```
2. Scala has ProbabilisticClassifierParams/ProbabilisticClassifier/ProbabilisticClassificationModel:
```
trait ProbabilisticClassifierParams
extends ClassifierParams with HasProbabilityCol with HasThresholds
abstract class ProbabilisticClassifier
extends Classifier with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
abstract class ProbabilisticClassificationModel
extends ClassificationModel with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
```
This PR makes Python have the following:
```
class JavaProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, JavaClassifierParams):
pass
class JavaProbabilisticClassifier(JavaClassifier, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
class JavaProbabilisticClassificationModel(JavaClassificationModel, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
```
3. Scala has PredictorParams/Predictor/PredictionModel:
```
trait PredictorParams extends Params
with HasLabelCol with HasFeaturesCol with HasPredictionCol
abstract class Predictor
extends Estimator with PredictorParams {
def setLabelCol
def setFeaturesCol
def setPredictionCol
}
abstract class PredictionModel
extends Model with PredictorParams {
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
}
```
This PR makes Python have the following:
```
class JavaPredictorParams(HasLabelCol, HasFeaturesCol, HasPredictionCol):
pass
class JavaPredictor(JavaEstimator, JavaPredictorParams):
def setLabelCol
def setFeaturesCol
def setPredictionCol
class JavaPredictionModel(JavaModel, JavaPredictorParams):
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
```
### Why are the changes needed?
Have parity between Python and Scala ML
### Does this PR introduce any user-facing change?
Yes. Add the following changes:
```
LinearSVCModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- predict
```
```
LogisticRegressionModel
DecisionTreeClassificationModel
RandomForestClassificationModel
GBTClassificationModel
NaiveBayesModel
MultilayerPerceptronClassificationModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- get/setProbabilityCol
- predict
```
```
LinearRegressionModel
IsotonicRegressionModel
DecisionTreeRegressionModel
RandomForestRegressionModel
GBTRegressionModel
AFTSurvivalRegressionModel
GeneralizedLinearRegressionModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- predict
```
### How was this patch tested?
Add a few doc tests.
Closes #25776 from huaxingao/spark-28985.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-09-19 09:17:25 -04:00
|
|
|
>>> model.predict(test0.head().features)
|
|
|
|
0.0
|
2019-10-12 10:13:50 -04:00
|
|
|
>>> model.predictLeaf(test0.head().features)
|
|
|
|
DenseVector([0.0, 0.0])
|
2019-08-23 18:18:35 -04:00
|
|
|
>>> result = model.transform(test0).head()
|
|
|
|
>>> result.prediction
|
2015-05-12 15:17:05 -04:00
|
|
|
0.0
|
2019-08-23 18:18:35 -04:00
|
|
|
>>> result.leafId
|
|
|
|
DenseVector([0.0, 0.0])
|
2016-08-22 06:21:22 -04:00
|
|
|
>>> model.numFeatures
|
|
|
|
1
|
2016-06-02 18:55:14 -04:00
|
|
|
>>> model.trees
|
2019-11-11 14:03:26 -05:00
|
|
|
[DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
|
2016-06-02 18:55:14 -04:00
|
|
|
>>> model.getNumTrees
|
|
|
|
2
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
|
2015-05-12 15:17:05 -04:00
|
|
|
>>> model.transform(test1).head().prediction
|
|
|
|
0.5
|
2016-04-08 13:39:12 -04:00
|
|
|
>>> rfr_path = temp_path + "/rfr"
|
|
|
|
>>> rf.save(rfr_path)
|
|
|
|
>>> rf2 = RandomForestRegressor.load(rfr_path)
|
|
|
|
>>> rf2.getNumTrees()
|
|
|
|
2
|
|
|
|
>>> model_path = temp_path + "/rfr_model"
|
|
|
|
>>> model.save(model_path)
|
|
|
|
>>> model2 = RandomForestRegressionModel.load(model_path)
|
|
|
|
>>> model.featureImportances == model2.featureImportances
|
|
|
|
True
|
2020-08-03 11:50:34 -04:00
|
|
|
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
|
|
|
|
True
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
|
|
|
|
@keyword_only
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the `LinearSVC.__init__` signature will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-05-12 15:17:05 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
2015-10-27 16:55:03 -04:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
|
|
|
|
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
|
2020-01-14 09:25:51 -05:00
|
|
|
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
|
2020-01-23 03:44:13 -05:00
|
|
|
weightCol=None, bootstrap=True):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the `LinearSVC.__init__` signature will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-05-14 21:16:22 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
|
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
|
2015-10-27 16:55:03 -04:00
|
|
|
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
|
2020-01-14 09:25:51 -05:00
|
|
|
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
|
2020-01-23 03:44:13 -05:00
|
|
|
weightCol=None, bootstrap=True)
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
super(RandomForestRegressor, self).__init__()
|
2015-05-18 15:02:18 -04:00
|
|
|
self._java_obj = self._new_java_obj(
|
|
|
|
"org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-05-12 15:17:05 -04:00
|
|
|
self.setParams(**kwargs)
|
|
|
|
|
|
|
|
@keyword_only
|
2015-09-17 11:45:20 -04:00
|
|
|
@since("1.4.0")
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
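For reference, here is a rough sketch of how a `keyword_only`-style decorator can capture the passed keyword arguments into `self._input_kwargs`. This is an illustration only, not necessarily identical to the actual `pyspark.keyword_only` implementation:
```python
import functools

def keyword_only(func):
    """Force a method to accept keyword arguments only and record them."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if args:
            raise TypeError("Method %s forces keyword arguments." % func.__name__)
        # Stash the keyword arguments so setParams / _set can reuse them.
        self._input_kwargs = kwargs
        return func(self, **kwargs)
    return wrapper
```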
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the signature shown for `LinearSVC.__init__` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-05-12 15:17:05 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
2015-10-27 16:55:03 -04:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
|
|
|
|
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
|
2020-01-14 09:25:51 -05:00
|
|
|
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
|
2020-01-23 03:44:13 -05:00
|
|
|
weightCol=None, bootstrap=True):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the signature shown for `LinearSVC.__init__` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-05-14 21:16:22 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
|
2015-10-27 16:55:03 -04:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
|
|
|
|
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
|
2020-01-14 09:25:51 -05:00
|
|
|
featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
|
2020-01-23 03:44:13 -05:00
|
|
|
weightCol=None, bootstrap=True)
|
2015-05-12 15:17:05 -04:00
|
|
|
Sets params for random forest regression.
|
|
|
|
"""
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-05-12 15:17:05 -04:00
|
|
|
return self._set(**kwargs)
|
|
|
|
|
|
|
|
def _create_model(self, java_model):
|
|
|
|
return RandomForestRegressionModel(java_model)
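A minimal usage sketch of the estimator defined above (illustrative only; it assumes an active `SparkSession` bound to the name `spark`, and every constructor argument must be passed by keyword):
```python
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import RandomForestRegressor

# Toy training data: one feature column and a numeric label.
df = spark.createDataFrame(
    [(1.0, Vectors.dense(1.0)), (0.0, Vectors.sparse(1, [], []))],
    ["label", "features"])

rf = RandomForestRegressor(numTrees=20, maxDepth=5, seed=42)  # keyword-only args
model = rf.fit(df)
print(model.numFeatures)                      # 1
print(model.transform(df).head().prediction)  # a regression prediction
```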
|
|
|
|
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMaxDepth(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxDepth`.
|
|
|
|
"""
|
|
|
|
return self._set(maxDepth=value)
|
|
|
|
|
|
|
|
def setMaxBins(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxBins`.
|
|
|
|
"""
|
|
|
|
return self._set(maxBins=value)
|
|
|
|
|
|
|
|
def setMinInstancesPerNode(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minInstancesPerNode`.
|
|
|
|
"""
|
|
|
|
return self._set(minInstancesPerNode=value)
|
|
|
|
|
|
|
|
def setMinInfoGain(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minInfoGain`.
|
|
|
|
"""
|
|
|
|
return self._set(minInfoGain=value)
|
|
|
|
|
|
|
|
def setMaxMemoryInMB(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxMemoryInMB`.
|
|
|
|
"""
|
|
|
|
return self._set(maxMemoryInMB=value)
|
|
|
|
|
|
|
|
def setCacheNodeIds(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`cacheNodeIds`.
|
|
|
|
"""
|
|
|
|
return self._set(cacheNodeIds=value)
|
|
|
|
|
|
|
|
@since("1.4.0")
|
|
|
|
def setImpurity(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`impurity`.
|
|
|
|
"""
|
|
|
|
return self._set(impurity=value)
|
|
|
|
|
|
|
|
@since("1.4.0")
|
|
|
|
def setNumTrees(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`numTrees`.
|
|
|
|
"""
|
|
|
|
return self._set(numTrees=value)
|
|
|
|
|
2020-01-23 03:44:13 -05:00
|
|
|
@since("3.0.0")
|
|
|
|
def setBootstrap(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`bootstrap`.
|
|
|
|
"""
|
|
|
|
return self._set(bootstrap=value)
|
|
|
|
|
2019-07-20 11:44:33 -04:00
|
|
|
@since("1.4.0")
|
|
|
|
def setSubsamplingRate(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`subsamplingRate`.
|
|
|
|
"""
|
|
|
|
return self._set(subsamplingRate=value)
|
|
|
|
|
2018-05-30 14:04:09 -04:00
|
|
|
@since("2.4.0")
|
|
|
|
def setFeatureSubsetStrategy(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`featureSubsetStrategy`.
|
|
|
|
"""
|
|
|
|
return self._set(featureSubsetStrategy=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
def setCheckpointInterval(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`checkpointInterval`.
|
|
|
|
"""
|
|
|
|
return self._set(checkpointInterval=value)
|
|
|
|
|
|
|
|
def setSeed(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`seed`.
|
|
|
|
"""
|
|
|
|
return self._set(seed=value)
|
|
|
|
|
2020-01-14 09:25:51 -05:00
|
|
|
@since("3.0.0")
|
|
|
|
def setWeightCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`weightCol`.
|
|
|
|
"""
|
|
|
|
return self._set(weightCol=value)
|
|
|
|
|
|
|
|
@since("3.0.0")
|
|
|
|
def setMinWeightFractionPerNode(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minWeightFractionPerNode`.
|
|
|
|
"""
|
|
|
|
return self._set(minWeightFractionPerNode=value)
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
2020-01-17 20:34:30 -05:00
|
|
|
class RandomForestRegressionModel(
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public hierarchy (as opposed to the `Java*` variants, which are commonly described in docstrings as private) which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and / or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally "private" `Java*` classes in`ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is for discussion if the same should be done to equivalent classes from `ml.wrapper`.
If we take `JavaClassifier` as an example, type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
_JavaRegressionModel, _TreeEnsembleModel, _RandomForestRegressorParams,
|
2020-01-17 20:34:30 -05:00
|
|
|
JavaMLWritable, JavaMLReadable
|
|
|
|
):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2016-05-25 01:20:00 -04:00
|
|
|
Model fitted by :class:`RandomForestRegressor`.
|
2015-09-17 11:45:20 -04:00
|
|
|
|
|
|
|
.. versionadded:: 1.4.0
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
|
2016-06-02 18:55:14 -04:00
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def trees(self):
|
|
|
|
"""Trees in this ensemble. Warning: These have null parent Estimators."""
|
|
|
|
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
|
|
|
|
|
2016-03-11 02:54:23 -05:00
|
|
|
@property
|
|
|
|
def featureImportances(self):
|
|
|
|
"""
|
|
|
|
Estimate of the importance of each feature.
|
|
|
|
|
2016-03-31 16:00:10 -04:00
|
|
|
Each feature's importance is the average of its importance across all trees in the ensemble.
|
|
|
|
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
|
|
|
|
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
|
|
|
|
and follows the implementation from scikit-learn.
|
2016-03-11 02:54:23 -05:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
See Also
|
|
|
|
--------
|
|
|
|
DecisionTreeRegressionModel.featureImportances
|
2016-03-11 02:54:23 -05:00
|
|
|
"""
|
|
|
|
return self._call_java("featureImportances")
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
2019-10-12 10:13:50 -04:00
|
|
|
class _GBTRegressorParams(_GBTParams, _TreeRegressorParams):
|
|
|
|
"""
|
|
|
|
Params for :py:class:`GBTRegressor` and :py:class:`GBTRegressorModel`.
|
|
|
|
|
|
|
|
.. versionadded:: 3.0.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
supportedLossTypes = ["squared", "absolute"]
|
|
|
|
|
|
|
|
lossType = Param(Params._dummy(), "lossType",
|
|
|
|
"Loss function which GBT tries to minimize (case-insensitive). " +
|
|
|
|
"Supported options: " + ", ".join(supportedLossTypes),
|
|
|
|
typeConverter=TypeConverters.toString)
|
|
|
|
|
2020-08-03 11:50:34 -04:00
|
|
|
def __init__(self, *args):
|
|
|
|
super(_GBTRegressorParams, self).__init__(*args)
|
2020-07-16 14:12:29 -04:00
|
|
|
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
|
|
|
|
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
|
|
|
|
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
|
|
|
|
leafCol="", minWeightFractionPerNode=0.0)
|
|
|
|
|
2019-10-12 10:13:50 -04:00
|
|
|
@since("1.4.0")
|
|
|
|
def getLossType(self):
|
|
|
|
"""
|
|
|
|
Gets the value of lossType or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.lossType)
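A short sketch of reading and changing the loss through these params (illustrative; `GBTRegressor`, defined below, mixes in `_GBTRegressorParams`):
```python
from pyspark.ml.regression import GBTRegressor

gbt = GBTRegressor(maxIter=10)
print(gbt.getLossType())     # 'squared' -- the default set above
gbt.setLossType("absolute")  # the other supported option
print(gbt.getLossType())     # 'absolute'
```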
|
|
|
|
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
@inherit_doc
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public hierarchy (as opposed to the `Java*` variants, which are commonly described in docstrings as private) which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and / or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally "private" `Java*` classes in`ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is for discussion if the same should be done to equivalent classes from `ml.wrapper`.
If we take `JavaClassifier` as an example, type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
class GBTRegressor(_JavaRegressor, _GBTRegressorParams, JavaMLWritable, JavaMLReadable):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2016-05-09 04:11:17 -04:00
|
|
|
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
|
2015-05-12 15:17:05 -04:00
|
|
|
learning algorithm for regression.
|
|
|
|
It supports both continuous and categorical features.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 1.4.0
|
|
|
|
|
|
|
|
Examples
|
|
|
|
--------
|
2015-07-07 11:58:08 -04:00
|
|
|
>>> from numpy import allclose
|
2016-05-17 15:51:07 -04:00
|
|
|
>>> from pyspark.ml.linalg import Vectors
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> df = spark.createDataFrame([
|
2015-05-12 15:17:05 -04:00
|
|
|
... (1.0, Vectors.dense(1.0)),
|
|
|
|
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
|
2019-10-27 23:36:10 -04:00
|
|
|
>>> gbt = GBTRegressor(maxDepth=2, seed=42, leafCol="leafId")
|
|
|
|
>>> gbt.setMaxIter(5)
|
|
|
|
GBTRegressor...
|
2019-12-09 14:39:33 -05:00
|
|
|
>>> gbt.setMinWeightFractionPerNode(0.049)
|
|
|
|
GBTRegressor...
|
2019-10-27 23:36:10 -04:00
|
|
|
>>> gbt.getMaxIter()
|
|
|
|
5
|
2016-05-12 03:19:27 -04:00
|
|
|
>>> print(gbt.getImpurity())
|
|
|
|
variance
|
2018-05-30 14:04:09 -04:00
|
|
|
>>> print(gbt.getFeatureSubsetStrategy())
|
|
|
|
all
|
2015-05-12 15:17:05 -04:00
|
|
|
>>> model = gbt.fit(df)
|
2016-03-31 16:00:10 -04:00
|
|
|
>>> model.featureImportances
|
|
|
|
SparseVector(1, {0: 1.0})
|
2016-08-22 06:21:22 -04:00
|
|
|
>>> model.numFeatures
|
|
|
|
1
|
2015-07-07 11:58:08 -04:00
|
|
|
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
|
|
|
|
True
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
|
[SPARK-28985][PYTHON][ML] Add common classes (JavaPredictor/JavaClassificationModel/JavaProbabilisticClassifier) in PYTHON
### What changes were proposed in this pull request?
Add some common classes in Python to make it have the same structure as Scala
1. Scala has ClassifierParams/Classifier/ClassificationModel:
```
trait ClassifierParams
extends PredictorParams with HasRawPredictionCol
abstract class Classifier
extends Predictor with ClassifierParams {
def setRawPredictionCol
}
abstract class ClassificationModel
extends PredictionModel with ClassifierParams {
def setRawPredictionCol
}
```
This PR makes Python has the following:
```
class JavaClassifierParams(HasRawPredictionCol, JavaPredictorParams):
pass
class JavaClassifier(JavaPredictor, JavaClassifierParams):
def setRawPredictionCol
class JavaClassificationModel(JavaPredictionModel, JavaClassifierParams):
def setRawPredictionCol
```
2. Scala has ProbabilisticClassifierParams/ProbabilisticClassifier/ProbabilisticClassificationModel:
```
trait ProbabilisticClassifierParams
extends ClassifierParams with HasProbabilityCol with HasThresholds
abstract class ProbabilisticClassifier
extends Classifier with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
abstract class ProbabilisticClassificationModel
extends ClassificationModel with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
```
This PR makes Python have the following:
```
class JavaProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, JavaClassifierParams):
pass
class JavaProbabilisticClassifier(JavaClassifier, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
class JavaProbabilisticClassificationModel(JavaClassificationModel, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
```
3. Scala has PredictorParams/Predictor/PredictionModel:
```
trait PredictorParams extends Params
with HasLabelCol with HasFeaturesCol with HasPredictionCol
abstract class Predictor
extends Estimator with PredictorParams {
def setLabelCol
def setFeaturesCol
def setPredictionCol
}
abstract class PredictionModel
extends Model with PredictorParams {
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
}
```
This PR makes Python have the following:
```
class JavaPredictorParams(HasLabelCol, HasFeaturesCol, HasPredictionCol):
pass
class JavaPredictor(JavaEstimator, JavaPredictorParams):
def setLabelCol
def setFeaturesCol
def setPredictionCol
class JavaPredictionModel(JavaModel, JavaPredictorParams):
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
```
### Why are the changes needed?
Have parity between Python and Scala ML
### Does this PR introduce any user-facing change?
Yes. Add the following changes:
```
LinearSVCModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- predict
```
```
LogisticRegressionModel
DecisionTreeClassificationModel
RandomForestClassificationModel
GBTClassificationModel
NaiveBayesModel
MultilayerPerceptronClassificationModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- get/setProbabilityCol
- predict
```
```
LinearRegressionModel
IsotonicRegressionModel
DecisionTreeRegressionModel
RandomForestRegressionModel
GBTRegressionModel
AFTSurvivalRegressionModel
GeneralizedLinearRegressionModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- predict
```
### How was this patch tested?
Add a few doc tests.
Closes #25776 from huaxingao/spark-28985.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-09-19 09:17:25 -04:00
|
|
|
>>> model.predict(test0.head().features)
|
|
|
|
0.0
|
2019-10-12 10:13:50 -04:00
|
|
|
>>> model.predictLeaf(test0.head().features)
|
|
|
|
DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
|
2019-08-23 18:18:35 -04:00
|
|
|
>>> result = model.transform(test0).head()
|
|
|
|
>>> result.prediction
|
2015-05-12 15:17:05 -04:00
|
|
|
0.0
|
2019-08-23 18:18:35 -04:00
|
|
|
>>> result.leafId
|
|
|
|
DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
|
2015-05-12 15:17:05 -04:00
|
|
|
>>> model.transform(test1).head().prediction
|
|
|
|
1.0
|
2016-04-15 00:36:03 -04:00
|
|
|
>>> gbtr_path = temp_path + "gbtr"
|
|
|
|
>>> gbt.save(gbtr_path)
|
|
|
|
>>> gbt2 = GBTRegressor.load(gbtr_path)
|
|
|
|
>>> gbt2.getMaxDepth()
|
|
|
|
2
|
|
|
|
>>> model_path = temp_path + "gbtr_model"
|
|
|
|
>>> model.save(model_path)
|
|
|
|
>>> model2 = GBTRegressionModel.load(model_path)
|
|
|
|
>>> model.featureImportances == model2.featureImportances
|
|
|
|
True
|
|
|
|
>>> model.treeWeights == model2.treeWeights
|
|
|
|
True
|
2020-08-03 11:50:34 -04:00
|
|
|
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
|
|
|
|
True
|
2016-06-20 19:28:11 -04:00
|
|
|
>>> model.trees
|
2019-11-11 14:03:26 -05:00
|
|
|
[DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
|
2018-05-15 17:16:31 -04:00
|
|
|
>>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))],
|
|
|
|
... ["label", "features"])
|
|
|
|
>>> model.evaluateEachIteration(validation, "squared")
|
|
|
|
[0.0, 0.0, 0.0, 0.0, 0.0]
|
2018-12-07 16:53:35 -05:00
|
|
|
>>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
|
|
|
|
>>> gbt.getValidationIndicatorCol()
|
|
|
|
'validationIndicator'
|
|
|
|
>>> gbt.getValidationTol()
|
|
|
|
0.01
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
|
|
|
|
@keyword_only
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the signature shown for `LinearSVC.__init__` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-05-12 15:17:05 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
2015-10-27 16:55:03 -04:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
|
2016-05-12 03:19:27 -04:00
|
|
|
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
|
2018-12-07 16:53:35 -05:00
|
|
|
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
|
2019-12-09 14:39:33 -05:00
|
|
|
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
|
|
|
|
weightCol=None):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the signature shown for `LinearSVC.__init__` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-05-14 21:16:22 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
|
2015-10-27 16:55:03 -04:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
|
2016-05-12 03:19:27 -04:00
|
|
|
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
|
2018-12-07 16:53:35 -05:00
|
|
|
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
|
2019-12-09 14:39:33 -05:00
|
|
|
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
|
|
|
|
weightCol=None)
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
super(GBTRegressor, self).__init__()
|
2015-05-18 15:02:18 -04:00
|
|
|
self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-05-12 15:17:05 -04:00
|
|
|
self.setParams(**kwargs)
|
|
|
|
|
|
|
|
@keyword_only
|
2015-09-17 11:45:20 -04:00
|
|
|
@since("1.4.0")
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the signature shown for `LinearSVC.__init__` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-05-12 15:17:05 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
|
2015-10-27 16:55:03 -04:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
|
2016-05-12 03:19:27 -04:00
|
|
|
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
|
2020-11-27 11:22:45 -05:00
|
|
|
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
|
2019-12-09 14:39:33 -05:00
|
|
|
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
|
|
|
|
weightCol=None):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
@keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the signature shown for `LinearSVC.__init__` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-05-14 21:16:22 -04:00
|
|
|
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
|
2015-10-27 16:55:03 -04:00
|
|
|
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
|
2016-05-12 03:19:27 -04:00
|
|
|
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
|
2018-12-07 16:53:35 -05:00
|
|
|
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
|
2019-12-09 14:39:33 -05:00
|
|
|
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
|
|
|
|
weightCol=None)
|
2015-05-12 15:17:05 -04:00
|
|
|
Sets params for Gradient Boosted Tree Regression.
|
|
|
|
"""
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-05-12 15:17:05 -04:00
|
|
|
return self._set(**kwargs)
|
|
|
|
|
|
|
|
def _create_model(self, java_model):
|
|
|
|
return GBTRegressionModel(java_model)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMaxDepth(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxDepth`.
|
|
|
|
"""
|
|
|
|
return self._set(maxDepth=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMaxBins(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxBins`.
|
|
|
|
"""
|
|
|
|
return self._set(maxBins=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMinInstancesPerNode(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minInstancesPerNode`.
|
|
|
|
"""
|
|
|
|
return self._set(minInstancesPerNode=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMinInfoGain(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minInfoGain`.
|
|
|
|
"""
|
|
|
|
return self._set(minInfoGain=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setMaxMemoryInMB(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxMemoryInMB`.
|
|
|
|
"""
|
|
|
|
return self._set(maxMemoryInMB=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
2019-07-20 11:44:33 -04:00
|
|
|
def setCacheNodeIds(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`cacheNodeIds`.
|
|
|
|
"""
|
|
|
|
return self._set(cacheNodeIds=value)
|
|
|
|
|
|
|
|
@since("1.4.0")
|
|
|
|
def setImpurity(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`impurity`.
|
|
|
|
"""
|
|
|
|
return self._set(impurity=value)
|
|
|
|
|
2015-09-17 11:45:20 -04:00
|
|
|
@since("1.4.0")
|
2015-05-12 15:17:05 -04:00
|
|
|
def setLossType(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`lossType`.
|
|
|
|
"""
|
2016-05-03 10:46:13 -04:00
|
|
|
return self._set(lossType=value)
|
2015-05-12 15:17:05 -04:00
|
|
|
|
2019-07-20 11:44:33 -04:00
|
|
|
@since("1.4.0")
|
|
|
|
def setSubsamplingRate(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`subsamplingRate`.
|
|
|
|
"""
|
|
|
|
return self._set(subsamplingRate=value)
|
|
|
|
|
2018-05-30 14:04:09 -04:00
|
|
|
@since("2.4.0")
|
|
|
|
def setFeatureSubsetStrategy(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`featureSubsetStrategy`.
|
|
|
|
"""
|
|
|
|
return self._set(featureSubsetStrategy=value)
|
|
|
|
|
2018-12-07 16:53:35 -05:00
|
|
|
@since("3.0.0")
|
|
|
|
def setValidationIndicatorCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`validationIndicatorCol`.
|
|
|
|
"""
|
|
|
|
return self._set(validationIndicatorCol=value)
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.4.0")
|
|
|
|
def setMaxIter(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxIter`.
|
|
|
|
"""
|
|
|
|
return self._set(maxIter=value)
|
|
|
|
|
|
|
|
@since("1.4.0")
|
|
|
|
def setCheckpointInterval(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`checkpointInterval`.
|
|
|
|
"""
|
|
|
|
return self._set(checkpointInterval=value)
|
|
|
|
|
|
|
|
@since("1.4.0")
|
|
|
|
def setSeed(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`seed`.
|
|
|
|
"""
|
|
|
|
return self._set(seed=value)
|
|
|
|
|
|
|
|
@since("1.4.0")
|
|
|
|
def setStepSize(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`stepSize`.
|
|
|
|
"""
|
|
|
|
return self._set(stepSize=value)
|
|
|
|
|
2019-12-09 14:39:33 -05:00
|
|
|
@since("3.0.0")
|
|
|
|
def setWeightCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`weightCol`.
|
|
|
|
"""
|
|
|
|
return self._set(weightCol=value)
|
|
|
|
|
|
|
|
@since("3.0.0")
|
|
|
|
def setMinWeightFractionPerNode(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`minWeightFractionPerNode`.
|
|
|
|
"""
|
|
|
|
return self._set(minWeightFractionPerNode=value)
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
2020-01-17 20:34:30 -05:00
|
|
|
class GBTRegressionModel(
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public hierarchy (as opposed to the `Java*` variants, which are commonly described in docstrings as private) which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and / or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally "private" `Java*` classes in`ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is for discussion if the same should be done to equivalent classes from `ml.wrapper`.
If we take `JavaClassifier` as an example, type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
_JavaRegressionModel, _TreeEnsembleModel, _GBTRegressorParams,
|
2020-01-17 20:34:30 -05:00
|
|
|
JavaMLWritable, JavaMLReadable
|
|
|
|
):
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
2016-05-25 01:20:00 -04:00
|
|
|
Model fitted by :class:`GBTRegressor`.
|
2015-09-17 11:45:20 -04:00
|
|
|
|
|
|
|
.. versionadded:: 1.4.0
|
2015-05-12 15:17:05 -04:00
|
|
|
"""
|
|
|
|
|
2016-03-31 16:00:10 -04:00
|
|
|
@property
|
|
|
|
def featureImportances(self):
|
|
|
|
"""
|
|
|
|
Estimate of the importance of each feature.
|
|
|
|
|
|
|
|
Each feature's importance is the average of its importance across all trees in the ensemble.
|
|
|
|
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
|
|
|
|
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
|
|
|
|
and follows the implementation from scikit-learn.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
See Also
|
|
|
|
--------
|
|
|
|
DecisionTreeRegressionModel.featureImportances
|
2016-03-31 16:00:10 -04:00
|
|
|
"""
|
|
|
|
return self._call_java("featureImportances")
|
|
|
|
|
2016-06-02 18:55:14 -04:00
|
|
|
@property
|
|
|
|
@since("2.0.0")
|
|
|
|
def trees(self):
|
|
|
|
"""Trees in this ensemble. Warning: These have null parent Estimators."""
|
|
|
|
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
|
|
|
|
|
2018-05-15 17:16:31 -04:00
|
|
|
def evaluateEachIteration(self, dataset, loss):
|
|
|
|
"""
|
|
|
|
Method to compute error or loss for every iteration of gradient boosting.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.4.0
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
dataset : :py:class:`pyspark.sql.DataFrame`
|
2018-05-15 17:16:31 -04:00
|
|
|
Test dataset to evaluate model on, where dataset is an
|
|
|
|
instance of :py:class:`pyspark.sql.DataFrame`
|
2020-11-09 19:33:48 -05:00
|
|
|
loss : str
|
2018-05-15 17:16:31 -04:00
|
|
|
The loss function used to compute error.
|
|
|
|
Supported options: squared, absolute
|
|
|
|
"""
|
|
|
|
return self._call_java("evaluateEachIteration", dataset, loss)
|
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClassificationModel`, `ProbabilisticClassifier`, `ProbabilisticClassificationModel`, `Regressor`, `RegressionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extension points for non-JVM algorithms, as well as a public hierarchy (as opposed to the `Java*` variants, which are commonly described in docstrings as private) which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and / or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally "private" `Java*` classes in`ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is for discussion if the same should be done to equivalent classes from `ml.wrapper`.
If we take `JavaClassifier` as an example, type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
class _AFTSurvivalRegressionParams(_PredictorParams, HasMaxIter, HasTol, HasFitIntercept,
|
2020-11-18 10:02:31 -05:00
|
|
|
HasAggregationDepth, HasMaxBlockSizeInMB):
|
[SPARK-28985][PYTHON][ML][FOLLOW-UP] Add _AFTSurvivalRegressionParams
### What changes were proposed in this pull request?
Adds
```python
_AFTSurvivalRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasMaxIter, HasTol, HasFitIntercept,
HasAggregationDepth): ...
```
with related Params and uses it to replace `HasFitIntercept`, `HasMaxIter`, `HasTol` and `HasAggregationDepth` in `AFTSurvivalRegression` base classes, and `JavaPredictionModel` in `AFTSurvivalRegressionModel` base classes.
### Why are the changes needed?
Previous work (#25776) on [SPARK-28985](https://issues.apache.org/jira/browse/SPARK-28985) replaced `JavaEstimator`, `HasFeaturesCol`, `HasLabelCol`, `HasPredictionCol` in `AFTSurvivalRegression` and `JavaModel` in `AFTSurvivalRegressionModel` with newly added `JavaPredictor`:
https://github.com/apache/spark/blob/e97b55d32285052a1f76cca35377c4b21eb2e7d7/python/pyspark/ml/wrapper.py#L377
and `JavaPredictionModel`
https://github.com/apache/spark/blob/e97b55d32285052a1f76cca35377c4b21eb2e7d7/python/pyspark/ml/wrapper.py#L405
respectively.
This however is inconsistent with Scala counterpart where both classes extend private `AFTSurvivalRegressionBase`
https://github.com/apache/spark/blob/eb037a8180be4ab7570eda1fa9cbf3c84b92c3f7/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala#L48-L50
This preserves some of the existing inconsistencies (variables as defined in [the official example](https://github.com/apache/spark/blob/master/examples/src/main/python/ml/aft_survival_regression.p))
```
from pyspark.ml.regression import AFTSurvivalRegression, AFTSurvivalRegressionModel
from pyspark.ml.param.shared import HasMaxIter, HasTol, HasFitIntercept, HasAggregationDepth
from pyspark.ml.param import Param
issubclass(AFTSurvivalRegressionModel, HasMaxIter)
# False
hasattr(model, "maxIter") and isinstance(model.maxIter, Param)
# True
issubclass(AFTSurvivalRegressionModel, HasTol)
# False
hasattr(model, "tol") and isinstance(model.tol, Param)
# True
```
and can cause problems in the future, if Predictor / PredictionModel API changes (unlike [`IsotonicRegression`](https://github.com/apache/spark/pull/26023), current implementation is technically speaking correct, though incomplete).
### Does this PR introduce any user-facing change?
Yes, it adds a number of base classes to `AFTSurvivalRegressionModel`. These changes are purely additive and have negligible potential for breaking existing code (and none compared to the changes already made in #25776). Additionally, the affected API hasn't been released in its current form yet.
### How was this patch tested?
- Existing unit tests.
- Manual testing.
CC huaxingao, zhengruifeng
Closes #26024 from zero323/SPARK-28985-FOLLOW-UP-aftsurival-regression.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-10-04 19:04:21 -04:00
|
|
|
"""
|
|
|
|
Params for :py:class:`AFTSurvivalRegression` and :py:class:`AFTSurvivalRegressionModel`.
|
|
|
|
|
|
|
|
.. versionadded:: 3.0.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
censorCol = Param(
|
|
|
|
Params._dummy(), "censorCol",
|
|
|
|
"censor column name. The value of this column could be 0 or 1. " +
|
|
|
|
"If the value is 1, it means the event has occurred i.e. " +
|
|
|
|
"uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
|
|
|
|
quantileProbabilities = Param(
|
|
|
|
Params._dummy(), "quantileProbabilities",
|
|
|
|
"quantile probabilities array. Values of the quantile probabilities array " +
|
|
|
|
"should be in the range (0, 1) and the array should be non-empty.",
|
|
|
|
typeConverter=TypeConverters.toListFloat)
|
|
|
|
quantilesCol = Param(
|
|
|
|
Params._dummy(), "quantilesCol",
|
|
|
|
"quantiles column name. This column will output quantiles of " +
|
|
|
|
"corresponding quantileProbabilities if it is set.",
|
|
|
|
typeConverter=TypeConverters.toString)
|
|
|
|
|
2020-08-03 11:50:34 -04:00
|
|
|
def __init__(self, *args):
|
|
|
|
super(_AFTSurvivalRegressionParams, self).__init__(*args)
|
2020-07-16 14:12:29 -04:00
|
|
|
self._setDefault(censorCol="censor",
|
|
|
|
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
|
2020-11-18 10:02:31 -05:00
|
|
|
maxIter=100, tol=1E-6, maxBlockSizeInMB=0.0)
|
2020-07-16 14:12:29 -04:00
|
|
|
|
[SPARK-28985][PYTHON][ML][FOLLOW-UP] Add _AFTSurvivalRegressionParams
### What changes were proposed in this pull request?
Adds
```python
_AFTSurvivalRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasMaxIter, HasTol, HasFitIntercept,
HasAggregationDepth): ...
```
with related Params and uses it to replace `HasFitIntercept`, `HasMaxIter`, `HasTol` and `HasAggregationDepth` in `AFTSurvivalRegression` base classes, and `JavaPredictionModel` in `AFTSurvivalRegressionModel` base classes.
### Why are the changes needed?
Previous work (#25776) on [SPARK-28985](https://issues.apache.org/jira/browse/SPARK-28985) replaced `JavaEstimator`, `HasFeaturesCol`, `HasLabelCol`, `HasPredictionCol` in `AFTSurvivalRegression` and `JavaModel` in `AFTSurvivalRegressionModel` with newly added `JavaPredictor`:
https://github.com/apache/spark/blob/e97b55d32285052a1f76cca35377c4b21eb2e7d7/python/pyspark/ml/wrapper.py#L377
and `JavaPredictionModel`
https://github.com/apache/spark/blob/e97b55d32285052a1f76cca35377c4b21eb2e7d7/python/pyspark/ml/wrapper.py#L405
respectively.
This however is inconsistent with Scala counterpart where both classes extend private `AFTSurvivalRegressionBase`
https://github.com/apache/spark/blob/eb037a8180be4ab7570eda1fa9cbf3c84b92c3f7/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala#L48-L50
This preserves some of the existing inconsistencies (variables as defined in [the official example](https://github.com/apache/spark/blob/master/examples/src/main/python/ml/aft_survival_regression.p))
```
from pyspark.ml.regression import AFTSurvivalRegression, AFTSurvivalRegressionModel
from pyspark.ml.param.shared import HasMaxIter, HasTol, HasFitIntercept, HasAggregationDepth
from pyspark.ml.param import Param
issubclass(AFTSurvivalRegressionModel, HasMaxIter)
# False
hasattr(model, "maxIter") and isinstance(model.maxIter, Param)
# True
issubclass(AFTSurvivalRegressionModel, HasTol)
# False
hasattr(model, "tol") and isinstance(model.tol, Param)
# True
```
and can cause problems in the future, if Predictor / PredictionModel API changes (unlike [`IsotonicRegression`](https://github.com/apache/spark/pull/26023), current implementation is technically speaking correct, though incomplete).
### Does this PR introduce any user-facing change?
Yes, it adds a number of base classes to `AFTSurvivalRegressionModel`. These changes are purely additive and have negligible potential for breaking existing code (and none compared to the changes already made in #25776). Additionally, the affected API hasn't been released in its current form yet.
### How was this patch tested?
- Existing unit tests.
- Manual testing.
CC huaxingao, zhengruifeng
Closes #26024 from zero323/SPARK-28985-FOLLOW-UP-aftsurival-regression.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-10-04 19:04:21 -04:00
|
|
|
@since("1.6.0")
|
|
|
|
def getCensorCol(self):
|
|
|
|
"""
|
|
|
|
Gets the value of censorCol or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.censorCol)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def getQuantileProbabilities(self):
|
|
|
|
"""
|
|
|
|
Gets the value of quantileProbabilities or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.quantileProbabilities)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def getQuantilesCol(self):
|
|
|
|
"""
|
|
|
|
Gets the value of quantilesCol or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.quantilesCol)
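For illustration, a minimal usage sketch of these params (not part of the module; it assumes an active SparkSession named `spark`): setting `quantilesCol` together with `quantileProbabilities` makes `transform()` emit a vector of predicted quantiles per row.
```python
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import AFTSurvivalRegression

df = spark.createDataFrame(
    [(1.0, Vectors.dense(1.0), 1.0),
     (0.5, Vectors.dense(2.0), 0.0)],
    ["label", "features", "censor"])

# Request the 25th/50th/75th percentiles in an extra "quantiles" column.
aft = AFTSurvivalRegression(quantileProbabilities=[0.25, 0.5, 0.75],
                            quantilesCol="quantiles")
model = aft.fit(df)
model.transform(df).select("prediction", "quantiles").show()
```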
|
|
|
|
|
|
|
|
|
2015-10-06 15:43:28 -04:00
|
|
|
@inherit_doc
|
2020-03-03 23:20:02 -05:00
|
|
|
class AFTSurvivalRegression(_JavaRegressor, _AFTSurvivalRegressionParams,
|
2019-10-04 19:04:21 -04:00
|
|
|
JavaMLWritable, JavaMLReadable):
|
2015-10-06 15:43:28 -04:00
|
|
|
"""
|
|
|
|
Accelerated Failure Time (AFT) Model Survival Regression
|
|
|
|
|
|
|
|
Fit a parametric AFT survival regression model based on the Weibull distribution
|
|
|
|
of the survival time.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
For more information see Wikipedia page on
|
|
|
|
`AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_
|
|
|
|
|
2015-10-06 15:43:28 -04:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Examples
|
|
|
|
--------
|
2016-05-17 15:51:07 -04:00
|
|
|
>>> from pyspark.ml.linalg import Vectors
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> df = spark.createDataFrame([
|
2015-10-06 15:43:28 -04:00
|
|
|
... (1.0, Vectors.dense(1.0), 1.0),
|
2017-08-09 02:44:10 -04:00
|
|
|
... (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
|
2015-10-06 15:43:28 -04:00
|
|
|
>>> aftsr = AFTSurvivalRegression()
|
2019-10-27 23:36:10 -04:00
|
|
|
>>> aftsr.setMaxIter(10)
|
|
|
|
AFTSurvivalRegression...
|
|
|
|
>>> aftsr.getMaxIter()
|
|
|
|
10
|
|
|
|
>>> aftsr.clear(aftsr.maxIter)
|
2015-10-06 15:43:28 -04:00
|
|
|
>>> model = aftsr.fit(df)
|
2020-11-18 10:02:31 -05:00
|
|
|
>>> model.getMaxBlockSizeInMB()
|
|
|
|
0.0
|
[SPARK-28985][PYTHON][ML] Add common classes (JavaPredictor/JavaClassificationModel/JavaProbabilisticClassifier) in PYTHON
### What changes were proposed in this pull request?
Add some common classes in Python to make it have the same structure as Scala
1. Scala has ClassifierParams/Classifier/ClassificationModel:
```
trait ClassifierParams
extends PredictorParams with HasRawPredictionCol
abstract class Classifier
extends Predictor with ClassifierParams {
def setRawPredictionCol
}
abstract class ClassificationModel
extends PredictionModel with ClassifierParams {
def setRawPredictionCol
}
```
This PR makes Python have the following:
```
class JavaClassifierParams(HasRawPredictionCol, JavaPredictorParams):
pass
class JavaClassifier(JavaPredictor, JavaClassifierParams):
def setRawPredictionCol
class JavaClassificationModel(JavaPredictionModel, JavaClassifierParams):
def setRawPredictionCol
```
2. Scala has ProbabilisticClassifierParams/ProbabilisticClassifier/ProbabilisticClassificationModel:
```
trait ProbabilisticClassifierParams
extends ClassifierParams with HasProbabilityCol with HasThresholds
abstract class ProbabilisticClassifier
extends Classifier with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
abstract class ProbabilisticClassificationModel
extends ClassificationModel with ProbabilisticClassifierParams {
def setProbabilityCol
def setThresholds
}
```
This PR makes Python have the following:
```
class JavaProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, JavaClassifierParams):
pass
class JavaProbabilisticClassifier(JavaClassifier, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
class JavaProbabilisticClassificationModel(JavaClassificationModel, JavaProbabilisticClassifierParams):
def setProbabilityCol
def setThresholds
```
3. Scala has PredictorParams/Predictor/PredictionModel:
```
trait PredictorParams extends Params
with HasLabelCol with HasFeaturesCol with HasPredictionCol
abstract class Predictor
extends Estimator with PredictorParams {
def setLabelCol
def setFeaturesCol
def setPredictionCol
}
abstract class PredictionModel
extends Model with PredictorParams {
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
}
```
This PR makes Python have the following:
```
class JavaPredictorParams(HasLabelCol, HasFeaturesCol, HasPredictionCol):
pass
class JavaPredictor(JavaEstimator, JavaPredictorParams):
def setLabelCol
def setFeaturesCol
def setPredictionCol
class JavaPredictionModel(JavaModel, JavaPredictorParams):
def setFeaturesCol
def setPredictionCol
def numFeatures
def predict
```
### Why are the changes needed?
Have parity between Python and Scala ML
### Does this PR introduce any user-facing change?
Yes. Add the following changes:
```
LinearSVCModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- predict
```
```
LogisticRegressionModel
DecisionTreeClassificationModel
RandomForestClassificationModel
GBTClassificationModel
NaiveBayesModel
MultilayerPerceptronClassificationModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- get/setRawPredictionCol
- get/setProbabilityCol
- predict
```
```
LinearRegressionModel
IsotonicRegressionModel
DecisionTreeRegressionModel
RandomForestRegressionModel
GBTRegressionModel
AFTSurvivalRegressionModel
GeneralizedLinearRegressionModel
- get/setFeatureCol
- get/setPredictionCol
- get/setLabelCol
- predict
```
### How was this patch tested?
Add a few doc tests.
Closes #25776 from huaxingao/spark-28985.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
2019-09-19 09:17:25 -04:00
|
|
|
>>> model.setFeaturesCol("features")
|
[SPARK-29867][ML][PYTHON] Add __repr__ in Python ML Models
### What changes were proposed in this pull request?
Add ```__repr__``` in Python ML Models
### Why are the changes needed?
In Python ML Models, some of them have ```__repr__```, others don't. In the doctests, when calling Model.setXXX, some of the Models print out the xxxModel... correctly, while others can't because they lack the ```__repr__``` method. For example:
```
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> model = gm.fit(df)
>>> model.setPredictionCol("newPrediction")
GaussianMixture...
```
After the change, the above code will become the following:
```
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> model = gm.fit(df)
>>> model.setPredictionCol("newPrediction")
GaussianMixtureModel...
```
### Does this PR introduce any user-facing change?
Yes.
### How was this patch tested?
doctest
Closes #26489 from huaxingao/spark-29876.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
2019-11-16 00:44:39 -05:00
|
|
|
AFTSurvivalRegressionModel...
|
2015-10-06 15:43:28 -04:00
|
|
|
>>> model.predict(Vectors.dense(6.3))
|
|
|
|
1.0
|
|
|
|
>>> model.predictQuantiles(Vectors.dense(6.3))
|
|
|
|
DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
|
|
|
|
>>> model.transform(df).show()
|
2017-08-09 02:44:10 -04:00
|
|
|
+-------+---------+------+----------+
|
|
|
|
| label| features|censor|prediction|
|
|
|
|
+-------+---------+------+----------+
|
|
|
|
| 1.0| [1.0]| 1.0| 1.0|
|
|
|
|
|1.0E-40|(1,[],[])| 0.0| 1.0|
|
|
|
|
+-------+---------+------+----------+
|
2015-10-06 15:43:28 -04:00
|
|
|
...
|
2016-02-26 00:09:02 -05:00
|
|
|
>>> aftsr_path = temp_path + "/aftsr"
|
|
|
|
>>> aftsr.save(aftsr_path)
|
|
|
|
>>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
|
|
|
|
>>> aftsr2.getMaxIter()
|
|
|
|
100
|
|
|
|
>>> model_path = temp_path + "/aftsr_model"
|
|
|
|
>>> model.save(model_path)
|
|
|
|
>>> model2 = AFTSurvivalRegressionModel.load(model_path)
|
|
|
|
>>> model.coefficients == model2.coefficients
|
|
|
|
True
|
|
|
|
>>> model.intercept == model2.intercept
|
|
|
|
True
|
|
|
|
>>> model.scale == model2.scale
|
|
|
|
True
|
2020-08-03 11:50:34 -04:00
|
|
|
>>> model.transform(df).take(1) == model2.transform(df).take(1)
|
|
|
|
True
|
2015-10-06 15:43:28 -04:00
|
|
|
|
|
|
|
.. versionadded:: 1.6.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
@keyword_only
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completely, i.e.:
```python
keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example, the signature of `LinearSVC` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
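A short hedged sketch of what this means for callers of the pyspark.ml constructors (requires an active SparkSession, since building the estimator creates its JVM counterpart):
```python
from pyspark.ml.regression import AFTSurvivalRegression

aft = AFTSurvivalRegression(censorCol="censor", maxIter=50)   # keyword arguments: fine
try:
    AFTSurvivalRegression("censor")                           # positional arguments: rejected
except TypeError as err:
    print(err)
```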
|
|
|
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-10-06 15:43:28 -04:00
|
|
|
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
|
[SPARK-32320][PYSPARK] Remove mutable default arguments
This is bad practice, and might lead to unexpected behaviour:
https://florimond.dev/blog/articles/2018/08/python-mutable-defaults-are-the-source-of-all-evil/
```
fokkodriesprongFan spark % grep -R "={}" python | grep def
python/pyspark/resource/profile.py: def __init__(self, _java_resource_profile=None, _exec_req={}, _task_req={}):
python/pyspark/sql/functions.py:def from_json(col, schema, options={}):
python/pyspark/sql/functions.py:def to_json(col, options={}):
python/pyspark/sql/functions.py:def schema_of_json(json, options={}):
python/pyspark/sql/functions.py:def schema_of_csv(csv, options={}):
python/pyspark/sql/functions.py:def to_csv(col, options={}):
python/pyspark/sql/functions.py:def from_csv(col, schema, options={}):
python/pyspark/sql/avro/functions.py:def from_avro(data, jsonFormatSchema, options={}):
```
```
fokkodriesprongFan spark % grep -R "=\[\]" python | grep def
python/pyspark/ml/tuning.py: def __init__(self, bestModel, avgMetrics=[], subModels=None):
python/pyspark/ml/tuning.py: def __init__(self, bestModel, validationMetrics=[], subModels=None):
```
### What changes were proposed in this pull request?
Removing the mutable default arguments.
### Why are the changes needed?
Removing the mutable default arguments, and changing the signature to `Optional[...]`.
### Does this PR introduce _any_ user-facing change?
No 👍
### How was this patch tested?
Using the Flake8 bugbear code analysis plugin.
Closes #29122 from Fokko/SPARK-32320.
Authored-by: Fokko Driesprong <fokko@apache.org>
Signed-off-by: Ruifeng Zheng <ruifengz@foxmail.com>
2020-12-07 20:35:36 -05:00
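An illustrative sketch of the pitfall being removed and the fix adopted (plain Python, not Spark-specific):
```python
from typing import List, Optional

def bad_append(item, bucket=[]):     # the default list is created once and shared
    bucket.append(item)
    return bucket

bad_append(1)   # [1]
bad_append(2)   # [1, 2]  -- state leaks between calls

def good_append(item, bucket: Optional[List[int]] = None) -> List[int]:
    if bucket is None:               # create a fresh list per call instead
        bucket = []
    bucket.append(item)
    return bucket
```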
|
|
|
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), # noqa: B005
|
2020-11-18 10:02:31 -05:00
|
|
|
quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
|
2015-10-06 15:43:28 -04:00
|
|
|
"""
|
2020-09-22 20:28:33 -04:00
|
|
|
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-10-06 15:43:28 -04:00
|
|
|
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
|
|
|
|
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
|
2020-11-18 10:02:31 -05:00
|
|
|
quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0)
|
2015-10-06 15:43:28 -04:00
|
|
|
"""
|
|
|
|
super(AFTSurvivalRegression, self).__init__()
|
|
|
|
self._java_obj = self._new_java_obj(
|
|
|
|
"org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-10-06 15:43:28 -04:00
|
|
|
self.setParams(**kwargs)
|
|
|
|
|
|
|
|
@keyword_only
|
|
|
|
@since("1.6.0")
|
2020-09-22 20:28:33 -04:00
|
|
|
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2015-10-06 15:43:28 -04:00
|
|
|
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
|
2020-12-07 20:35:36 -05:00
|
|
|
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), # noqa: B005
|
2020-11-18 10:02:31 -05:00
|
|
|
quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
|
2015-10-06 15:43:28 -04:00
|
|
|
"""
|
2020-09-22 20:28:33 -04:00
|
|
|
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2015-10-06 15:43:28 -04:00
|
|
|
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
|
|
|
|
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
|
2020-11-18 10:02:31 -05:00
|
|
|
quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
|
2015-10-06 15:43:28 -04:00
|
|
|
"""
|
2017-03-03 19:43:45 -05:00
|
|
|
kwargs = self._input_kwargs
|
2015-10-06 17:58:42 -04:00
|
|
|
return self._set(**kwargs)
|
2015-10-06 15:43:28 -04:00
|
|
|
|
|
|
|
def _create_model(self, java_model):
|
|
|
|
return AFTSurvivalRegressionModel(java_model)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setCensorCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`censorCol`.
|
|
|
|
"""
|
2016-05-03 10:46:13 -04:00
|
|
|
return self._set(censorCol=value)
|
2015-10-06 15:43:28 -04:00
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setQuantileProbabilities(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`quantileProbabilities`.
|
|
|
|
"""
|
2016-05-03 10:46:13 -04:00
|
|
|
return self._set(quantileProbabilities=value)
|
2015-10-06 15:43:28 -04:00
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setQuantilesCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`quantilesCol`.
|
|
|
|
"""
|
2016-05-03 10:46:13 -04:00
|
|
|
return self._set(quantilesCol=value)
|
2015-10-06 15:43:28 -04:00
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("1.6.0")
|
|
|
|
def setMaxIter(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`maxIter`.
|
|
|
|
"""
|
|
|
|
return self._set(maxIter=value)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setTol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`tol`.
|
|
|
|
"""
|
|
|
|
return self._set(tol=value)
|
|
|
|
|
|
|
|
@since("1.6.0")
|
|
|
|
def setFitIntercept(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`fitIntercept`.
|
|
|
|
"""
|
|
|
|
return self._set(fitIntercept=value)
|
|
|
|
|
|
|
|
@since("2.1.0")
|
|
|
|
def setAggregationDepth(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`aggregationDepth`.
|
|
|
|
"""
|
|
|
|
return self._set(aggregationDepth=value)
|
|
|
|
|
[SPARK-31656][ML][PYSPARK] AFT blockify input vectors
### What changes were proposed in this pull request?
1, add new param blockSize;
2, add a new class InstanceBlock;
3, if blockSize==1, keep original behavior; if blockSize>1, stack input vectors to blocks (like ALS/MLP);
4, if blockSize>1, standardize the input outside of optimization procedure;
### Why are the changes needed?
it will obtain performance gain on dense datasets, such as epsilon
1, reduce RAM to persist training dataset; (save about 40% RAM)
2, use Level-2 BLAS routines; (~10X speedup)
### Does this PR introduce _any_ user-facing change?
Yes, a new param is added
### How was this patch tested?
existing and added testsuites
Closes #28473 from zhengruifeng/blockify_aft.
Authored-by: zhengruifeng <ruifengz@foxmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-05-08 02:06:36 -04:00
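A brief usage sketch (assumes a SparkSession and the training DataFrame `df` from the doctest above); `maxBlockSizeInMB=0.0` keeps the automatically chosen block size, while a positive value stacks input vectors into blocks of roughly that size:
```python
from pyspark.ml.regression import AFTSurvivalRegression

aft = AFTSurvivalRegression()
aft.setMaxBlockSizeInMB(0.25)        # stack inputs into ~0.25 MB blocks
print(aft.getMaxBlockSizeInMB())     # 0.25
model = aft.fit(df)
```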
|
|
|
@since("3.1.0")
|
2020-11-18 10:02:31 -05:00
|
|
|
def setMaxBlockSizeInMB(self, value):
|
2020-05-08 02:06:36 -04:00
|
|
|
"""
|
2020-11-18 10:02:31 -05:00
|
|
|
Sets the value of :py:attr:`maxBlockSizeInMB`.
|
2020-05-08 02:06:36 -04:00
|
|
|
"""
|
2020-11-18 10:02:31 -05:00
|
|
|
return self._set(maxBlockSizeInMB=value)
|
2020-05-08 02:06:36 -04:00
|
|
|
|
2015-10-06 15:43:28 -04:00
|
|
|
|
2020-03-03 23:20:02 -05:00
|
|
|
class AFTSurvivalRegressionModel(_JavaRegressionModel, _AFTSurvivalRegressionParams,
|
2019-10-04 19:04:21 -04:00
|
|
|
JavaMLWritable, JavaMLReadable):
|
2015-10-06 15:43:28 -04:00
|
|
|
"""
|
2016-05-25 01:20:00 -04:00
|
|
|
Model fitted by :class:`AFTSurvivalRegression`.
|
2015-10-06 15:43:28 -04:00
|
|
|
|
|
|
|
.. versionadded:: 1.6.0
|
|
|
|
"""
|
|
|
|
|
2019-10-27 23:36:10 -04:00
|
|
|
@since("3.0.0")
|
|
|
|
def setQuantileProbabilities(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`quantileProbabilities`.
|
|
|
|
"""
|
|
|
|
return self._set(quantileProbabilities=value)
|
|
|
|
|
|
|
|
@since("3.0.0")
|
|
|
|
def setQuantilesCol(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`quantilesCol`.
|
|
|
|
"""
|
|
|
|
return self._set(quantilesCol=value)
|
|
|
|
|
2015-11-05 12:00:03 -05:00
|
|
|
@property
|
2016-06-22 13:05:25 -04:00
|
|
|
@since("2.0.0")
|
2015-11-05 12:00:03 -05:00
|
|
|
def coefficients(self):
|
|
|
|
"""
|
|
|
|
Model coefficients.
|
|
|
|
"""
|
|
|
|
return self._call_java("coefficients")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("1.6.0")
|
|
|
|
def intercept(self):
|
|
|
|
"""
|
|
|
|
Model intercept.
|
|
|
|
"""
|
|
|
|
return self._call_java("intercept")
|
|
|
|
|
|
|
|
@property
|
|
|
|
@since("1.6.0")
|
|
|
|
def scale(self):
|
|
|
|
"""
|
2018-08-11 22:23:36 -04:00
|
|
|
Model scale parameter.
|
2015-11-05 12:00:03 -05:00
|
|
|
"""
|
|
|
|
return self._call_java("scale")
|
|
|
|
|
2016-06-22 13:05:25 -04:00
|
|
|
@since("2.0.0")
|
2015-10-06 15:43:28 -04:00
|
|
|
def predictQuantiles(self, features):
|
|
|
|
"""
|
|
|
|
Predicted Quantiles
|
|
|
|
"""
|
|
|
|
return self._call_java("predictQuantiles", features)
|
|
|
|
|
|
|
|
|
2020-03-03 23:20:02 -05:00
|
|
|
class _GeneralizedLinearRegressionParams(_PredictorParams, HasFitIntercept, HasMaxIter,
|
2019-11-05 21:34:53 -05:00
|
|
|
HasTol, HasRegParam, HasWeightCol, HasSolver,
|
|
|
|
HasAggregationDepth):
|
2019-10-18 05:26:54 -04:00
|
|
|
"""
|
|
|
|
Params for :py:class:`GeneralizedLinearRegression` and
|
|
|
|
:py:class:`GeneralizedLinearRegressionModel`.
|
|
|
|
|
|
|
|
.. versionadded:: 3.0.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
family = Param(Params._dummy(), "family", "The name of family which is a description of " +
|
|
|
|
"the error distribution to be used in the model. Supported options: " +
|
|
|
|
"gaussian (default), binomial, poisson, gamma and tweedie.",
|
|
|
|
typeConverter=TypeConverters.toString)
|
|
|
|
link = Param(Params._dummy(), "link", "The name of link function which provides the " +
|
|
|
|
"relationship between the linear predictor and the mean of the distribution " +
|
|
|
|
"function. Supported options: identity, log, inverse, logit, probit, cloglog " +
|
|
|
|
"and sqrt.", typeConverter=TypeConverters.toString)
|
|
|
|
linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
|
|
|
|
"predictor) column name", typeConverter=TypeConverters.toString)
|
|
|
|
variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
|
|
|
|
"of the Tweedie distribution which characterizes the relationship " +
|
|
|
|
"between the variance and mean of the distribution. Only applicable " +
|
|
|
|
"for the Tweedie family. Supported values: 0 and [1, Inf).",
|
|
|
|
typeConverter=TypeConverters.toFloat)
|
|
|
|
linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
|
|
|
|
"Only applicable to the Tweedie family.",
|
|
|
|
typeConverter=TypeConverters.toFloat)
|
|
|
|
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
|
|
|
|
"options: irls.", typeConverter=TypeConverters.toString)
|
|
|
|
offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " +
|
|
|
|
"or empty, we treat all instance offsets as 0.0",
|
|
|
|
typeConverter=TypeConverters.toString)
|
|
|
|
|
2020-08-03 11:50:34 -04:00
|
|
|
def __init__(self, *args):
|
|
|
|
super(_GeneralizedLinearRegressionParams, self).__init__(*args)
|
[SPARK-32232][ML][PYSPARK] Make sure ML has the same default solver values between Scala and Python
### What changes were proposed in this pull request?
current problems:
```
mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
model = mlp.fit(df)
path = tempfile.mkdtemp()
model_path = path + "/mlp"
model.save(model_path)
model2 = MultilayerPerceptronClassificationModel.load(model_path)
self.assertEqual(model2.getSolver(), "l-bfgs") # this fails because model2.getSolver() returns 'auto'
model2.transform(df)
# this fails with Exception pyspark.sql.utils.IllegalArgumentException: MultilayerPerceptronClassifier_dec859ed24ec parameter solver given invalid value auto.
```
FMClassifier/Regression and GeneralizedLinearRegression have the same problems.
Here are the root cause of the problems:
1. In HasSolver, both Scala and Python default solver to 'auto'
2. On Scala side, mlp overrides the default of solver to 'l-bfgs', FMClassifier/Regression overrides the default of solver to 'adamW', and glr overrides the default of solver to 'irls'
3. On Scala side, mlp overrides the default of solver in MultilayerPerceptronClassificationParams, so both MultilayerPerceptronClassification and MultilayerPerceptronClassificationModel have 'l-bfgs' as default
4. On Python side, mlp overrides the default of solver in MultilayerPerceptronClassification, so it has default as 'l-bfgs', but MultilayerPerceptronClassificationModel doesn't override the default so it gets the default from HasSolver which is 'auto'. In theory, we don't care about the solver value or any other params values for MultilayerPerceptronClassificationModel, because we have the fitted model already. That's why on Python side, we never set default values for any of the XXXModel.
5. when calling getSolver on the loaded mlp model, it calls this line of code underneath:
```
def _transfer_params_from_java(self):
"""
Transforms the embedded params from the companion Java object.
"""
......
# SPARK-14931: Only check set params back to avoid default params mismatch.
if self._java_obj.isSet(java_param):
value = _java2py(sc, self._java_obj.getOrDefault(java_param))
self._set(**{param.name: value})
......
```
that's why model2.getSolver() returns 'auto'. The code doesn't get the default Scala value (in this case 'l-bfgs') to set to Python param, so it takes the default value (in this case 'auto') on Python side.
6. when calling model2.transform(df), it calls this underneath:
```
def _transfer_params_to_java(self):
"""
Transforms the embedded params to the companion Java object.
"""
......
if self.hasDefault(param):
pair = self._make_java_param_pair(param, self._defaultParamMap[param])
pair_defaults.append(pair)
......
```
Again, it gets the Python default solver which is 'auto', and this caused the Exception
7. Currently, on Scala side, for some of the algorithms, we set default values in the XXXParam, so both estimator and transformer get the default value. However, for some of the algorithms, we only set default in estimators, and the XXXModel doesn't get the default value. On Python side, we never set defaults for the XXXModel. This causes the default value inconsistency.
8. My proposed solution: set default params in XXXParam for both Scala and Python, so both the estimator and transformer have the same default value for both Scala and Python. I currently only changed solver in this PR. If everyone is OK with the fix, I will change all the other params as well.
I hope my explanation makes sense to your folks :)
### Why are the changes needed?
Fix bug
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
existing and new tests
Closes #29060 from huaxingao/solver_parity.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Sean Owen <srowen@gmail.com>
2020-07-11 11:37:26 -04:00
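A hedged sketch of the round-trip check that motivated this fix (assumes a SparkSession, a temporary directory `path`, and a small DataFrame `df` with "label"/"features" columns); with the default moved into the shared params class, a reloaded model reports the same solver as the freshly fitted one:
```python
from pyspark.ml.regression import (GeneralizedLinearRegression,
                                   GeneralizedLinearRegressionModel)

glr = GeneralizedLinearRegression(family="gaussian", link="identity")
model = glr.fit(df)
model.write().overwrite().save(path + "/glr")
model2 = GeneralizedLinearRegressionModel.load(path + "/glr")
assert model.getSolver() == model2.getSolver() == "irls"
```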
|
|
|
self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls",
|
|
|
|
variancePower=0.0, aggregationDepth=2)
|
|
|
|
|
2019-10-18 05:26:54 -04:00
|
|
|
@since("2.0.0")
|
|
|
|
def getFamily(self):
|
|
|
|
"""
|
|
|
|
Gets the value of family or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.family)
|
|
|
|
|
|
|
|
@since("2.0.0")
|
|
|
|
def getLinkPredictionCol(self):
|
|
|
|
"""
|
|
|
|
Gets the value of linkPredictionCol or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.linkPredictionCol)
|
|
|
|
|
|
|
|
@since("2.0.0")
|
|
|
|
def getLink(self):
|
|
|
|
"""
|
|
|
|
Gets the value of link or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.link)
|
|
|
|
|
|
|
|
@since("2.2.0")
|
|
|
|
def getVariancePower(self):
|
|
|
|
"""
|
|
|
|
Gets the value of variancePower or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.variancePower)
|
|
|
|
|
|
|
|
@since("2.2.0")
|
|
|
|
def getLinkPower(self):
|
|
|
|
"""
|
|
|
|
Gets the value of linkPower or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.linkPower)
|
|
|
|
|
|
|
|
@since("2.3.0")
|
|
|
|
def getOffsetCol(self):
|
|
|
|
"""
|
|
|
|
Gets the value of offsetCol or its default value.
|
|
|
|
"""
|
|
|
|
return self.getOrDefault(self.offsetCol)
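For illustration, a minimal Tweedie configuration using these params (assumes a SparkSession and a DataFrame `df` with "label" and "features" columns); with `family="tweedie"` the link is specified through `linkPower` rather than `link`:
```python
from pyspark.ml.regression import GeneralizedLinearRegression

glr = GeneralizedLinearRegression(family="tweedie",
                                  variancePower=1.5,   # compound Poisson-gamma
                                  linkPower=0.0)       # power link 0 -> log link
model = glr.fit(df)
```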
|
|
|
|
|
|
|
|
|
2016-04-12 14:29:12 -04:00
|
|
|
@inherit_doc
|
2020-03-03 23:20:02 -05:00
|
|
|
class GeneralizedLinearRegression(_JavaRegressor, _GeneralizedLinearRegressionParams,
|
2019-10-18 05:26:54 -04:00
|
|
|
JavaMLWritable, JavaMLReadable):
|
2016-04-12 14:29:12 -04:00
|
|
|
"""
|
|
|
|
Generalized Linear Regression.
|
|
|
|
|
|
|
|
Fit a Generalized Linear Model specified by giving a symbolic description of the linear
|
|
|
|
predictor (link function) and a description of the error distribution (family). It supports
|
2017-03-08 05:09:36 -05:00
|
|
|
"gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
|
|
|
|
each family are listed below. The first link function of each family is the default one.
|
2016-05-19 14:59:19 -04:00
|
|
|
|
|
|
|
* "gaussian" -> "identity", "log", "inverse"
|
|
|
|
|
|
|
|
* "binomial" -> "logit", "probit", "cloglog"
|
|
|
|
|
|
|
|
* "poisson" -> "log", "identity", "sqrt"
|
|
|
|
|
|
|
|
* "gamma" -> "inverse", "identity", "log"
|
2016-04-12 14:29:12 -04:00
|
|
|
|
2017-03-08 05:09:36 -05:00
|
|
|
* "tweedie" -> power link function specified through "linkPower". \
|
|
|
|
The default link power in the tweedie family is 1 - variancePower.
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 2.0.0
|
|
|
|
|
|
|
|
Notes
|
|
|
|
-----
|
|
|
|
For more information see Wikipedia page on
|
|
|
|
`GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_
|
2016-04-12 14:29:12 -04:00
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
Examples
|
|
|
|
--------
|
2016-05-17 15:51:07 -04:00
|
|
|
>>> from pyspark.ml.linalg import Vectors
|
2016-05-23 21:14:48 -04:00
|
|
|
>>> df = spark.createDataFrame([
|
2016-04-12 14:29:12 -04:00
|
|
|
... (1.0, Vectors.dense(0.0, 0.0)),
|
|
|
|
... (1.0, Vectors.dense(1.0, 2.0)),
|
|
|
|
... (2.0, Vectors.dense(0.0, 0.0)),
|
|
|
|
... (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
|
2016-05-19 14:59:19 -04:00
|
|
|
>>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
|
2019-10-27 23:36:10 -04:00
|
|
|
>>> glr.setRegParam(0.1)
|
|
|
|
GeneralizedLinearRegression...
|
|
|
|
>>> glr.getRegParam()
|
|
|
|
0.1
|
|
|
|
>>> glr.clear(glr.regParam)
|
|
|
|
>>> glr.setMaxIter(10)
|
|
|
|
GeneralizedLinearRegression...
|
|
|
|
>>> glr.getMaxIter()
|
|
|
|
10
|
|
|
|
>>> glr.clear(glr.maxIter)
|
2016-04-12 14:29:12 -04:00
|
|
|
>>> model = glr.fit(df)
|
2019-09-19 09:17:25 -04:00
|
|
|
>>> model.setFeaturesCol("features")
|
[SPARK-29867][ML][PYTHON] Add __repr__ in Python ML Models
### What changes were proposed in this pull request?
Add ```__repr__``` in Python ML Models
### Why are the changes needed?
In Python ML Models, some of them have ```__repr__```, others don't. In the doctest, when calling Model.setXXX, some of the Models print out the xxxModel... correctly, some of them can't because of lacking the ```__repr__``` method. For example:
```
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> model = gm.fit(df)
>>> model.setPredictionCol("newPrediction")
GaussianMixture...
```
After the change, the above code will become the following:
```
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> model = gm.fit(df)
>>> model.setPredictionCol("newPrediction")
GaussianMixtureModel...
```
### Does this PR introduce any user-facing change?
Yes.
### How was this patch tested?
doctest
Closes #26489 from huaxingao/spark-29876.
Authored-by: Huaxin Gao <huaxing@us.ibm.com>
Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
2019-11-16 00:44:39 -05:00
|
|
|
GeneralizedLinearRegressionModel...
|
2019-10-18 05:26:54 -04:00
|
|
|
>>> model.getMaxIter()
|
|
|
|
25
|
2019-11-05 21:34:53 -05:00
|
|
|
>>> model.getAggregationDepth()
|
|
|
|
2
|
2016-05-19 14:59:19 -04:00
|
|
|
>>> transformed = model.transform(df)
|
|
|
|
>>> abs(transformed.head().prediction - 1.5) < 0.001
|
|
|
|
True
|
|
|
|
>>> abs(transformed.head().p - 1.5) < 0.001
|
2016-04-12 14:29:12 -04:00
|
|
|
True
|
|
|
|
>>> model.coefficients
|
|
|
|
DenseVector([1.5..., -1.0...])
|
2016-08-22 06:21:22 -04:00
|
|
|
>>> model.numFeatures
|
|
|
|
2
|
2016-04-12 14:29:12 -04:00
|
|
|
>>> abs(model.intercept - 1.5) < 0.001
|
|
|
|
True
|
|
|
|
>>> glr_path = temp_path + "/glr"
|
|
|
|
>>> glr.save(glr_path)
|
|
|
|
>>> glr2 = GeneralizedLinearRegression.load(glr_path)
|
|
|
|
>>> glr.getFamily() == glr2.getFamily()
|
|
|
|
True
|
|
|
|
>>> model_path = temp_path + "/glr_model"
|
|
|
|
>>> model.save(model_path)
|
|
|
|
>>> model2 = GeneralizedLinearRegressionModel.load(model_path)
|
|
|
|
>>> model.intercept == model2.intercept
|
|
|
|
True
|
|
|
|
>>> model.coefficients[0] == model2.coefficients[0]
|
|
|
|
True
|
2020-08-03 11:50:34 -04:00
|
|
|
>>> model.transform(df).take(1) == model2.transform(df).take(1)
|
|
|
|
True
|
2016-04-12 14:29:12 -04:00
|
|
|
"""

    @keyword_only
    def __init__(self, *, labelCol="label", featuresCol="features", predictionCol="prediction",
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                 variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
        """
        __init__(self, \\*, labelCol="label", featuresCol="features", predictionCol="prediction", \
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                 variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
        """
        super(GeneralizedLinearRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, *, labelCol="label", featuresCol="features", predictionCol="prediction",
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                  variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
        """
        setParams(self, \\*, labelCol="label", featuresCol="features", predictionCol="prediction", \
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                  variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
        Sets params for generalized linear regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        return GeneralizedLinearRegressionModel(java_model)

    @since("2.0.0")
    def setFamily(self, value):
        """
        Sets the value of :py:attr:`family`.
        """
        return self._set(family=value)

    @since("2.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)

    @since("2.0.0")
    def setLink(self, value):
        """
        Sets the value of :py:attr:`link`.
        """
        return self._set(link=value)

    @since("2.2.0")
    def setVariancePower(self, value):
        """
        Sets the value of :py:attr:`variancePower`.
        """
        return self._set(variancePower=value)

    @since("2.2.0")
    def setLinkPower(self, value):
        """
        Sets the value of :py:attr:`linkPower`.
        """
        return self._set(linkPower=value)

    @since("2.3.0")
    def setOffsetCol(self, value):
        """
        Sets the value of :py:attr:`offsetCol`.
        """
        return self._set(offsetCol=value)

    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("2.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)

    @since("2.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("2.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("2.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("2.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)

    @since("3.0.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
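

# Illustrative sketch, not part of the original module: configuring the "tweedie" family described
# in the class docstring above. With variancePower=1.5 and no explicit linkPower, the link power
# defaults to 1 - variancePower = -0.5. `train_df` is a hypothetical DataFrame with
# "label"/"features" columns; names and values here are assumptions for illustration only.
def _example_tweedie_glr(train_df):
    glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.5,
                                      linkPredictionCol="linkPrediction", maxIter=50)
    model = glr.fit(train_df)
    # transform() adds the "prediction" column and, because linkPredictionCol is set,
    # a "linkPrediction" column with the value of the linear predictor.
    return model.transform(train_df)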


class GeneralizedLinearRegressionModel(_JavaRegressionModel, _GeneralizedLinearRegressionParams,
                                       JavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by :class:`GeneralizedLinearRegression`.

    .. versionadded:: 2.0.0
    """

    @since("3.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("2.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, deviance, pValues) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        if self.hasSummary:
            return GeneralizedLinearRegressionTrainingSummary(
                super(GeneralizedLinearRegressionModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        .. versionadded:: 2.0.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_glr_summary = self._call_java("evaluate", dataset)
        return GeneralizedLinearRegressionSummary(java_glr_summary)
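

# Illustrative sketch, not part of the original module: once a GeneralizedLinearRegressionModel
# is fitted, `summary` exposes training diagnostics and `evaluate` recomputes the same statistics
# on held-out data. `train_df`/`test_df` are hypothetical DataFrames with "label"/"features"
# columns.
def _example_glr_diagnostics(train_df, test_df):
    model = GeneralizedLinearRegression(family="poisson", link="log").fit(train_df)
    training_summary = model.summary        # GeneralizedLinearRegressionTrainingSummary
    test_summary = model.evaluate(test_df)  # GeneralizedLinearRegressionSummary on new data
    return training_summary.deviance, test_summary.deviance, test_summary.aic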


class GeneralizedLinearRegressionSummary(JavaWrapper):
    """
    Generalized linear regression results evaluated on a dataset.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Predictions output by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in :py:attr:`predictions` which gives the predicted value of each instance.
        This is set to a new column name if the original model's `predictionCol` is not set.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.2.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions.
        """
        return self._call_java("numInstances")

    @property
    @since("2.0.0")
    def rank(self):
        """
        The numeric rank of the fitted linear model.
        """
        return self._call_java("rank")

    @property
    @since("2.0.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedom(self):
        """
        The residual degrees of freedom.
        """
        return self._call_java("residualDegreeOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedomNull(self):
        """
        The residual degrees of freedom for the null model.
        """
        return self._call_java("residualDegreeOfFreedomNull")

    def residuals(self, residualsType="deviance"):
        """
        Get the residuals of the fitted model by type.

        .. versionadded:: 2.0.0

        Parameters
        ----------
        residualsType : str, optional
            The type of residuals which should be returned.
            Supported options: deviance (default), pearson, working, and response.
        """
        return self._call_java("residuals", residualsType)

    @property
    @since("2.0.0")
    def nullDeviance(self):
        """
        The deviance for the null model.
        """
        return self._call_java("nullDeviance")

    @property
    @since("2.0.0")
    def deviance(self):
        """
        The deviance for the fitted model.
        """
        return self._call_java("deviance")

    @property
    @since("2.0.0")
    def dispersion(self):
        """
        The dispersion of the fitted model.
        It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
        estimated by the residual Pearson's Chi-Squared statistic (which is defined as
        sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
        """
        return self._call_java("dispersion")

    @property
    @since("2.0.0")
    def aic(self):
        """
        Akaike's "An Information Criterion" (AIC) for the fitted model.
        """
        return self._call_java("aic")


@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
    """
    Generalized linear regression training results.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def numIterations(self):
        """
        Number of training iterations.
        """
        return self._call_java("numIterations")

    @property
    @since("2.0.0")
    def solver(self):
        """
        The numeric solver used for training.
        """
        return self._call_java("solver")

    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("coefficientStandardErrors")

    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("tValues")

    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("pValues")

    def __repr__(self):
        return self._call_java("toString")
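

# Illustrative sketch, not part of the original module: reading the per-coefficient statistics
# that GeneralizedLinearRegressionTrainingSummary exposes. As the docstrings above note, when
# fitIntercept=True the last entry of each list corresponds to the intercept. `model` is a
# hypothetical fitted GeneralizedLinearRegressionModel.
def _example_glr_coefficient_table(model):
    s = model.summary
    rows = list(zip(s.coefficientStandardErrors, s.tValues, s.pValues))
    pearson = s.residuals("pearson")  # DataFrame of Pearson residuals; "deviance" is the default
    return rows, pearson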


class _FactorizationMachinesParams(_PredictorParams, HasMaxIter, HasStepSize, HasTol,
                                   HasSolver, HasSeed, HasFitIntercept, HasRegParam, HasWeightCol):
    """
    Params for :py:class:`FMRegressor`, :py:class:`FMRegressionModel`, :py:class:`FMClassifier`
    and :py:class:`FMClassifierModel`.

    .. versionadded:: 3.0.0
    """

    factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " +
                       "which are used to get pairwise interactions between variables",
                       typeConverter=TypeConverters.toInt)

    fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)",
                      typeConverter=TypeConverters.toBoolean)

    miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " +
                              "set that should be used for one iteration of gradient descent",
                              typeConverter=TypeConverters.toFloat)

    initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients",
                    typeConverter=TypeConverters.toFloat)

    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        super(_FactorizationMachinesParams, self).__init__(*args)
        self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                         miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                         tol=1e-6, solver="adamW")

    @since("3.0.0")
    def getFactorSize(self):
        """
        Gets the value of factorSize or its default value.
        """
        return self.getOrDefault(self.factorSize)

    @since("3.0.0")
    def getFitLinear(self):
        """
        Gets the value of fitLinear or its default value.
        """
        return self.getOrDefault(self.fitLinear)

    @since("3.0.0")
    def getMiniBatchFraction(self):
        """
        Gets the value of miniBatchFraction or its default value.
        """
        return self.getOrDefault(self.miniBatchFraction)

    @since("3.0.0")
    def getInitStd(self):
        """
        Gets the value of initStd or its default value.
        """
        return self.getOrDefault(self.initStd)
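

# Illustrative sketch, not part of the original module: what the params above parameterize. A
# factorization machine scores a feature vector x as
#     y(x) = intercept + <linear, x> + sum_{i<j} <v_i, v_j> * x_i * x_j,
# where each v_i is a factor vector of length factorSize; fitLinear controls the <linear, x> term
# and fitIntercept controls the bias. The O(n * k) rewrite below mirrors how a fitted model's
# `intercept`, `linear` and `factors` attributes combine; the (numFeatures, factorSize) array
# layout is an assumption made for this sketch.
def _example_fm_score(x, intercept, linear, factors):
    import numpy as np
    x = np.asarray(x, dtype=float)         # shape (numFeatures,)
    v = np.asarray(factors, dtype=float)   # shape (numFeatures, factorSize)
    linear_term = float(np.dot(np.asarray(linear, dtype=float), x))
    vx = v.T @ x                           # shape (factorSize,)
    pairwise = 0.5 * float(np.sum(vx ** 2) - np.sum((v ** 2).T @ (x ** 2)))
    return float(intercept) + linear_term + pairwise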
@inherit_doc
|
[SPARK-29212][ML][PYSPARK] Add common classes without using JVM backend
### What changes were proposed in this pull request?
Implement common base ML classes (`Predictor`, `PredictionModel`, `Classifier`, `ClasssificationModel` `ProbabilisticClassifier`, `ProbabilisticClasssificationModel`, `Regressor`, `RegrssionModel`) for non-Java backends.
Note
- `Predictor` and `JavaClassifier` should be abstract as `_fit` method is not implemented.
- `PredictionModel` should be abstract as `_transform` is not implemented.
### Why are the changes needed?
To provide extensions points for non-JVM algorithms, as well as a public (as opposed to `Java*` variants, which are commonly described in docstrings as private) hierarchy which can be used to distinguish between different classes of predictors.
For longer discussion see [SPARK-29212](https://issues.apache.org/jira/browse/SPARK-29212) and / or https://github.com/apache/spark/pull/25776.
### Does this PR introduce any user-facing change?
It adds new base classes as listed above, but effective interfaces (method resolution order notwithstanding) stay the same.
Additionally "private" `Java*` classes in`ml.regression` and `ml.classification` have been renamed to follow PEP-8 conventions (added leading underscore).
It is for discussion if the same should be done to equivalent classes from `ml.wrapper`.
If we take `JavaClassifier` as an example, type hierarchy will change from
![old pyspark ml classification JavaClassifier](https://user-images.githubusercontent.com/1554276/72657093-5c0b0c80-39a0-11ea-9069-a897d75de483.png)
to
![new pyspark ml classification _JavaClassifier](https://user-images.githubusercontent.com/1554276/72657098-64fbde00-39a0-11ea-8f80-01187a5ea5a6.png)
Similarly the old model
![old pyspark ml classification JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657103-7513bd80-39a0-11ea-9ffc-59eb6ab61fde.png)
will become
![new pyspark ml classification _JavaClassificationModel](https://user-images.githubusercontent.com/1554276/72657110-80ff7f80-39a0-11ea-9f5c-fe408664e827.png)
### How was this patch tested?
Existing unit tests.
Closes #27245 from zero323/SPARK-29212.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: zhengruifeng <ruifengz@foxmail.com>
2020-03-03 23:20:02 -05:00
|
|
|
class FMRegressor(_JavaRegressor, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable):
|
2019-12-26 12:39:53 -05:00
|
|
|
"""
|
|
|
|
Factorization Machines learning algorithm for regression.
|
|
|
|
|
|
|
|
solver Supports:
|
|
|
|
|
|
|
|
* gd (normal mini-batch gradient descent)
|
|
|
|
* adamW (default)
|
|
|
|
|
2020-11-09 19:33:48 -05:00
|
|
|
.. versionadded:: 3.0.0
|
|
|
|
|
|
|
|
Examples
|
|
|
|
--------
|
2019-12-26 12:39:53 -05:00
|
|
|
>>> from pyspark.ml.linalg import Vectors
|
|
|
|
>>> from pyspark.ml.regression import FMRegressor
|
|
|
|
>>> df = spark.createDataFrame([
|
|
|
|
... (2.0, Vectors.dense(2.0)),
|
|
|
|
... (1.0, Vectors.dense(1.0)),
|
|
|
|
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
|
|
|
|
>>>
|
2019-12-30 23:56:19 -05:00
|
|
|
>>> fm = FMRegressor(factorSize=2)
|
|
|
|
>>> fm.setSeed(16)
|
|
|
|
FMRegressor...
|
2019-12-26 12:39:53 -05:00
|
|
|
>>> model = fm.fit(df)
|
2019-12-30 23:56:19 -05:00
|
|
|
>>> model.getMaxIter()
|
|
|
|
100
|
2019-12-26 12:39:53 -05:00
|
|
|
>>> test0 = spark.createDataFrame([
|
|
|
|
... (Vectors.dense(-2.0),),
|
|
|
|
... (Vectors.dense(0.5),),
|
|
|
|
... (Vectors.dense(1.0),),
|
|
|
|
... (Vectors.dense(4.0),)], ["features"])
|
|
|
|
>>> model.transform(test0).show(10, False)
|
|
|
|
+--------+-------------------+
|
|
|
|
|features|prediction |
|
|
|
|
+--------+-------------------+
|
|
|
|
|[-2.0] |-1.9989237712341565|
|
|
|
|
|[0.5] |0.4956682219523814 |
|
|
|
|
|[1.0] |0.994586620589689 |
|
|
|
|
|[4.0] |3.9880970124135344 |
|
|
|
|
+--------+-------------------+
|
|
|
|
...
|
|
|
|
>>> model.intercept
|
|
|
|
-0.0032501766849261557
|
|
|
|
>>> model.linear
|
|
|
|
DenseVector([0.9978])
|
|
|
|
>>> model.factors
|
|
|
|
DenseMatrix(1, 2, [0.0173, 0.0021], 1)
|
2020-08-03 11:50:34 -04:00
|
|
|
>>> model_path = temp_path + "/fm_model"
|
|
|
|
>>> model.save(model_path)
|
|
|
|
>>> model2 = FMRegressionModel.load(model_path)
|
|
|
|
>>> model2.intercept
|
|
|
|
-0.0032501766849261557
|
|
|
|
>>> model2.linear
|
|
|
|
DenseVector([0.9978])
|
|
|
|
>>> model2.factors
|
|
|
|
DenseMatrix(1, 2, [0.0173, 0.0021], 1)
|
|
|
|
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
|
|
|
|
True
|
2019-12-26 12:39:53 -05:00
|
|
|
"""
|
|
|
|
|
|
|
|
@keyword_only
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completel, i.e:
```python
keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example with ` LinearSVC` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2019-12-26 12:39:53 -05:00
|
|
|
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
|
|
|
|
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
|
|
|
|
tol=1e-6, solver="adamW", seed=None):
|
|
|
|
"""
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completel, i.e:
```python
keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example with ` LinearSVC` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
|
2019-12-26 12:39:53 -05:00
|
|
|
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
|
|
|
|
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
|
|
|
|
tol=1e-6, solver="adamW", seed=None)
|
|
|
|
"""
|
|
|
|
super(FMRegressor, self).__init__()
|
|
|
|
self._java_obj = self._new_java_obj(
|
|
|
|
"org.apache.spark.ml.regression.FMRegressor", self.uid)
|
|
|
|
kwargs = self._input_kwargs
|
|
|
|
self.setParams(**kwargs)
|
|
|
|
|
|
|
|
@keyword_only
|
|
|
|
@since("3.0.0")
|
[SPARK-32933][PYTHON] Use keyword-only syntax for keyword_only methods
### What changes were proposed in this pull request?
This PR adjusts signatures of methods decorated with `keyword_only` to indicate using [Python 3 keyword-only syntax](https://www.python.org/dev/peps/pep-3102/).
__Note__:
For the moment the goal is not to replace `keyword_only`. For justification see https://github.com/apache/spark/pull/29591#discussion_r489402579
### Why are the changes needed?
Right now it is not clear that `keyword_only` methods are indeed keyword only. This proposal addresses that.
In practice we could probably capture `locals` and drop `keyword_only` completel, i.e:
```python
keyword_only
def __init__(self, *, featuresCol="features"):
...
kwargs = self._input_kwargs
self.setParams(**kwargs)
```
could be replaced with
```python
def __init__(self, *, featuresCol="features"):
kwargs = locals()
del kwargs["self"]
...
self.setParams(**kwargs)
```
### Does this PR introduce _any_ user-facing change?
Docstrings and inspect tools will now indicate that `keyword_only` methods expect only keyword arguments.
For example with ` LinearSVC` will change from
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__(
self,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
)
Docstring: __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2):
File: /path/to/python/pyspark/ml/classification.py
Type: function
```
to
```
>>> from pyspark.ml.classification import LinearSVC
>>> ?LinearSVC.__init__
Signature:
LinearSVC.__init__ (
self,
*,
featuresCol='features',
labelCol='label',
predictionCol='prediction',
maxIter=100,
regParam=0.0,
tol=1e-06,
rawPredictionCol='rawPrediction',
fitIntercept=True,
standardization=True,
threshold=0.0,
weightCol=None,
aggregationDepth=2,
blockSize=1,
)
Docstring: __init__(self, \*, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2, blockSize=1):
File: ~/Workspace/spark/python/pyspark/ml/classification.py
Type: function
```
### How was this patch tested?
Existing tests.
Closes #29799 from zero323/SPARK-32933.
Authored-by: zero323 <mszymkiewicz@gmail.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2020-09-22 20:28:33 -04:00
|
|
|
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
|
2019-12-26 12:39:53 -05:00
|
|
|
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
|
|
|
|
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
|
|
|
|
tol=1e-6, solver="adamW", seed=None):
|
|
|
|
"""
|
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                  tol=1e-6, solver="adamW", seed=None)
        Sets Params for FMRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
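A hypothetical usage sketch of this keyword-only contract (it assumes an active SparkSession, since constructing the estimator touches the JVM):

```python
from pyspark.ml.regression import FMRegressor

fm = FMRegressor()
fm.setParams(factorSize=4, maxIter=50, stepSize=0.1)  # keywords flow through _input_kwargs
# fm.setParams(4)  # would raise TypeError: positional arguments are no longer accepted
```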
    def _create_model(self, java_model):
        return FMRegressionModel(java_model)

    @since("3.0.0")
    def setFactorSize(self, value):
        """
        Sets the value of :py:attr:`factorSize`.
        """
        return self._set(factorSize=value)

    @since("3.0.0")
    def setFitLinear(self, value):
        """
        Sets the value of :py:attr:`fitLinear`.
        """
        return self._set(fitLinear=value)

    @since("3.0.0")
    def setMiniBatchFraction(self, value):
        """
        Sets the value of :py:attr:`miniBatchFraction`.
        """
        return self._set(miniBatchFraction=value)

    @since("3.0.0")
    def setInitStd(self, value):
        """
        Sets the value of :py:attr:`initStd`.
        """
        return self._set(initStd=value)
    @since("3.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("3.0.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    @since("3.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("3.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)

    @since("3.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("3.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("3.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
class FMRegressionModel(_JavaRegressionModel, _FactorizationMachinesParams, JavaMLWritable,
                        JavaMLReadable):
    """
    Model fitted by :class:`FMRegressor`.

    .. versionadded:: 3.0.0
    """

    @property
    @since("3.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("3.0.0")
    def linear(self):
        """
        Model linear term.
        """
        return self._call_java("linear")

    @property
    @since("3.0.0")
    def factors(self):
        """
        Model factor term.
        """
        return self._call_java("factors")
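To show how the three model properties above are typically read back, here is a hedged end-to-end sketch; the toy data, parameter values, and expected shapes are illustrative assumptions rather than output copied from the test suite:

```python
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import FMRegressor
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("fm-model-example").getOrCreate()
df = spark.createDataFrame(
    [(1.0, Vectors.dense(1.0, 0.0)), (0.0, Vectors.dense(0.0, 1.0))],
    ["label", "features"])

model = FMRegressor(factorSize=2, maxIter=10, stepSize=0.01).fit(df)

print(model.intercept)  # scalar bias term
print(model.linear)     # DenseVector, one linear weight per feature
print(model.factors)    # DenseMatrix, roughly numFeatures x factorSize
spark.stop()
```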
if __name__ == "__main__":
    import doctest
    import pyspark.ml.regression
    from pyspark.sql import SparkSession
    globs = pyspark.ml.regression.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.regression tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)