2013-12-25 00:08:05 -05:00
|
|
|
#
|
|
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
|
|
|
|
# contributor license agreements. See the NOTICE file distributed with
|
|
|
|
# this work for additional information regarding copyright ownership.
|
|
|
|
# The ASF licenses this file to You under the Apache License, Version 2.0
|
|
|
|
# (the "License"); you may not use this file except in compliance with
|
|
|
|
# the License. You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
#
|
|
|
|
|
2018-03-08 06:38:34 -05:00
|
|
|
import sys
|
|
|
|
import warnings
|
|
|
|
|
2014-09-19 18:01:11 -04:00
|
|
|
import numpy as np
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2015-10-23 11:43:49 -04:00
|
|
|
from pyspark import RDD, since
|
2015-06-30 13:25:59 -04:00
|
|
|
from pyspark.streaming.dstream import DStream
|
2015-03-20 14:44:21 -04:00
|
|
|
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py, inherit_doc
|
2020-08-08 11:51:57 -04:00
|
|
|
from pyspark.mllib.linalg import _convert_to_vector
|
2015-03-20 14:44:21 -04:00
|
|
|
from pyspark.mllib.util import Saveable, Loader
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2015-03-12 19:46:29 -04:00
|
|
|
# Public API of this module; mirrors the Scala-side regression classes.
__all__ = [
    'LabeledPoint',
    'LinearModel',
    'LinearRegressionModel',
    'LinearRegressionWithSGD',
    'RidgeRegressionModel',
    'RidgeRegressionWithSGD',
    'LassoModel',
    'LassoWithSGD',
    'IsotonicRegressionModel',
    'IsotonicRegression',
    'StreamingLinearAlgorithm',
    'StreamingLinearRegressionWithSGD',
]
|
2014-09-03 14:49:45 -04:00
|
|
|
|
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
class LabeledPoint(object):

    """
    Class that represents the features and labels of a data point.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    label : int
        Label for this data point.
    features : :py:class:`pyspark.mllib.linalg.Vector` or convertible
        Vector of features for this point (NumPy array, list,
        pyspark.mllib.linalg.SparseVector, or scipy.sparse column matrix).

    Notes
    -----
    'label' and 'features' are accessible as class attributes.
    """

    def __init__(self, label, features):
        # Normalize eagerly so every instance carries a uniform
        # representation: a float label and an MLlib vector of features.
        self.label = float(label)
        self.features = _convert_to_vector(features)

    def __reduce__(self):
        # Pickle support: reconstruct through the constructor so the
        # label/feature normalization above is re-applied on unpickling.
        return LabeledPoint, (self.label, self.features)

    def __str__(self):
        parts = (str(self.label), str(self.features))
        return "(" + ",".join(parts) + ")"

    def __repr__(self):
        return "LabeledPoint(%s, %s)" % (self.label, self.features)
|
2014-06-04 15:56:56 -04:00
|
|
|
|
2013-12-25 00:08:05 -05:00
|
|
|
|
|
|
|
class LinearModel(object):
|
2014-08-06 15:58:24 -04:00
|
|
|
|
2015-06-16 17:30:30 -04:00
|
|
|
"""
|
|
|
|
A linear model that has a vector of coefficients and an intercept.
|
|
|
|
|
2015-10-23 11:43:49 -04:00
|
|
|
.. versionadded:: 0.9.0
|
2020-11-24 20:24:41 -05:00
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
weights : :py:class:`pyspark.mllib.linalg.Vector`
|
|
|
|
Weights computed for every feature.
|
|
|
|
intercept : float
|
|
|
|
Intercept computed for this model.
|
2015-06-16 17:30:30 -04:00
|
|
|
"""
|
2014-08-06 15:58:24 -04:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
def __init__(self, weights, intercept):
|
2014-09-19 18:01:11 -04:00
|
|
|
self._coeff = _convert_to_vector(weights)
|
2014-11-11 01:26:16 -05:00
|
|
|
self._intercept = float(intercept)
|
2013-12-25 00:08:05 -05:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
@property
@since("1.0.0")
def weights(self):
    """Return the weight vector computed for every feature of this model."""
    return self._coeff
|
|
|
|
|
|
|
|
@property
@since("1.0.0")
def intercept(self):
    """Return the intercept term computed for this model."""
    return self._intercept
|
|
|
|
|
2014-10-06 17:05:45 -04:00
|
|
|
def __repr__(self):
    """Return a debug string of the form ``(weights=..., intercept=...)``."""
    return "(weights=%s, intercept=%r)" % (self._coeff, self._intercept)
|
2014-10-06 17:05:45 -04:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2015-02-20 05:31:32 -05:00
|
|
|
@inherit_doc
class LinearRegressionModelBase(LinearModel):

    """A linear regression model.

    .. versionadded:: 0.9.0

    Examples
    --------
    >>> from pyspark.mllib.linalg import SparseVector
    >>> lrmb = LinearRegressionModelBase(np.array([1.0, 2.0]), 0.1)
    >>> abs(lrmb.predict(np.array([-1.03, 7.777])) - 14.624) < 1e-6
    True
    >>> abs(lrmb.predict(SparseVector(2, {0: -1.03, 1: 7.777})) - 14.624) < 1e-6
    True
    """

    @since("0.9.0")
    def predict(self, x):
        """
        Predict the value of the dependent variable given a vector or
        an RDD of vectors containing values for the independent variables.
        """
        # Distributed case: map prediction over every vector in the RDD.
        if isinstance(x, RDD):
            return x.map(self.predict)
        # Local case: coerce any supported vector-like input (list, ndarray,
        # SparseVector, scipy sparse) to an MLlib vector, then apply w.x + b.
        features = _convert_to_vector(x)
        return self.weights.dot(features) + self.intercept
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2013-12-25 00:08:05 -05:00
|
|
|
|
2015-02-20 05:31:32 -05:00
|
|
|
@inherit_doc
|
2013-12-25 00:08:05 -05:00
|
|
|
class LinearRegressionModel(LinearRegressionModelBase):
|
2014-08-06 15:58:24 -04:00
|
|
|
|
2013-12-25 00:08:05 -05:00
|
|
|
"""A linear regression model derived from a least-squares fit.
|
|
|
|
|
2020-11-24 20:24:41 -05:00
|
|
|
.. versionadded:: 0.9.0
|
|
|
|
|
|
|
|
Examples
|
|
|
|
--------
|
2020-08-08 11:51:57 -04:00
|
|
|
>>> from pyspark.mllib.linalg import SparseVector
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
>>> from pyspark.mllib.regression import LabeledPoint
|
|
|
|
>>> data = [
|
|
|
|
... LabeledPoint(0.0, [0.0]),
|
|
|
|
... LabeledPoint(1.0, [1.0]),
|
|
|
|
... LabeledPoint(3.0, [2.0]),
|
|
|
|
... LabeledPoint(2.0, [3.0])
|
|
|
|
... ]
|
2015-04-21 20:49:55 -04:00
|
|
|
>>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
|
|
|
|
... initialWeights=np.array([1.0]))
|
2014-09-19 18:01:11 -04:00
|
|
|
>>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
True
|
2014-09-19 18:01:11 -04:00
|
|
|
>>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
True
|
|
|
|
>>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
|
|
|
|
True
|
2015-07-23 21:53:07 -04:00
|
|
|
>>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
|
|
|
|
True
|
2015-03-20 14:44:21 -04:00
|
|
|
>>> import os, tempfile
|
|
|
|
>>> path = tempfile.mkdtemp()
|
|
|
|
>>> lrm.save(sc, path)
|
|
|
|
>>> sameModel = LinearRegressionModel.load(sc, path)
|
|
|
|
>>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
|
|
|
|
True
|
|
|
|
>>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
|
|
|
|
True
|
|
|
|
>>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
|
|
|
|
True
|
2015-06-22 14:53:11 -04:00
|
|
|
>>> from shutil import rmtree
|
2015-03-20 14:44:21 -04:00
|
|
|
>>> try:
|
2015-06-22 14:53:11 -04:00
|
|
|
... rmtree(path)
|
2015-03-20 14:44:21 -04:00
|
|
|
... except:
|
2015-06-22 14:53:11 -04:00
|
|
|
... pass
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
>>> data = [
|
|
|
|
... LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
|
|
|
|
... LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
|
|
|
|
... LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
|
|
|
|
... LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
|
|
|
|
... ]
|
2015-04-21 20:49:55 -04:00
|
|
|
>>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
|
2019-01-17 20:40:39 -05:00
|
|
|
... initialWeights=np.array([1.0]))
|
|
|
|
>>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
True
|
|
|
|
>>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
|
|
|
|
True
|
2015-04-21 20:49:55 -04:00
|
|
|
>>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
|
2019-01-17 20:40:39 -05:00
|
|
|
... miniBatchFraction=1.0, initialWeights=np.array([1.0]), regParam=0.1, regType="l2",
|
2015-03-25 16:38:33 -04:00
|
|
|
... intercept=True, validateData=True)
|
2019-01-17 20:40:39 -05:00
|
|
|
>>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
|
2015-03-25 16:38:33 -04:00
|
|
|
True
|
|
|
|
>>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
|
|
|
|
True
|
2013-12-25 00:08:05 -05:00
|
|
|
"""
|
2015-10-23 11:43:49 -04:00
|
|
|
@since("1.4.0")
|
2015-03-20 14:44:21 -04:00
|
|
|
def save(self, sc, path):
|
2015-10-23 11:43:49 -04:00
|
|
|
"""Save a LinearRegressionModel."""
|
2015-03-20 14:44:21 -04:00
|
|
|
java_model = sc._jvm.org.apache.spark.mllib.regression.LinearRegressionModel(
|
|
|
|
_py2java(sc, self._coeff), self.intercept)
|
|
|
|
java_model.save(sc._jsc.sc(), path)
|
|
|
|
|
|
|
|
@classmethod
|
2015-10-23 11:43:49 -04:00
|
|
|
@since("1.4.0")
|
2015-03-20 14:44:21 -04:00
|
|
|
def load(cls, sc, path):
|
2015-10-23 11:43:49 -04:00
|
|
|
"""Load a LinearRegressionModel."""
|
2015-03-20 14:44:21 -04:00
|
|
|
java_model = sc._jvm.org.apache.spark.mllib.regression.LinearRegressionModel.load(
|
|
|
|
sc._jsc.sc(), path)
|
|
|
|
weights = _java2py(sc, java_model.weights())
|
|
|
|
intercept = java_model.intercept()
|
|
|
|
model = LinearRegressionModel(weights, intercept)
|
|
|
|
return model
|
2013-12-25 00:08:05 -05:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2014-09-19 18:01:11 -04:00
|
|
|
# train_func should take two parameters, namely data and initial_weights, and
# return the result of a call to the appropriate JVM stub.
# _regression_train_wrapper is responsible for setup and error checking.
def _regression_train_wrapper(train_func, modelClass, data, initial_weights):
    """Validate the input RDD, run ``train_func`` and wrap its result.

    Parameters
    ----------
    train_func : callable
        Takes ``(data, initial_weights)`` and returns the raw tuple produced
        by the JVM training stub.
    modelClass : type
        The Python model class to instantiate from the trained parameters.
    data : :py:class:`pyspark.RDD`
        Training data; must be an RDD of LabeledPoint.
    initial_weights : vector convertible or None
        Starting weights; defaults to all zeros when None.

    Raises
    ------
    TypeError
        If the first element of ``data`` is not a LabeledPoint.
    """
    # Imported here (not at module level) to avoid a circular import with
    # pyspark.mllib.classification.
    from pyspark.mllib.classification import LogisticRegressionModel
    first = data.first()
    if not isinstance(first, LabeledPoint):
        raise TypeError("data should be an RDD of LabeledPoint, but got %s" % type(first))
    if initial_weights is None:
        # Reuse the already-fetched first record instead of calling
        # data.first() again, which would launch a second Spark job.
        initial_weights = [0.0] * len(first.features)
    # Class identity check: `is` is the correct comparison for classes.
    if modelClass is LogisticRegressionModel:
        # Logistic regression returns two extra fields describing the model.
        weights, intercept, numFeatures, numClasses = train_func(
            data, _convert_to_vector(initial_weights))
        return modelClass(weights, intercept, numFeatures, numClasses)
    else:
        weights, intercept = train_func(data, _convert_to_vector(initial_weights))
        return modelClass(weights, intercept)
|
2014-09-19 18:01:11 -04:00
|
|
|
|
|
|
|
|
2013-12-25 00:08:05 -05:00
|
|
|
class LinearRegressionWithSGD(object):
    """
    Train a linear regression model with no regularization using Stochastic Gradient Descent.

    .. versionadded:: 0.9.0
    .. deprecated:: 2.0.0
        Use :py:class:`pyspark.ml.regression.LinearRegression`.
    """
    @classmethod
    def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
              initialWeights=None, regParam=0.0, regType=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a linear regression model using Stochastic Gradient
        Descent (SGD). This solves the least squares regression
        formulation

            f(weights) = 1/(2n) ||A weights - y||^2

        which is the mean squared error. Here the data matrix has n rows,
        and the input RDD holds the set of rows of A, each with its
        corresponding right hand side label y.
        See also the documentation for the precise formulation.

        .. versionadded:: 0.9.0

        Parameters
        ----------
        data : :py:class:`pyspark.RDD`
            The training data, an RDD of LabeledPoint.
        iterations : int, optional
            The number of iterations.
            (default: 100)
        step : float, optional
            The step parameter used in SGD.
            (default: 1.0)
        miniBatchFraction : float, optional
            Fraction of data to be used for each SGD iteration.
            (default: 1.0)
        initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
            The initial weights.
            (default: None)
        regParam : float, optional
            The regularizer parameter.
            (default: 0.0)
        regType : str, optional
            The type of regularizer used for training our model.
            Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization
            - None for no regularization (default)

        intercept : bool, optional
            Boolean parameter which indicates the use or not of the
            augmented representation for training data (i.e., whether bias
            features are activated or not).
            (default: False)
        validateData : bool, optional
            Boolean parameter which indicates if the algorithm should
            validate data before training.
            (default: True)
        convergenceTol : float, optional
            A condition which decides iteration termination.
            (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning)

        def _run_sgd(rdd, initial):
            # Hand the actual optimization off to the JVM implementation,
            # normalizing every hyperparameter to its expected type.
            return callMLlibFunc(
                "trainLinearRegressionModelWithSGD", rdd, int(iterations), float(step),
                float(miniBatchFraction), initial, float(regParam), regType,
                bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_run_sgd, LinearRegressionModel, data, initialWeights)
|
2013-12-25 00:08:05 -05:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2015-02-20 05:31:32 -05:00
|
|
|
@inherit_doc
class LassoModel(LinearRegressionModelBase):

    """A linear regression model derived from a least-squares fit with
    an l_1 penalty term.

    .. versionadded:: 0.9.0

    Examples
    --------
    >>> from pyspark.mllib.linalg import SparseVector
    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = LassoWithSGD.train(
    ...     sc.parallelize(data), iterations=10, initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = LassoModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = LassoWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...     regParam=0.01, miniBatchFraction=1.0, initialWeights=np.array([1.0]), intercept=True,
    ...     validateData=True)
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    """
    @since("1.4.0")
    def save(self, sc, path):
        """Save this LassoModel under the given path."""
        # Mirror the Python-side coefficients/intercept into the JVM model,
        # which performs the actual serialization.
        jvm_regression = sc._jvm.org.apache.spark.mllib.regression
        jvm_model = jvm_regression.LassoModel(
            _py2java(sc, self._coeff), self.intercept)
        jvm_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a LassoModel previously written with ``save``."""
        jvm_regression = sc._jvm.org.apache.spark.mllib.regression
        jvm_model = jvm_regression.LassoModel.load(sc._jsc.sc(), path)
        # Convert the fitted parameters back into Python-side objects and
        # rebuild the Python model around them.
        py_weights = _java2py(sc, jvm_model.weights())
        return LassoModel(py_weights, jvm_model.intercept())
|
2014-01-10 02:55:06 -05:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2013-12-25 00:08:05 -05:00
|
|
|
class LassoWithSGD(object):
    """
    Train a regression model with L1-regularization using Stochastic Gradient Descent.

    .. versionadded:: 0.9.0
    .. deprecated:: 2.0.0
        Use :py:class:`pyspark.ml.regression.LinearRegression` with elasticNetParam = 1.0.
        Note the default regParam is 0.01 for LassoWithSGD, but is 0.0 for LinearRegression.
    """
    @classmethod
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a lasso regression model with Stochastic Gradient Descent,
        i.e. solve the l1-regularized least squares formulation

            f(weights) = 1/(2n) ||A weights - y||^2 + regParam ||weights||_1

        where the data matrix A has n rows and the input RDD holds those
        rows together with their right hand side labels y. See also the
        documentation for the precise formulation.

        .. versionadded:: 0.9.0

        Parameters
        ----------
        data : :py:class:`pyspark.RDD`
            The training data, an RDD of LabeledPoint.
        iterations : int, optional
            The number of iterations.
            (default: 100)
        step : float, optional
            The step parameter used in SGD.
            (default: 1.0)
        regParam : float, optional
            The regularizer parameter.
            (default: 0.01)
        miniBatchFraction : float, optional
            Fraction of data to be used for each SGD iteration.
            (default: 1.0)
        initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
            The initial weights.
            (default: None)
        intercept : bool, optional
            Boolean parameter which indicates the use or not of the
            augmented representation for training data (i.e. whether bias
            features are activated or not).
            (default: False)
        validateData : bool, optional
            Boolean parameter which indicates if the algorithm should
            validate data before training.
            (default: True)
        convergenceTol : float, optional
            A condition which decides iteration termination.
            (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 1.0. "
            "Note the default regParam is 0.01 for LassoWithSGD, but is 0.0 for LinearRegression.",
            DeprecationWarning)

        # Closure handed to the shared regression wrapper: it calls the Scala
        # trainer with explicitly coerced scalar parameters and the initial
        # weight vector the wrapper supplies.
        def _run_sgd(rdd, initial):
            return callMLlibFunc("trainLassoModelWithSGD", rdd, int(iterations), float(step),
                                 float(regParam), float(miniBatchFraction), initial,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_run_sgd, LassoModel, data, initialWeights)
|
2013-12-25 00:08:05 -05:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2015-02-20 05:31:32 -05:00
|
|
|
@inherit_doc
class RidgeRegressionModel(LinearRegressionModelBase):

    """A linear regression model derived from a least-squares fit with
    an l_2 penalty term.

    .. versionadded:: 0.9.0

    Examples
    --------
    >>> from pyspark.mllib.linalg import SparseVector
    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = RidgeRegressionModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...     regParam=0.01, miniBatchFraction=1.0, initialWeights=np.array([1.0]), intercept=True,
    ...     validateData=True)
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    """

    @since("1.4.0")
    def save(self, sc, path):
        """Save a RidgeRegressionModel to ``path``."""
        # Mirror this model as a Scala RidgeRegressionModel on the JVM side so
        # its save() implementation produces the standard on-disk format.
        java_model = sc._jvm.org.apache.spark.mllib.regression.RidgeRegressionModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a RidgeRegressionModel previously written with :py:meth:`save`."""
        # The JVM reads the persisted model; its weights and intercept are then
        # converted back into a Python-side RidgeRegressionModel.
        java_model = sc._jvm.org.apache.spark.mllib.regression.RidgeRegressionModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        model = RidgeRegressionModel(weights, intercept)
        return model
|
2013-12-25 00:08:05 -05:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parametrs
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2013-12-25 00:08:05 -05:00
|
|
|
class RidgeRegressionWithSGD(object):
    """
    Train a regression model with L2-regularization using Stochastic Gradient Descent.

    .. versionadded:: 0.9.0
    .. deprecated:: 2.0.0
        Use :py:class:`pyspark.ml.regression.LinearRegression` with elasticNetParam = 0.0.
        Note the default regParam is 0.01 for RidgeRegressionWithSGD, but is 0.0 for
        LinearRegression.
    """

    @classmethod
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a regression model with L2-regularization using Stochastic
        Gradient Descent. This solves the l2-regularized least squares
        regression formulation

            f(weights) = 1/(2n) ||A weights - y||^2 + regParam/2 ||weights||^2

        Here the data matrix has n rows, and the input RDD holds the set
        of rows of A, each with its corresponding right hand side label y.
        See also the documentation for the precise formulation.

        .. versionadded:: 0.9.0

        Parameters
        ----------
        data : :py:class:`pyspark.RDD`
            The training data, an RDD of LabeledPoint.
        iterations : int, optional
            The number of iterations.
            (default: 100)
        step : float, optional
            The step parameter used in SGD.
            (default: 1.0)
        regParam : float, optional
            The regularizer parameter.
            (default: 0.01)
        miniBatchFraction : float, optional
            Fraction of data to be used for each SGD iteration.
            (default: 1.0)
        initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
            The initial weights.
            (default: None)
        intercept : bool, optional
            Boolean parameter which indicates the use or not of the
            augmented representation for training data (i.e. whether bias
            features are activated or not).
            (default: False)
        validateData : bool, optional
            Boolean parameter which indicates if the algorithm should
            validate data before training.
            (default: True)
        convergenceTol : float, optional
            A condition which decides iteration termination.
            (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 0.0. "
            "Note the default regParam is 0.01 for RidgeRegressionWithSGD, but is 0.0 for "
            "LinearRegression.", DeprecationWarning)

        def _run(rdd, start_weights):
            # Delegate the actual L2-regularized SGD optimization to the JVM
            # side; parameters are coerced so Py4J sees the exact types.
            return callMLlibFunc("trainRidgeModelWithSGD", rdd, int(iterations), float(step),
                                 float(regParam), float(miniBatchFraction), start_weights,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_run, RidgeRegressionModel, data, initialWeights)
|
2013-12-25 00:08:05 -05:00
|
|
|
|
[WIP] SPARK-1430: Support sparse data in Python MLlib
This PR adds a SparseVector class in PySpark and updates all the regression, classification and clustering algorithms and models to support sparse data, similar to MLlib. I chose to add this class because SciPy is quite difficult to install in many environments (more so than NumPy), but I plan to add support for SciPy sparse vectors later too, and make the methods work transparently on objects of either type.
On the Scala side, we keep Python sparse vectors sparse and pass them to MLlib. We always return dense vectors from our models.
Some to-do items left:
- [x] Support SciPy's scipy.sparse matrix objects when SciPy is available. We can easily add a function to convert these to our own SparseVector.
- [x] MLlib currently uses a vector with one extra column on the left to represent what we call LabeledPoint in Scala. Do we really want this? It may get annoying once you deal with sparse data since you must add/subtract 1 to each feature index when training. We can remove this API in 1.0 and use tuples for labeling.
- [x] Explain how to use these in the Python MLlib docs.
CC @mengxr, @joshrosen
Author: Matei Zaharia <matei@databricks.com>
Closes #341 from mateiz/py-ml-update and squashes the following commits:
d52e763 [Matei Zaharia] Remove no-longer-needed slice code and handle review comments
ea5a25a [Matei Zaharia] Fix remaining uses of copyto() after merge
b9f97a3 [Matei Zaharia] Fix test
1e1bd0f [Matei Zaharia] Add MLlib logistic regression example in Python
88bc01f [Matei Zaharia] Clean up inheritance of LinearModel in Python, and expose its parameters
37ab747 [Matei Zaharia] Fix some examples and docs due to changes in MLlib API
da0f27e [Matei Zaharia] Added a MLlib K-means example and updated docs to discuss sparse data
c48e85a [Matei Zaharia] Added some tests for passing lists as input, and added mllib/tests.py to run-tests script.
a07ba10 [Matei Zaharia] Fix some typos and calculation of initial weights
74eefe7 [Matei Zaharia] Added LabeledPoint class in Python
889dde8 [Matei Zaharia] Support scipy.sparse matrices in all our algorithms and models
ab244d1 [Matei Zaharia] Allow SparseVectors to be initialized using a dict
a5d6426 [Matei Zaharia] Add linalg.py to run-tests script
0e7a3d8 [Matei Zaharia] Keep vectors sparse in Java when reading LabeledPoints
eaee759 [Matei Zaharia] Update regression, classification and clustering models for sparse data
2abbb44 [Matei Zaharia] Further work to get linear models working with sparse data
154f45d [Matei Zaharia] Update docs, name some magic values
881fef7 [Matei Zaharia] Added a sparse vector in Python and made Java-Python format more compact
2014-04-15 23:33:24 -04:00
|
|
|
|
2015-05-06 01:57:13 -04:00
|
|
|
class IsotonicRegressionModel(Saveable, Loader):

    """
    Regression model for isotonic regression.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    boundaries : ndarray
        Array of boundaries for which predictions are known. Boundaries
        must be sorted in increasing order.
    predictions : ndarray
        Array of predictions associated to the boundaries at the same
        index. Results of isotonic regression and therefore monotone.
    isotonic : bool
        Indicates whether this is isotonic or antitonic.

    Examples
    --------
    >>> data = [(1, 0, 1), (2, 1, 1), (3, 2, 1), (1, 3, 1), (6, 4, 1), (17, 5, 1), (16, 6, 1)]
    >>> irm = IsotonicRegression.train(sc.parallelize(data))
    >>> irm.predict(3)
    2.0
    >>> irm.predict(5)
    16.5
    >>> irm.predict(sc.parallelize([3, 5])).collect()
    [2.0, 16.5]
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> irm.save(sc, path)
    >>> sameModel = IsotonicRegressionModel.load(sc, path)
    >>> sameModel.predict(3)
    2.0
    >>> sameModel.predict(5)
    16.5
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass
    """

    def __init__(self, boundaries, predictions, isotonic):
        # Parallel arrays: predictions[i] is the fitted value at boundaries[i].
        self.boundaries = boundaries
        self.predictions = predictions
        self.isotonic = isotonic

    def predict(self, x):
        """
        Predict labels for provided features.
        Using a piecewise linear function.
        1) If x exactly matches a boundary then associated prediction
        is returned. In case there are multiple predictions with the
        same boundary then one of them is returned. Which one is
        undefined (same as java.util.Arrays.binarySearch).
        2) If x is lower or higher than all boundaries then first or
        last prediction is returned respectively. In case there are
        multiple predictions with the same boundary then the lowest
        or highest is returned respectively.
        3) If x falls between two values in boundary array then
        prediction is treated as piecewise linear function and
        interpolated value is returned. In case there are multiple
        values with the same boundary then the same rules as in 2)
        are used.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
            Feature or RDD of Features to be labeled.
        """
        if isinstance(x, RDD):
            # Distributed input: predict each element independently.
            return x.map(lambda v: self.predict(v))
        # np.interp provides the clamping and piecewise-linear interpolation
        # semantics described above.
        return np.interp(x, self.boundaries, self.predictions)

    @since("1.4.0")
    def save(self, sc, path):
        """Save an IsotonicRegressionModel."""
        java_boundaries = _py2java(sc, self.boundaries.tolist())
        java_predictions = _py2java(sc, self.predictions.tolist())
        java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel(
            java_boundaries, java_predictions, self.isotonic)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load an IsotonicRegressionModel."""
        java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(
            sc._jsc.sc(), path)
        py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray()
        py_predictions = _java2py(sc, java_model.predictionVector()).toArray()
        # Use cls (not the concrete class) so subclasses load as their own type.
        return cls(py_boundaries, py_predictions, java_model.isotonic)
|
|
|
|
|
|
|
|
|
|
|
|
class IsotonicRegression(object):
    """
    Isotonic regression.
    Currently implemented using parallelized pool adjacent violators
    algorithm. Only univariate (single feature) algorithm supported.

    .. versionadded:: 1.4.0

    Notes
    -----
    Sequential PAV implementation based on
    Tibshirani, Ryan J., Holger Hoefling, and Robert Tibshirani (2011) [1]_

    Sequential PAV parallelization based on
    Kearsley, Anthony J., Richard A. Tapia, and Michael W. Trosset (1996) [2]_

    See also
    `Isotonic regression (Wikipedia) <http://en.wikipedia.org/wiki/Isotonic_regression>`_.

    .. [1] Tibshirani, Ryan J., Holger Hoefling, and Robert Tibshirani.
        "Nearly-isotonic regression." Technometrics 53.1 (2011): 54-61.
        Available from http://www.stat.cmu.edu/~ryantibs/papers/neariso.pdf
    .. [2] Kearsley, Anthony J., Richard A. Tapia, and Michael W. Trosset
        "An approach to parallelizing isotonic regression."
        Applied Mathematics and Parallel Computing. Physica-Verlag HD, 1996. 141-147.
        Available from http://softlib.rice.edu/pub/CRPC-TRs/reports/CRPC-TR96640.pdf
    """

    @classmethod
    def train(cls, data, isotonic=True):
        """
        Train an isotonic regression model on the given data.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        data : :py:class:`pyspark.RDD`
            RDD of (label, feature, weight) tuples.
        isotonic : bool, optional
            Whether this is isotonic (which is default) or antitonic.
            (default: True)
        """
        # Fit on the JVM side, then wrap the resulting boundary/prediction
        # vectors as plain numpy arrays in the Python model.
        vectorized = data.map(_convert_to_vector)
        boundaries, predictions = callMLlibFunc(
            "trainIsotonicRegressionModel", vectorized, bool(isotonic))
        return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic)
|
|
|
|
|
|
|
|
|
2015-06-30 13:25:59 -04:00
|
|
|
class StreamingLinearAlgorithm(object):
    """
    Base class that has to be inherited by any StreamingLinearAlgorithm.

    Prevents reimplementation of methods predictOn and predictOnValues.

    .. versionadded:: 1.5.0
    """

    def __init__(self, model):
        self._model = model

    @since("1.5.0")
    def latestModel(self):
        """
        Returns the latest model.
        """
        return self._model

    def _validate(self, dstream):
        # Reject anything that is not a DStream before touching the model.
        if not isinstance(dstream, DStream):
            raise TypeError(
                "dstream should be a DStream object, got %s" % type(dstream))
        # A model must exist (e.g. via setInitialWeights) before prediction.
        if not self._model:
            raise ValueError(
                "Model must be initialized using setInitialWeights")

    def predictOn(self, dstream):
        """
        Use the model to make predictions on batches of data from a
        DStream.

        .. versionadded:: 1.5.0

        Returns
        -------
        :py:class:`pyspark.streaming.DStream`
            DStream containing predictions.
        """
        self._validate(dstream)
        # Read self._model inside the lambda so each batch uses the
        # most recently trained model.
        return dstream.map(lambda features: self._model.predict(features))

    def predictOnValues(self, dstream):
        """
        Use the model to make predictions on the values of a DStream and
        carry over its keys.

        .. versionadded:: 1.5.0

        Returns
        -------
        :py:class:`pyspark.streaming.DStream`
            DStream containing predictions.
        """
        self._validate(dstream)
        # Same lazy model lookup as predictOn, but keys pass through.
        return dstream.mapValues(lambda features: self._model.predict(features))
|
|
|
|
|
|
|
|
|
|
|
|
@inherit_doc
class StreamingLinearRegressionWithSGD(StreamingLinearAlgorithm):
    """
    Train or predict a linear regression model on streaming data.
    Training uses Stochastic Gradient Descent to update the model
    based on each new batch of incoming data from a DStream
    (see `LinearRegressionWithSGD` for model equation).

    Each batch of data is assumed to be an RDD of LabeledPoints.
    The number of data points per batch can vary, but the number
    of features must be constant. An initial weight vector must
    be provided.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    stepSize : float, optional
        Step size for each iteration of gradient descent.
        (default: 0.1)
    numIterations : int, optional
        Number of iterations run for each batch of data.
        (default: 50)
    miniBatchFraction : float, optional
        Fraction of each batch of data to use for updates.
        (default: 1.0)
    convergenceTol : float, optional
        Value used to determine when to terminate iterations.
        (default: 0.001)
    """

    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, convergenceTol=0.001):
        self.stepSize = stepSize
        self.numIterations = numIterations
        self.miniBatchFraction = miniBatchFraction
        self.convergenceTol = convergenceTol
        # No model until setInitialWeights is called.
        self._model = None
        super(StreamingLinearRegressionWithSGD, self).__init__(model=self._model)

    @since("1.5.0")
    def setInitialWeights(self, initialWeights):
        """
        Set the initial value of weights.

        This must be set before running trainOn and predictOn
        """
        self._model = LinearRegressionModel(_convert_to_vector(initialWeights), 0)
        return self

    @since("1.5.0")
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def _update(rdd):
            # LinearRegressionWithSGD.train raises an error for an empty RDD.
            if rdd.isEmpty():
                return
            self._model = LinearRegressionWithSGD.train(
                rdd, self.numIterations, self.stepSize,
                self.miniBatchFraction, self._model.weights,
                intercept=self._model.intercept, convergenceTol=self.convergenceTol)

        dstream.foreachRDD(_update)
|
|
|
|
|
|
|
|
|
2013-12-25 00:08:05 -05:00
|
|
|
def _test():
    """Run this module's doctests under a local two-core SparkSession."""
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.mllib.regression

    # Doctests need the module's names plus a SparkContext bound to `sc`.
    globs = pyspark.mllib.regression.__dict__.copy()
    spark = (SparkSession.builder
             .master("local[2]")
             .appName("mllib.regression tests")
             .getOrCreate())
    globs['sc'] = spark.sparkContext
    failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1)
|
2013-12-25 00:08:05 -05:00
|
|
|
|
|
|
|
# Run the module's doctest suite when executed directly as a script.
if __name__ == "__main__":
    _test()
|