#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import array as pyarray
from math import exp, log
from collections import namedtuple

from numpy import array, random, tile
from pyspark import SparkContext, since
from pyspark.rdd import RDD
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, callJavaFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector, DenseVector
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.util import Saveable, Loader, inherit_doc, JavaLoader, JavaSaveable
from pyspark.streaming import DStream
__all__ = ['BisectingKMeansModel', 'BisectingKMeans', 'KMeansModel', 'KMeans',
           'GaussianMixtureModel', 'GaussianMixture', 'PowerIterationClusteringModel',
           'PowerIterationClustering', 'StreamingKMeans', 'StreamingKMeansModel',
           'LDA', 'LDAModel']


@inherit_doc
class BisectingKMeansModel(JavaModelWrapper):
    """
    A clustering model derived from the bisecting k-means method.

    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
    >>> bskm = BisectingKMeans()
    >>> model = bskm.train(sc.parallelize(data, 2), k=4)
    >>> p = array([0.0, 0.0])
    >>> model.predict(p)
    0
    >>> model.k
    4
    >>> model.computeCost(p)
    0.0

    .. versionadded:: 2.0.0
    """

    def __init__(self, java_model):
        super(BisectingKMeansModel, self).__init__(java_model)
        self.centers = [c.toArray() for c in self.call("clusterCenters")]

    @property
    @since('2.0.0')
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy
        arrays."""
        return self.centers

    @property
    @since('2.0.0')
    def k(self):
        """Get the number of clusters"""
        return self.call("k")

    @since('2.0.0')
    def predict(self, x):
        """
        Find the cluster that each of the points belongs to in this
        model.

        :param x:
          A data point (or RDD of points) to determine cluster index.
        :return:
          Predicted cluster index or an RDD of predicted cluster indices
          if the input is an RDD.
        """
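        # Distributed case: convert every point and let the JVM model
        # predict the whole RDD at once; a single point is handled below.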
        if isinstance(x, RDD):
            vecs = x.map(_convert_to_vector)
            return self.call("predict", vecs)

        x = _convert_to_vector(x)
        return self.call("predict", x)

    @since('2.0.0')
    def computeCost(self, x):
        """
        Return the Bisecting K-means cost (sum of squared distances of
        points to their nearest center) for this model on the given
        data. If provided with an RDD of points, returns the sum over
        all points.

        :param x:
          A data point (or RDD of points) for which to compute the
          cost(s).
        """
        if isinstance(x, RDD):
            vecs = x.map(_convert_to_vector)
            return self.call("computeCost", vecs)

        return self.call("computeCost", _convert_to_vector(x))


class BisectingKMeans(object):
    """
    A bisecting k-means algorithm based on the paper "A comparison of
    document clustering techniques" by Steinbach, Karypis, and Kumar,
    with modification to fit Spark.
    The algorithm starts from a single cluster that contains all points.
    Iteratively it finds divisible clusters on the bottom level and
    bisects each of them using k-means, until there are `k` leaf
    clusters in total or no leaf clusters are divisible.
    The bisecting steps of clusters on the same level are grouped
    together to increase parallelism. If bisecting all divisible
    clusters on the bottom level would result in more than `k` leaf
    clusters, larger clusters get higher priority.

    Based on
    `Steinbach, Karypis, and Kumar, A comparison of document clustering
    techniques, KDD Workshop on Text Mining, 2000
    <http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf>`_.

    .. versionadded:: 2.0.0
    """

    @classmethod
    @since('2.0.0')
    def train(self, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604):
        """
        Runs the bisecting k-means algorithm and returns the model.

        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          The desired number of leaf clusters. The actual number could
          be smaller if there are no divisible leaf clusters.
          (default: 4)
        :param maxIterations:
          Maximum number of iterations allowed to split clusters.
          (default: 20)
        :param minDivisibleClusterSize:
          Minimum number of points (if >= 1.0) or the minimum proportion
          of points (if < 1.0) of a divisible cluster.
          (default: 1.0)
        :param seed:
          Random seed value for cluster initialization.
          (default: -1888008604 from classOf[BisectingKMeans].getName.##)
        """
        java_model = callMLlibFunc(
            "trainBisectingKMeans", rdd.map(_convert_to_vector),
            k, maxIterations, minDivisibleClusterSize, seed)
        return BisectingKMeansModel(java_model)
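
    # Example usage (a minimal sketch; assumes an active SparkContext ``sc``,
    # and the data values and parameters below are illustrative only):
    #
    #   points = sc.parallelize([[0.0, 0.0], [1.0, 1.0], [9.0, 8.0], [8.0, 9.0]])
    #   model = BisectingKMeans.train(points, k=2, maxIterations=20)
    #   model.predict([0.5, 0.5])      # index of the nearest leaf cluster
    #   model.computeCost(points)      # sum of squared distances to centers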
@inherit_doc
class KMeansModel(Saveable, Loader):

    """A clustering model derived from the k-means method.

    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
    >>> model = KMeans.train(
    ...     sc.parallelize(data), 2, maxIterations=10, initializationMode="random",
    ...     seed=50, initializationSteps=5, epsilon=1e-4)
    >>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
    True
    >>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
    True
    >>> model.k
    2
    >>> model.computeCost(sc.parallelize(data))
    2.0
    >>> model = KMeans.train(sc.parallelize(data), 2)
    >>> sparse_data = [
    ...     SparseVector(3, {1: 1.0}),
    ...     SparseVector(3, {1: 1.1}),
    ...     SparseVector(3, {2: 1.0}),
    ...     SparseVector(3, {2: 1.1})
    ... ]
    >>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||",
    ...                      seed=50, initializationSteps=5, epsilon=1e-4)
    >>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
    True
    >>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
    True
    >>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
    True
    >>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
    True
    >>> isinstance(model.clusterCenters, list)
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = KMeansModel.load(sc, path)
    >>> sameModel.predict(sparse_data[0]) == model.predict(sparse_data[0])
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    >>> data = array([-383.1,-382.9, 28.7,31.2, 366.2,367.3]).reshape(3, 2)
    >>> model = KMeans.train(sc.parallelize(data), 3, maxIterations=0,
    ...     initialModel = KMeansModel([(-1000.0,-1000.0),(5.0,5.0),(1000.0,1000.0)]))
    >>> model.clusterCenters
    [array([-1000., -1000.]), array([ 5.,  5.]), array([ 1000.,  1000.])]

    .. versionadded:: 0.9.0
    """

    def __init__(self, centers):
        self.centers = centers

    @property
    @since('1.0.0')
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return self.centers

    @property
    @since('1.4.0')
    def k(self):
        """Total number of clusters."""
        return len(self.centers)

    @since('0.9.0')
    def predict(self, x):
        """
        Find the cluster that each of the points belongs to in this
        model.

        :param x:
          A data point (or RDD of points) to determine cluster index.
        :return:
          Predicted cluster index or an RDD of predicted cluster indices
          if the input is an RDD.
        """
        best = 0
        best_distance = float("inf")
        if isinstance(x, RDD):
            return x.map(self.predict)

        x = _convert_to_vector(x)
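        # Linear scan over the centers: keep the index of the center with
        # the smallest squared Euclidean distance to x.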
        for i in range(len(self.centers)):
            distance = x.squared_distance(self.centers[i])
            if distance < best_distance:
                best = i
                best_distance = distance
        return best

    @since('1.4.0')
    def computeCost(self, rdd):
        """
        Return the K-means cost (sum of squared distances of points to
        their nearest center) for this model on the given data.

        :param rdd:
          The RDD of points to compute the cost on.
        """
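        # The cost is computed on the JVM side against this model's centers;
        # only the converted points and centers cross the Py4J boundary.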
        cost = callMLlibFunc("computeCostKmeansModel", rdd.map(_convert_to_vector),
                             [_convert_to_vector(c) for c in self.centers])
        return cost

    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_centers = _py2java(sc, [_convert_to_vector(c) for c in self.centers])
        java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(java_centers)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel.load(sc._jsc.sc(), path)
        return KMeansModel(_java2py(sc, java_model.clusterCenters()))


class KMeans(object):
    """
    .. versionadded:: 0.9.0
    """

    @classmethod
    @since('0.9.0')
    def train(cls, rdd, k, maxIterations=100, initializationMode="k-means||",
              seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None):
        """
        Train a k-means clustering model.

        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          Number of clusters to create.
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 100)
        :param initializationMode:
          The initialization algorithm. This can be either "random" or
          "k-means||".
          (default: "k-means||")
        :param seed:
          Random seed value for cluster initialization. Set as None to
          generate seed based on system time.
          (default: None)
        :param initializationSteps:
          Number of steps for the k-means|| initialization mode.
          This is an advanced setting -- the default of 2 is almost
          always enough.
          (default: 2)
        :param epsilon:
          Distance threshold within which a center will be considered to
          have converged. If all centers move less than this Euclidean
          distance, iterations are stopped.
          (default: 1e-4)
        :param initialModel:
          Initial cluster centers can be provided as a KMeansModel object
          rather than using the random or k-means|| initializationMode.
          (default: None)
        """
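        # If the caller supplied an initial model, validate it and pass its
        # centers to the JVM trainer; an empty list makes the JVM fall back
        # to the requested initializationMode.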
        clusterInitialModel = []
        if initialModel is not None:
            if not isinstance(initialModel, KMeansModel):
                raise Exception("initialModel is of " + str(type(initialModel)) + ". It needs "
                                "to be of <type 'KMeansModel'>")
            clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters]
        model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
                              initializationMode, seed, initializationSteps, epsilon,
                              clusterInitialModel)
        centers = callJavaFunc(rdd.context, model.clusterCenters)
        return KMeansModel([c.toArray() for c in centers])
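
    # Example usage (a minimal sketch; assumes an active SparkContext ``sc``,
    # and the data values and parameters below are illustrative only):
    #
    #   points = sc.parallelize([[0.0, 0.0], [1.0, 1.0], [9.0, 8.0], [8.0, 9.0]])
    #   model = KMeans.train(points, 2, maxIterations=10, seed=50)
    #   model.predict([0.5, 0.5])      # index of the nearest center
    #   model.computeCost(points)      # within-cluster sum of squared distances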


@inherit_doc
class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader):

    """
    A clustering model derived from the Gaussian Mixture Model method.

    >>> from pyspark.mllib.linalg import Vectors, DenseMatrix
    >>> from numpy.testing import assert_equal
    >>> from shutil import rmtree
    >>> import os, tempfile

    >>> clusterdata_1 = sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
    ...                                       0.9,0.8,0.75,0.935,
    ...                                       -0.83,-0.68,-0.91,-0.76]).reshape(6, 2), 2)
    >>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
    ...                               maxIterations=50, seed=10)
    >>> labels = model.predict(clusterdata_1).collect()
    >>> labels[0]==labels[1]
    False
    >>> labels[1]==labels[2]
    False
    >>> labels[4]==labels[5]
    True
    >>> model.predict([-0.1,-0.05])
    0
    >>> softPredicted = model.predictSoft([-0.1,-0.05])
|
[SPARK-28736][SPARK-28735][PYTHON][ML] Fix PySpark ML tests to pass in JDK 11
<!--
Thanks for sending a pull request! Here are some tips for you:
1. If this is your first time, please read our contributor guidelines: https://spark.apache.org/contributing.html
2. Ensure you have added or run the appropriate tests for your PR: https://spark.apache.org/developer-tools.html
3. If the PR is unfinished, add '[WIP]' in your PR title, e.g., '[WIP][SPARK-XXXX] Your PR title ...'.
4. Be sure to keep the PR description updated to reflect all changes.
5. Please write your PR title to summarize what this PR proposes.
6. If possible, provide a concise example to reproduce the issue for a faster review.
-->
### What changes were proposed in this pull request?
<!--
Please clarify what changes you are proposing. The purpose of this section is to outline the changes and how this PR fixes the issue.
If possible, please consider writing useful notes for better and faster reviews in your PR. See the examples below.
1. If you refactor some codes with changing classes, showing the class hierarchy will help reviewers.
2. If you fix some SQL features, you can provide some references of other DBMSes.
3. If there is design documentation, please add the link.
4. If there is a discussion in the mailing list, please add the link.
-->
This PR proposes to fix both tests below:
```
======================================================================
FAIL: test_raw_and_probability_prediction (pyspark.ml.tests.test_algorithms.MultilayerPerceptronClassifierTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/dongjoon/APACHE/spark-master/python/pyspark/ml/tests/test_algorithms.py", line 89, in test_raw_and_probability_prediction
self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1E-4))
AssertionError: False is not true
```
```
File "/Users/dongjoon/APACHE/spark-master/python/pyspark/mllib/clustering.py", line 386, in __main__.GaussianMixtureModel
Failed example:
abs(softPredicted[0] - 1.0) < 0.001
Expected:
True
Got:
False
**********************************************************************
File "/Users/dongjoon/APACHE/spark-master/python/pyspark/mllib/clustering.py", line 388, in __main__.GaussianMixtureModel
Failed example:
abs(softPredicted[1] - 0.0) < 0.001
Expected:
True
Got:
False
```
to pass in JDK 11.
The root cause seems to be different float values being understood via Py4J. This issue also was found in https://github.com/apache/spark/pull/25132 before.
When floats are transferred from Python to JVM, the values are sent as are. Python floats are not "precise" due to its own limitation - https://docs.python.org/3/tutorial/floatingpoint.html.
For some reasons, the floats from Python on JDK 8 and JDK 11 are different, which is already explicitly not guaranteed.
This seems why only some tests in PySpark with floats are being failed.
So, this PR fixes it by increasing tolerance in identified test cases in PySpark.
### Why are the changes needed?
<!--
Please clarify why the changes are needed. For instance,
1. If you propose a new API, clarify the use case for a new API.
2. If you fix a bug, you can clarify why it is a bug.
-->
To fully support JDK 11. See, for instance, https://github.com/apache/spark/pull/25443 and https://github.com/apache/spark/pull/25423 for ongoing efforts.
### Does this PR introduce any user-facing change?
<!--
If yes, please clarify the previous behavior and the change this PR proposes - provide the console output, description and/or an example to show the behavior difference if possible.
If no, write 'No'.
-->
No.
### How was this patch tested?
<!--
If tests were added, say they were added here. Please make sure to add some test cases that check the changes thoroughly including negative and positive cases if possible.
If it was tested in a way different from regular unit tests, please clarify how you tested step by step, ideally copy and paste-able, so that other reviewers can test and check, and descendants can verify in the future.
If tests were not added, please describe why they were not added and/or why it was difficult to add.
-->
Manually tested as described in JIRAs:
```
$ build/sbt -Phadoop-3.2 test:package
$ python/run-tests --testnames 'pyspark.ml.tests.test_algorithms' --python-executables python
```
```
$ build/sbt -Phadoop-3.2 test:package
$ python/run-tests --testnames 'pyspark.mllib.clustering' --python-executables python
```
Closes #25475 from HyukjinKwon/SPARK-28735.
Authored-by: HyukjinKwon <gurwls223@apache.org>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
    >>> abs(softPredicted[0] - 1.0) < 0.03
    True
    >>> abs(softPredicted[1] - 0.0) < 0.03
    True
    >>> abs(softPredicted[2] - 0.0) < 0.03
    True
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = GaussianMixtureModel.load(sc, path)
    >>> assert_equal(model.weights, sameModel.weights)
    >>> mus, sigmas = list(
    ...     zip(*[(g.mu, g.sigma) for g in model.gaussians]))
    >>> sameMus, sameSigmas = list(
    ...     zip(*[(g.mu, g.sigma) for g in sameModel.gaussians]))
    >>> mus == sameMus
    True
    >>> sigmas == sameSigmas
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    >>> data = array([-5.1971, -2.5359, -3.8220,
    ...               -5.2211, -5.0602,  4.7118,
    ...                6.8989,  3.4592,  4.6322,
    ...                5.7048,  4.6567,  5.5026,
    ...                4.5605,  5.2043,  6.2734])
    >>> clusterdata_2 = sc.parallelize(data.reshape(5, 3))
    >>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
    ...                               maxIterations=150, seed=4)
    >>> labels = model.predict(clusterdata_2).collect()
    >>> labels[0] == labels[1]
    True
    >>> labels[2] == labels[3] == labels[4]
    True

    .. versionadded:: 1.3.0
    """
    @property
    @since('1.4.0')
    def weights(self):
        """
        Weights for each Gaussian distribution in the mixture, where weights[i] is
        the weight for Gaussian i, and weights.sum == 1.
        """
        return array(self.call("weights"))

    @property
    @since('1.4.0')
    def gaussians(self):
        """
        Array of MultivariateGaussian where gaussians[i] represents
        the Multivariate Gaussian (Normal) Distribution for Gaussian i.
        """
        return [
            MultivariateGaussian(gaussian[0], gaussian[1])
            for gaussian in self.call("gaussians")]

    @property
    @since('1.4.0')
    def k(self):
        """Number of gaussians in mixture."""
        return len(self.weights)

    @since('1.3.0')
    def predict(self, x):
        """
        Find the cluster to which the point 'x' or each point in RDD 'x'
        has maximum membership in this model.

        :param x:
          A feature vector or an RDD of vectors representing data points.
        :return:
          Predicted cluster label or an RDD of predicted cluster labels
          if the input is an RDD.
        """
        if isinstance(x, RDD):
            cluster_labels = self.predictSoft(x).map(lambda z: z.index(max(z)))
            return cluster_labels
        else:
            z = self.predictSoft(x)
            return z.argmax()

    @since('1.3.0')
    def predictSoft(self, x):
        """
        Find the membership of point 'x' or each point in RDD 'x' to all mixture components.

        :param x:
          A feature vector or an RDD of vectors representing data points.
        :return:
          The membership value to all mixture components for vector 'x'
          or each vector in RDD 'x'.
        """
        if isinstance(x, RDD):
            means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
            membership_matrix = callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
                                              _convert_to_vector(self.weights), means, sigmas)
            return membership_matrix.map(lambda x: pyarray.array('d', x))
        else:
            return self.call("predictSoft", _convert_to_vector(x)).toArray()
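The relationship between the two methods above can be sketched with plain Python (hypothetical membership values, not output of the model):
```python
# predictSoft returns one membership value per mixture component;
# predict reduces them to a hard label by taking the index of the maximum,
# exactly as the RDD branch does with z.index(max(z)).
memberships = [0.02, 0.95, 0.03]
hard_label = memberships.index(max(memberships))
assert hard_label == 1
```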

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """Load the GaussianMixtureModel from disk.

        :param sc:
          SparkContext.
        :param path:
          Path to where the model is stored.
        """
        model = cls._load_java(sc, path)
        wrapper = sc._jvm.org.apache.spark.mllib.api.python.GaussianMixtureModelWrapper(model)
        return cls(wrapper)


class GaussianMixture(object):
    """
    Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.

    .. versionadded:: 1.3.0
    """
    @classmethod
    @since('1.3.0')
    def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None):
        """
        Train a Gaussian Mixture clustering model.

        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          Number of independent Gaussians in the mixture model.
        :param convergenceTol:
          Maximum change in log-likelihood at which convergence is
          considered to have occurred.
          (default: 1e-3)
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 100)
        :param seed:
          Random seed for initial Gaussian distribution. Set as None to
          generate seed based on system time.
          (default: None)
        :param initialModel:
          Initial GMM starting point, bypassing the random
          initialization.
          (default: None)
        """
        initialModelWeights = None
        initialModelMu = None
        initialModelSigma = None
        if initialModel is not None:
            if initialModel.k != k:
                raise ValueError("Mismatched cluster count, initialModel.k = %s, however k = %s"
                                 % (initialModel.k, k))
            initialModelWeights = list(initialModel.weights)
            initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)]
            initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)]
        java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector),
                                   k, convergenceTol, maxIterations, seed,
                                   initialModelWeights, initialModelMu, initialModelSigma)
        return GaussianMixtureModel(java_model)
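The heavy lifting happens on the JVM side behind `trainGaussianMixtureModel`, but the loop it runs is ordinary expectation-maximization. A minimal single-machine sketch of one EM step for a univariate two-component mixture (illustrative only, not the distributed implementation):
```python
import math

def em_step(points, weights, means, variances):
    """One EM iteration for a univariate two-component Gaussian mixture."""
    # E-step: responsibility of each component for each point.
    resp = []
    for x in points:
        dens = [w * math.exp(-(x - mu) ** 2 / (2 * var)) / math.sqrt(2 * math.pi * var)
                for w, mu, var in zip(weights, means, variances)]
        total = sum(dens)
        resp.append([d / total for d in dens])
    # M-step: re-estimate weights, means, and variances from responsibilities.
    n = [sum(r[j] for r in resp) for j in range(2)]
    weights = [n_j / len(points) for n_j in n]
    means = [sum(r[j] * x for r, x in zip(resp, points)) / n[j] for j in range(2)]
    variances = [sum(r[j] * (x - means[j]) ** 2 for r, x in zip(resp, points)) / n[j]
                 for j in range(2)]
    return weights, means, variances
```
`train` keeps iterating steps like this until the change in log-likelihood drops below `convergenceTol` or `maxIterations` is reached.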


class PowerIterationClusteringModel(JavaModelWrapper, JavaSaveable, JavaLoader):

    """
    Model produced by [[PowerIterationClustering]].

    >>> import math
    >>> def genCircle(r, n):
    ...     points = []
    ...     for i in range(0, n):
    ...         theta = 2.0 * math.pi * i / n
    ...         points.append((r * math.cos(theta), r * math.sin(theta)))
    ...     return points
    >>> def sim(x, y):
    ...     dist2 = (x[0] - y[0]) * (x[0] - y[0]) + (x[1] - y[1]) * (x[1] - y[1])
    ...     return math.exp(-dist2 / 2.0)
    >>> r1 = 1.0
    >>> n1 = 10
    >>> r2 = 4.0
    >>> n2 = 40
    >>> n = n1 + n2
    >>> points = genCircle(r1, n1) + genCircle(r2, n2)
    >>> similarities = [(i, j, sim(points[i], points[j])) for i in range(1, n) for j in range(0, i)]
    >>> rdd = sc.parallelize(similarities, 2)
    >>> model = PowerIterationClustering.train(rdd, 2, 40)
    >>> model.k
    2
    >>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
    >>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
    True
    >>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = PowerIterationClusteringModel.load(sc, path)
    >>> sameModel.k
    2
    >>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
    >>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
    True
    >>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 1.5.0
    """

    @property
    @since('1.5.0')
    def k(self):
        """
        Returns the number of clusters.
        """
        return self.call("k")

    @since('1.5.0')
    def assignments(self):
        """
        Returns the cluster assignments of this model.
        """
        return self.call("getAssignments").map(
            lambda x: (PowerIterationClustering.Assignment(*x)))

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        model = cls._load_java(sc, path)
        wrapper = \
            sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
        return PowerIterationClusteringModel(wrapper)


class PowerIterationClustering(object):
    """
    Power Iteration Clustering (PIC), a scalable graph clustering algorithm
    developed by [[http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf Lin and Cohen]].
    From the abstract: PIC finds a very low-dimensional embedding of a
    dataset using truncated power iteration on a normalized pair-wise
    similarity matrix of the data.

    .. versionadded:: 1.5.0
    """

    @classmethod
    @since('1.5.0')
    def train(cls, rdd, k, maxIterations=100, initMode="random"):
        r"""
        :param rdd:
          An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
          affinity matrix, which is the matrix A in the PIC paper. The
          similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
          matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\. For any (i, j) with
          nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
          (j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
          because it is assumed s\ :sub:`ij`\ = 0.0.
        :param k:
          Number of clusters.
        :param maxIterations:
          Maximum number of iterations of the PIC algorithm.
          (default: 100)
        :param initMode:
          Initialization mode. This can be either "random" to use
          a random vector as vertex properties, or "degree" to use
          normalized sum similarities.
          (default: "random")
        """
        model = callMLlibFunc("trainPowerIterationClusteringModel",
                              rdd.map(_convert_to_vector), int(k), int(maxIterations), initMode)
        return PowerIterationClusteringModel(model)

    class Assignment(namedtuple("Assignment", ["id", "cluster"])):
        """
        Represents an (id, cluster) tuple.

        .. versionadded:: 1.5.0
        """


class StreamingKMeansModel(KMeansModel):
    """
    Clustering model which can perform an online update of the centroids.

    The update formula for each centroid is given by

    * c_t+1 = ((c_t * n_t * a) + (x_t * m_t)) / (n_t + m_t)
    * n_t+1 = n_t * a + m_t

    where

    * c_t: Centroid at the n_th iteration.
    * n_t: Number of samples (or) weights associated with the centroid
      at the n_th iteration.
    * x_t: Centroid of the new data closest to c_t.
    * m_t: Number of samples (or) weights of the new data closest to c_t.
    * c_t+1: New centroid.
    * n_t+1: New number of weights.
    * a: Decay factor, which gives the forgetfulness.

    .. note:: If a is set to 1, it is the weighted mean of the previous
        and new data. If it is set to zero, the old centroids are completely
        forgotten.

    :param clusterCenters:
      Initial cluster centers.
    :param clusterWeights:
      List of weights assigned to each cluster.

    >>> initCenters = [[0.0, 0.0], [1.0, 1.0]]
    >>> initWeights = [1.0, 1.0]
    >>> stkm = StreamingKMeansModel(initCenters, initWeights)
    >>> data = sc.parallelize([[-0.1, -0.1], [0.1, 0.1],
    ...                        [0.9, 0.9], [1.1, 1.1]])
    >>> stkm = stkm.update(data, 1.0, "batches")
    >>> stkm.centers
    array([[ 0.,  0.],
           [ 1.,  1.]])
    >>> stkm.predict([-0.1, -0.1])
    0
    >>> stkm.predict([0.9, 0.9])
    1
    >>> stkm.clusterWeights
    [3.0, 3.0]
    >>> decayFactor = 0.0
    >>> data = sc.parallelize([DenseVector([1.5, 1.5]), DenseVector([0.2, 0.2])])
    >>> stkm = stkm.update(data, 0.0, "batches")
    >>> stkm.centers
    array([[ 0.2,  0.2],
           [ 1.5,  1.5]])
    >>> stkm.clusterWeights
    [1.0, 1.0]
    >>> stkm.predict([0.2, 0.2])
    0
    >>> stkm.predict([1.5, 1.5])
    1

    .. versionadded:: 1.5.0
    """
    def __init__(self, clusterCenters, clusterWeights):
        super(StreamingKMeansModel, self).__init__(centers=clusterCenters)
        self._clusterWeights = list(clusterWeights)

    @property
    @since('1.5.0')
    def clusterWeights(self):
        """Return the cluster weights."""
        return self._clusterWeights

    @since('1.5.0')
    def update(self, data, decayFactor, timeUnit):
        """Update the centroids according to the data.

        :param data:
          RDD with new data for the model update.
        :param decayFactor:
          Forgetfulness of the previous centroids.
        :param timeUnit:
          Can be "batches" or "points". If points, then the decay factor
          is raised to the power of the number of new points, and if
          batches, then the decay factor is used as is.
        """
        if not isinstance(data, RDD):
            raise TypeError("Data should be an RDD, got %s." % type(data))
        data = data.map(_convert_to_vector)
        decayFactor = float(decayFactor)
        if timeUnit not in ["batches", "points"]:
            raise ValueError(
                "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
        vectorCenters = [_convert_to_vector(center) for center in self.centers]
        updatedModel = callMLlibFunc(
            "updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
            data, decayFactor, timeUnit)
        self.centers = array(updatedModel[0])
        self._clusterWeights = list(updatedModel[1])
        return self
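Plugging the doctest numbers into the update rule makes the decay behaviour concrete. The sketch below writes the denominator as the effective total weight n_t * a + m_t, which is the form that reproduces the doctest output; at a = 1.0 it coincides with the n_t + m_t shown in the class docstring:
```python
# First update: a = 1.0, each initial center has weight n = 1 and the batch
# contributes m = 2 points per cluster, so the weights become 3.0.
c, n = 0.0, 1.0          # one coordinate of the first center, and its weight
x, m = 0.0, 2.0          # mean of the closest batch points, and their count
a = 1.0
c_new = (c * n * a + x * m) / (n * a + m)   # 0.0 -- a plain weighted mean
n_new = n * a + m                           # 3.0, matching clusterWeights

# Second update: a = 0.0 forgets the old centroids entirely, so the center
# jumps to the new point and the weight resets to m (the doctest's 1.0).
a, m, x = 0.0, 1.0, 0.2
c_new = (c_new * n_new * a + x * m) / (n_new * a + m)   # 0.2
n_new = n_new * a + m                                   # 1.0
```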


class StreamingKMeans(object):
    """
    Provides methods to set k, decayFactor, timeUnit to configure the
    KMeans algorithm for fitting and predicting on incoming dstreams.
    More details on how the centroids are updated are provided under the
    docs of StreamingKMeansModel.

    :param k:
      Number of clusters.
      (default: 2)
    :param decayFactor:
      Forgetfulness of the previous centroids.
      (default: 1.0)
    :param timeUnit:
      Can be "batches" or "points". If points, then the decay factor is
      raised to the power of the number of new points, and if batches,
      then the decay factor is used as is.
      (default: "batches")

    .. versionadded:: 1.5.0
    """
    def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
        self._k = k
        self._decayFactor = decayFactor
        if timeUnit not in ["batches", "points"]:
            raise ValueError(
                "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
        self._timeUnit = timeUnit
        self._model = None

    @since('1.5.0')
    def latestModel(self):
        """Return the latest model."""
        return self._model

    def _validate(self, dstream):
        if self._model is None:
            raise ValueError(
                "Initial centers should be set either by setInitialCenters "
                "or setRandomCenters.")
        if not isinstance(dstream, DStream):
            raise TypeError(
                "Expected dstream to be of type DStream, "
                "got type %s" % type(dstream))

    @since('1.5.0')
    def setK(self, k):
        """Set number of clusters."""
        self._k = k
        return self

    @since('1.5.0')
    def setDecayFactor(self, decayFactor):
        """Set decay factor."""
        self._decayFactor = decayFactor
        return self

    @since('1.5.0')
    def setHalfLife(self, halfLife, timeUnit):
        """
        Set the number of batches (or points) after which the centroids
        of that particular batch have half the weight.
        """
        self._timeUnit = timeUnit
        self._decayFactor = exp(log(0.5) / halfLife)
        return self
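The half-life is just a reparameterization of the decay factor; a quick standalone check of the line above:
```python
from math import exp, log

halfLife = 5.0
a = exp(log(0.5) / halfLife)      # the decayFactor that setHalfLife installs

# After `halfLife` batches, a batch's contribution has decayed to one half.
assert abs(a ** halfLife - 0.5) < 1e-12
```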

    @since('1.5.0')
    def setInitialCenters(self, centers, weights):
        """
        Set initial centers. Should be set before calling trainOn.
        """
        self._model = StreamingKMeansModel(centers, weights)
        return self

    @since('1.5.0')
    def setRandomCenters(self, dim, weight, seed):
        """
        Set the initial centers to be random samples from
        a Gaussian population with constant weights.
        """
        rng = random.RandomState(seed)
        clusterCenters = rng.randn(self._k, dim)
        clusterWeights = tile(weight, self._k)
        self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
        return self

    @since('1.5.0')
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def update(rdd):
            self._model.update(rdd, self._decayFactor, self._timeUnit)

        dstream.foreachRDD(update)

    @since('1.5.0')
    def predictOn(self, dstream):
        """
        Make predictions on a dstream.
        Returns a transformed dstream object.
        """
        self._validate(dstream)
        return dstream.map(lambda x: self._model.predict(x))

    @since('1.5.0')
    def predictOnValues(self, dstream):
        """
        Make predictions on a keyed dstream.
        Returns a transformed dstream object.
        """
        self._validate(dstream)
        return dstream.mapValues(lambda x: self._model.predict(x))
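A hedged end-to-end sketch of how these methods fit together (it assumes an active `StreamingContext` named `ssc` and an input `DStream` of feature vectors named `trainingStream`; those names are illustrative, not part of this module):
```python
stkm = StreamingKMeans(k=2, decayFactor=0.5, timeUnit="batches")
stkm.setRandomCenters(dim=2, weight=1.0, seed=42)

stkm.trainOn(trainingStream)                  # update centers on every batch
predictions = stkm.predictOn(trainingStream)  # DStream of cluster labels
predictions.pprint()
ssc.start()
```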


class LDAModel(JavaModelWrapper, JavaSaveable, Loader):

    """A clustering model derived from the LDA method.

    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
    Terminology
    - "word" = "term": an element of the vocabulary
    - "token": instance of a term appearing in a document
    - "topic": multinomial distribution over words representing some concept
    References:
    - Original LDA paper (journal version):
      Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.

    >>> from pyspark.mllib.linalg import Vectors
    >>> from numpy.testing import assert_almost_equal, assert_equal
    >>> data = [
    ...     [1, Vectors.dense([0.0, 1.0])],
    ...     [2, SparseVector(2, {0: 1.0})],
    ... ]
    >>> rdd = sc.parallelize(data)
    >>> model = LDA.train(rdd, k=2, seed=1)
    >>> model.vocabSize()
    2
    >>> model.describeTopics()
    [([1, 0], [0.5..., 0.49...]), ([0, 1], [0.5..., 0.49...])]
    >>> model.describeTopics(1)
    [([1], [0.5...]), ([0], [0.5...])]

    >>> topics = model.topicsMatrix()
    >>> topics_expect = array([[0.5, 0.5], [0.5, 0.5]])
    >>> assert_almost_equal(topics, topics_expect, 1)

    >>> import os, tempfile
    >>> from shutil import rmtree
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = LDAModel.load(sc, path)
    >>> assert_equal(sameModel.topicsMatrix(), model.topicsMatrix())
    >>> sameModel.vocabSize() == model.vocabSize()
    True
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 1.5.0
    """

    @since('1.5.0')
    def topicsMatrix(self):
        """Inferred topics, where each topic is represented by a distribution over terms."""
        return self.call("topicsMatrix").toArray()

    @since('1.5.0')
    def vocabSize(self):
        """Vocabulary size (number of terms in the vocabulary)."""
        return self.call("vocabSize")

    @since('1.6.0')
    def describeTopics(self, maxTermsPerTopic=None):
        """Return the topics described by weighted terms.

        WARNING: If vocabSize and k are large, this can return a large object!

        :param maxTermsPerTopic:
          Maximum number of terms to collect for each topic.
          (default: vocabulary size)
        :return:
          Array over topics. Each topic is represented as a pair of
          matching arrays: (term indices, term weights in topic).
          Each topic's terms are sorted in order of decreasing weight.
        """
        if maxTermsPerTopic is None:
            topics = self.call("describeTopics")
        else:
            topics = self.call("describeTopics", maxTermsPerTopic)
        return topics

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """Load the LDAModel from disk.

        :param sc:
          SparkContext.
        :param path:
          Path to where the model is stored.
        """
        if not isinstance(sc, SparkContext):
            raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
        if not isinstance(path, str):
            raise TypeError("path should be a string, got type %s" % type(path))
        model = callMLlibFunc("loadLDAModel", sc, path)
        return LDAModel(model)


class LDA(object):
    """
    .. versionadded:: 1.5.0
    """

    @classmethod
    @since('1.5.0')
    def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
              topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
        """Train an LDA model.

        :param rdd:
          RDD of documents, which are tuples of document IDs and term
          (word) count vectors. The term count vectors are "bags of
          words" with a fixed-size vocabulary (where the vocabulary size
          is the length of the vector). Document IDs must be unique
          and >= 0.
        :param k:
          Number of topics to infer, i.e., the number of soft cluster
          centers.
          (default: 10)
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 20)
        :param docConcentration:
          Concentration parameter (commonly named "alpha") for the prior
          placed on documents' distributions over topics ("theta").
          (default: -1.0)
        :param topicConcentration:
          Concentration parameter (commonly named "beta" or "eta") for
          the prior placed on topics' distributions over terms.
          (default: -1.0)
        :param seed:
          Random seed for cluster initialization. Set as None to generate
          seed based on system time.
          (default: None)
        :param checkpointInterval:
          Period (in iterations) between checkpoints.
          (default: 10)
        :param optimizer:
          LDAOptimizer used to perform the actual calculation. Currently
          "em", "online" are supported.
          (default: "em")
        """
        model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations,
                              docConcentration, topicConcentration, seed,
                              checkpointInterval, optimizer)
        return LDAModel(model)
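As a sketch of the corpus format `train` expects, each element pairs a unique document ID with a fixed-length term-count vector (this assumes an active SparkContext `sc`; the vocabulary and counts are made up):
```python
from pyspark.mllib.linalg import Vectors

# Vocabulary of 3 terms; index t of each vector is the count of term t.
corpus = sc.parallelize([
    [0, Vectors.dense([2.0, 1.0, 0.0])],   # doc 0 uses terms 0 and 1
    [1, Vectors.dense([0.0, 1.0, 3.0])],   # doc 1 uses terms 1 and 2
])
model = LDA.train(corpus, k=2, seed=1)
print(model.describeTopics(maxTermsPerTopic=2))
```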


def _test():
    import doctest
[SPARK-24740][PYTHON][ML] Make PySpark's tests compatible with NumPy 1.14+
## What changes were proposed in this pull request?
This PR proposes to make PySpark's tests compatible with NumPy 1.14+.
NumPy 1.14.x introduced rather radical changes to its string representation.
For example, the tests below fail:
```
**********************************************************************
File "/.../spark/python/pyspark/ml/linalg/__init__.py", line 895, in __main__.DenseMatrix.__str__
Failed example:
print(dm)
Expected:
DenseMatrix([[ 0., 2.],
[ 1., 3.]])
Got:
DenseMatrix([[0., 2.],
[1., 3.]])
**********************************************************************
File "/.../spark/python/pyspark/ml/linalg/__init__.py", line 899, in __main__.DenseMatrix.__str__
Failed example:
print(dm)
Expected:
DenseMatrix([[ 0., 1.],
[ 2., 3.]])
Got:
DenseMatrix([[0., 1.],
[2., 3.]])
**********************************************************************
File "/.../spark/python/pyspark/ml/linalg/__init__.py", line 939, in __main__.DenseMatrix.toArray
Failed example:
m.toArray()
Expected:
array([[ 0., 2.],
[ 1., 3.]])
Got:
array([[0., 2.],
[1., 3.]])
**********************************************************************
File "/.../spark/python/pyspark/ml/linalg/__init__.py", line 324, in __main__.DenseVector.dot
Failed example:
dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))
Expected:
array([ 5., 11.])
Got:
array([ 5., 11.])
**********************************************************************
File "/.../spark/python/pyspark/ml/linalg/__init__.py", line 567, in __main__.SparseVector.dot
Failed example:
a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
Expected:
array([ 22., 22.])
Got:
array([22., 22.])
```
See [release note](https://docs.scipy.org/doc/numpy-1.14.0/release.html#compatibility-notes).
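The workaround used below in `_test()` pins the old formatting (a minimal demonstration; the exact spacing simply differs between the two modes):
```python
import numpy as np

print(np.array([5., 11.]))           # NumPy 1.14+ formatting
np.set_printoptions(legacy='1.13')   # opt back into the 1.13 rules
print(np.array([5., 11.]))           # pre-1.14 whitespace restored
```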
## How was this patch tested?
Manually tested:
```
$ ./run-tests --python-executables=python3.6,python2.7 --modules=pyspark-ml,pyspark-mllib
Running PySpark tests. Output is in /.../spark/python/unit-tests.log
Will test against the following Python executables: ['python3.6', 'python2.7']
Will test the following Python modules: ['pyspark-ml', 'pyspark-mllib']
Starting test(python2.7): pyspark.mllib.tests
Starting test(python2.7): pyspark.ml.classification
Starting test(python3.6): pyspark.mllib.tests
Starting test(python2.7): pyspark.ml.clustering
Finished test(python2.7): pyspark.ml.clustering (54s)
Starting test(python2.7): pyspark.ml.evaluation
Finished test(python2.7): pyspark.ml.classification (74s)
Starting test(python2.7): pyspark.ml.feature
Finished test(python2.7): pyspark.ml.evaluation (27s)
Starting test(python2.7): pyspark.ml.fpm
Finished test(python2.7): pyspark.ml.fpm (0s)
Starting test(python2.7): pyspark.ml.image
Finished test(python2.7): pyspark.ml.image (17s)
Starting test(python2.7): pyspark.ml.linalg.__init__
Finished test(python2.7): pyspark.ml.linalg.__init__ (1s)
Starting test(python2.7): pyspark.ml.recommendation
Finished test(python2.7): pyspark.ml.feature (76s)
Starting test(python2.7): pyspark.ml.regression
Finished test(python2.7): pyspark.ml.recommendation (69s)
Starting test(python2.7): pyspark.ml.stat
Finished test(python2.7): pyspark.ml.regression (45s)
Starting test(python2.7): pyspark.ml.tests
Finished test(python2.7): pyspark.ml.stat (28s)
Starting test(python2.7): pyspark.ml.tuning
Finished test(python2.7): pyspark.ml.tuning (20s)
Starting test(python2.7): pyspark.mllib.classification
Finished test(python2.7): pyspark.mllib.classification (31s)
Starting test(python2.7): pyspark.mllib.clustering
Finished test(python2.7): pyspark.mllib.tests (260s)
Starting test(python2.7): pyspark.mllib.evaluation
Finished test(python3.6): pyspark.mllib.tests (266s)
Starting test(python2.7): pyspark.mllib.feature
Finished test(python2.7): pyspark.mllib.evaluation (21s)
Starting test(python2.7): pyspark.mllib.fpm
Finished test(python2.7): pyspark.mllib.feature (38s)
Starting test(python2.7): pyspark.mllib.linalg.__init__
Finished test(python2.7): pyspark.mllib.linalg.__init__ (1s)
Starting test(python2.7): pyspark.mllib.linalg.distributed
Finished test(python2.7): pyspark.mllib.fpm (34s)
Starting test(python2.7): pyspark.mllib.random
Finished test(python2.7): pyspark.mllib.clustering (64s)
Starting test(python2.7): pyspark.mllib.recommendation
Finished test(python2.7): pyspark.mllib.random (15s)
Starting test(python2.7): pyspark.mllib.regression
Finished test(python2.7): pyspark.mllib.linalg.distributed (47s)
Starting test(python2.7): pyspark.mllib.stat.KernelDensity
Finished test(python2.7): pyspark.mllib.stat.KernelDensity (0s)
Starting test(python2.7): pyspark.mllib.stat._statistics
Finished test(python2.7): pyspark.mllib.recommendation (40s)
Starting test(python2.7): pyspark.mllib.tree
Finished test(python2.7): pyspark.mllib.regression (38s)
Starting test(python2.7): pyspark.mllib.util
Finished test(python2.7): pyspark.mllib.stat._statistics (19s)
Starting test(python3.6): pyspark.ml.classification
Finished test(python2.7): pyspark.mllib.tree (26s)
Starting test(python3.6): pyspark.ml.clustering
Finished test(python2.7): pyspark.mllib.util (27s)
Starting test(python3.6): pyspark.ml.evaluation
Finished test(python3.6): pyspark.ml.evaluation (30s)
Starting test(python3.6): pyspark.ml.feature
Finished test(python2.7): pyspark.ml.tests (234s)
Starting test(python3.6): pyspark.ml.fpm
Finished test(python3.6): pyspark.ml.fpm (1s)
Starting test(python3.6): pyspark.ml.image
Finished test(python3.6): pyspark.ml.clustering (55s)
Starting test(python3.6): pyspark.ml.linalg.__init__
Finished test(python3.6): pyspark.ml.linalg.__init__ (0s)
Starting test(python3.6): pyspark.ml.recommendation
Finished test(python3.6): pyspark.ml.classification (71s)
Starting test(python3.6): pyspark.ml.regression
Finished test(python3.6): pyspark.ml.image (18s)
Starting test(python3.6): pyspark.ml.stat
Finished test(python3.6): pyspark.ml.stat (37s)
Starting test(python3.6): pyspark.ml.tests
Finished test(python3.6): pyspark.ml.regression (59s)
Starting test(python3.6): pyspark.ml.tuning
Finished test(python3.6): pyspark.ml.feature (93s)
Starting test(python3.6): pyspark.mllib.classification
Finished test(python3.6): pyspark.ml.recommendation (83s)
Starting test(python3.6): pyspark.mllib.clustering
Finished test(python3.6): pyspark.ml.tuning (29s)
Starting test(python3.6): pyspark.mllib.evaluation
Finished test(python3.6): pyspark.mllib.evaluation (26s)
Starting test(python3.6): pyspark.mllib.feature
Finished test(python3.6): pyspark.mllib.classification (43s)
Starting test(python3.6): pyspark.mllib.fpm
Finished test(python3.6): pyspark.mllib.clustering (81s)
Starting test(python3.6): pyspark.mllib.linalg.__init__
Finished test(python3.6): pyspark.mllib.linalg.__init__ (2s)
Starting test(python3.6): pyspark.mllib.linalg.distributed
Finished test(python3.6): pyspark.mllib.fpm (48s)
Starting test(python3.6): pyspark.mllib.random
Finished test(python3.6): pyspark.mllib.feature (54s)
Starting test(python3.6): pyspark.mllib.recommendation
Finished test(python3.6): pyspark.mllib.random (18s)
Starting test(python3.6): pyspark.mllib.regression
Finished test(python3.6): pyspark.mllib.linalg.distributed (55s)
Starting test(python3.6): pyspark.mllib.stat.KernelDensity
Finished test(python3.6): pyspark.mllib.stat.KernelDensity (1s)
Starting test(python3.6): pyspark.mllib.stat._statistics
Finished test(python3.6): pyspark.mllib.recommendation (51s)
Starting test(python3.6): pyspark.mllib.tree
Finished test(python3.6): pyspark.mllib.regression (45s)
Starting test(python3.6): pyspark.mllib.util
Finished test(python3.6): pyspark.mllib.stat._statistics (21s)
Finished test(python3.6): pyspark.mllib.tree (27s)
Finished test(python3.6): pyspark.mllib.util (27s)
Finished test(python3.6): pyspark.ml.tests (264s)
```
Author: hyukjinkwon <gurwls223@apache.org>
Closes #21715 from HyukjinKwon/SPARK-24740.
    import numpy
    import pyspark.mllib.clustering
    try:
        # NumPy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = pyspark.mllib.clustering.__dict__.copy()
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
|