4a17eedb16
For SPARK-5867:
* The spark.ml programming guide needs to be updated to use the new SQL DataFrame API instead of the old SchemaRDD API.
* It should also include Python examples now.

For SPARK-5892:
* Fix Python docs
* Various other cleanups

BTW, I accidentally merged this with master. If you want to compile it on your own, use this branch, which is based on spark/branch-1.3 and cherry-picks the commits from this PR: https://github.com/jkbradley/spark/tree/doc-review-1.3-check

CC: mengxr (ML), davies (Python docs)

Author: Joseph K. Bradley <joseph@databricks.com>

Closes #4675 from jkbradley/doc-review-1.3 and squashes the following commits:

f191bb0 [Joseph K. Bradley] small cleanups
e786efa [Joseph K. Bradley] small doc corrections
6b1ab4a [Joseph K. Bradley] fixed python lint test
946affa [Joseph K. Bradley] Added sample data for ml.MovieLensALS example. Changed spark.ml Java examples to use DataFrames API instead of sql()
da81558 [Joseph K. Bradley] Merge remote-tracking branch 'upstream/master' into doc-review-1.3
629dbf5 [Joseph K. Bradley] Updated based on code review: * made new page for old migration guides * small fixes * moved inherit_doc in python
b9df7c4 [Joseph K. Bradley] Small cleanups: toDF to toDF(), adding s for string interpolation
34b067f [Joseph K. Bradley] small doc correction
da16aef [Joseph K. Bradley] Fixed python mllib docs
8cce91c [Joseph K. Bradley] GMM: removed old imports, added some doc
695f3f6 [Joseph K. Bradley] partly done trying to fix inherit_doc for class hierarchies in python docs
a72c018 [Joseph K. Bradley] made ChiSqTestResult appear in python docs
b05a80d [Joseph K. Bradley] organize imports. doc cleanups
e572827 [Joseph K. Bradley] updated programming guide for ml and mllib
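A minimal sketch (not from this commit) of the DataFrame-based style the updated guide moves to, assuming a live SparkContext `sc` with a SQLContext already created; the Pipeline wiring is illustrative:

    from pyspark.sql import Row
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import Tokenizer, HashingTF

    # Build a DataFrame directly rather than registering a SchemaRDD
    # and round-tripping through sql().
    df = sc.parallelize([Row(text="a b c")]).toDF()

    # Chain the two transformers defined in the file below.
    pipeline = Pipeline(stages=[
        Tokenizer(inputCol="text", outputCol="words"),
        HashingTF(inputCol="words", outputCol="features"),
    ])
    pipeline.fit(df).transform(df).show()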
128 lines | 4.7 KiB | Python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from pyspark.ml.param.shared import HasInputCol, HasOutputCol, HasNumFeatures
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaTransformer
from pyspark.mllib.common import inherit_doc

__all__ = ['Tokenizer', 'HashingTF']


@inherit_doc
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol):
    """
    A tokenizer that converts the input string to lowercase and then
    splits it by white spaces.

    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(text="a b c")]).toDF()
    >>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
    >>> print tokenizer.transform(df).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> # Change a parameter.
    >>> print tokenizer.setParams(outputCol="tokens").transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Temporarily modify a parameter.
    >>> print tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> print tokenizer.transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Must use keyword arguments to specify params.
    >>> tokenizer.setParams("text")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    """

    _java_class = "org.apache.spark.ml.feature.Tokenizer"

    @keyword_only
    def __init__(self, inputCol="input", outputCol="output"):
        """
        __init__(self, inputCol="input", outputCol="output")
        """
        super(Tokenizer, self).__init__()
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, inputCol="input", outputCol="output"):
        """
        setParams(self, inputCol="input", outputCol="output")
        Sets params for this Tokenizer.
        """
        kwargs = self.setParams._input_kwargs
        return self._set_params(**kwargs)


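# ---------------------------------------------------------------------------
# Hedged aside (not part of this file): the @keyword_only decorator imported
# above behaves roughly like the sketch below. It rejects positional
# arguments (hence the "forces keyword arguments" TypeError in the doctest)
# and stashes the keyword arguments on the wrapped function as _input_kwargs,
# which __init__ and setParams then read back. It is named
# _keyword_only_sketch here so it does not shadow pyspark.ml.util.keyword_only.
from functools import wraps


def _keyword_only_sketch(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if args:
            raise TypeError("Method %s forces keyword arguments." % func.__name__)
        wrapper._input_kwargs = kwargs  # read later via self.<method>._input_kwargs
        return func(self, **kwargs)
    return wrapper
# ---------------------------------------------------------------------------

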
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures):
    """
    Maps a sequence of terms to their term frequencies using the
    hashing trick.

    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(words=["a", "b", "c"])]).toDF()
    >>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
    >>> print hashingTF.transform(df).head().features
    (10,[7,8,9],[1.0,1.0,1.0])
    >>> print hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
    (10,[7,8,9],[1.0,1.0,1.0])
    >>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
    >>> print hashingTF.transform(df, params).head().vector
    (5,[2,3,4],[1.0,1.0,1.0])
    """

    _java_class = "org.apache.spark.ml.feature.HashingTF"

    @keyword_only
    def __init__(self, numFeatures=1 << 18, inputCol="input", outputCol="output"):
        """
        __init__(self, numFeatures=1 << 18, inputCol="input", outputCol="output")
        """
        super(HashingTF, self).__init__()
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, numFeatures=1 << 18, inputCol="input", outputCol="output"):
        """
        setParams(self, numFeatures=1 << 18, inputCol="input", outputCol="output")
        Sets params for this HashingTF.
        """
        kwargs = self.setParams._input_kwargs
        return self._set_params(**kwargs)


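# ---------------------------------------------------------------------------
# Hedged aside (not part of this file): the "hashing trick" above maps each
# term straight to a vector index, index = hash(term) % numFeatures, so no
# vocabulary needs to be built or stored. Below is a pure-Python sketch of
# the idea; the real HashingTF runs in Scala and uses a different hash
# function, so the exact indices will differ from the doctest output.
def _hashing_tf_sketch(terms, numFeatures=1 << 18):
    """Return a sparse {index: count} map of hashed term frequencies."""
    freqs = {}
    for term in terms:
        i = hash(term) % numFeatures
        freqs[i] = freqs.get(i, 0.0) + 1.0
    return freqs
# ---------------------------------------------------------------------------

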
if __name__ == "__main__":
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import SQLContext
    globs = globals().copy()
    # Set up a local SparkContext and SQLContext for running the doctests:
    sc = SparkContext("local[2]", "ml.feature tests")
    sqlCtx = SQLContext(sc)
    globs['sc'] = sc
    globs['sqlCtx'] = sqlCtx
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    sc.stop()
    if failure_count:
        exit(-1)
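# Hedged usage note (not part of the commit): with pyspark on PYTHONPATH,
# the doctests above can be run by executing this module directly, e.g.
#   python python/pyspark/ml/feature.py
# doctest.ELLIPSIS is what lets the indented "..." in Tokenizer's traceback
# example stand in for the real stack frames.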