#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from pyspark.rdd import ignore_unicode_prefix
from pyspark.ml.param.shared import HasInputCol, HasInputCols, HasOutputCol, HasNumFeatures
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaTransformer
from pyspark.mllib.common import inherit_doc

__all__ = ['Tokenizer', 'HashingTF', 'VectorAssembler']
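
# Typical usage, mirroring the doctests below: chain a Tokenizer with a
# HashingTF, e.g. Tokenizer(inputCol="text", outputCol="words") followed by
# HashingTF(inputCol="words", outputCol="features"). Each transformer here is
# a thin wrapper around its Scala counterpart.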


@inherit_doc
@ignore_unicode_prefix
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol):
    """
    A tokenizer that converts the input string to lowercase and then
    splits it by white spaces.

    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(text="a b c")]).toDF()
    >>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
    >>> tokenizer.transform(df).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> # Change a parameter.
    >>> tokenizer.setParams(outputCol="tokens").transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Temporarily modify a parameter.
    >>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> tokenizer.transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Must use keyword arguments to specify params.
    >>> tokenizer.setParams("text")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    """
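
    # A rough sketch of the behavior documented above: each input string is
    # transformed approximately like Python's text.lower().split(); the
    # actual splitting is delegated to the Scala class named below.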
    _java_class = "org.apache.spark.ml.feature.Tokenizer"
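
    # @keyword_only (from pyspark.ml.util) rejects positional arguments and
    # records the passed keyword arguments in the method's _input_kwargs
    # attribute, which __init__ and setParams below forward to _set().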
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None):
        """
        __init__(self, inputCol=None, outputCol=None)
        """
        super(Tokenizer, self).__init__()
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None):
        """
        setParams(self, inputCol=None, outputCol=None)
        Sets params for this Tokenizer.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)


@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures):
    """
    Maps a sequence of terms to their term frequencies using the
    hashing trick.

    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(words=["a", "b", "c"])]).toDF()
    >>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
    >>> hashingTF.transform(df).head().features
    SparseVector(10, {7: 1.0, 8: 1.0, 9: 1.0})
    >>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
    SparseVector(10, {7: 1.0, 8: 1.0, 9: 1.0})
    >>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
    >>> hashingTF.transform(df, params).head().vector
    SparseVector(5, {2: 1.0, 3: 1.0, 4: 1.0})
    """
    _java_class = "org.apache.spark.ml.feature.HashingTF"

    @keyword_only
    def __init__(self, numFeatures=1 << 18, inputCol=None, outputCol=None):
        """
        __init__(self, numFeatures=1 << 18, inputCol=None, outputCol=None)
        """
        super(HashingTF, self).__init__()
        self._setDefault(numFeatures=1 << 18)
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, numFeatures=1 << 18, inputCol=None, outputCol=None):
        """
        setParams(self, numFeatures=1 << 18, inputCol=None, outputCol=None)
        Sets params for this HashingTF.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)


@inherit_doc
class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol):
    """
    A feature transformer that merges multiple columns into a vector column.

    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(a=1, b=0, c=3)]).toDF()
    >>> vecAssembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features")
    >>> vecAssembler.transform(df).head().features
    SparseVector(3, {0: 1.0, 2: 3.0})
    >>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs
    SparseVector(3, {0: 1.0, 2: 3.0})
    >>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"}
    >>> vecAssembler.transform(df, params).head().vector
    SparseVector(2, {1: 1.0})
    """
    _java_class = "org.apache.spark.ml.feature.VectorAssembler"

    @keyword_only
    def __init__(self, inputCols=None, outputCol=None):
        """
        __init__(self, inputCols=None, outputCol=None)
        """
        super(VectorAssembler, self).__init__()
        self._setDefault()
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, inputCols=None, outputCol=None):
        """
        setParams(self, inputCols=None, outputCol=None)
        Sets params for this VectorAssembler.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)


if __name__ == "__main__":
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import SQLContext

    globs = globals().copy()
    # Create a local SparkContext and SQLContext for running the doctests.
    sc = SparkContext("local[2]", "ml.feature tests")
    sqlContext = SQLContext(sc)
    globs['sc'] = sc
    globs['sqlContext'] = sqlContext
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    sc.stop()
    if failure_count:
        exit(-1)