#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
2015-04-16 19:20:57 -04:00
|
|
|
from __future__ import print_function
|
|
|
|
|
2015-01-28 20:14:23 -05:00
|
|
|
header = """#
|
|
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
|
|
|
|
# contributor license agreements. See the NOTICE file distributed with
|
|
|
|
# this work for additional information regarding copyright ownership.
|
|
|
|
# The ASF licenses this file to You under the Apache License, Version 2.0
|
|
|
|
# (the "License"); you may not use this file except in compliance with
|
|
|
|
# the License. You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
#"""
# Code generator for shared params (shared.py). Run under this folder with:
# python _shared_params_code_gen.py > shared.py
def _gen_param_header(name, doc, defaultValueStr, typeConverter):
|
2015-01-28 20:14:23 -05:00
|
|
|
"""
|
2015-05-12 15:17:05 -04:00
|
|
|
Generates the header part for shared variables
|
2015-01-28 20:14:23 -05:00
|
|
|
|
|
|
|
:param name: param name
|
|
|
|
:param doc: param doc
|
|
|
|
"""
|
|
|
|
template = '''class Has$Name(Params):
|
|
|
|
"""
|
2015-10-20 19:51:32 -04:00
|
|
|
Mixin for param $name: $doc
|
2015-01-28 20:14:23 -05:00
|
|
|
"""
|
|
|
|
|
2016-03-23 14:20:44 -04:00
|
|
|
$name = Param(Params._dummy(), "$name", "$doc", typeConverter=$typeConverter)
|
2015-01-28 20:14:23 -05:00
|
|
|
|
|
|
|
def __init__(self):
|
2016-01-26 18:53:48 -05:00
|
|
|
super(Has$Name, self).__init__()'''
|
|
|
|
|
2015-05-20 18:16:12 -04:00
|
|
|
if defaultValueStr is not None:
|
|
|
|
template += '''
|
|
|
|
self._setDefault($name=$defaultValueStr)'''
|
2015-05-12 15:17:05 -04:00
|
|
|
|
|
|
|
Name = name[0].upper() + name[1:]
|
2016-03-23 14:20:44 -04:00
|
|
|
if typeConverter is None:
|
|
|
|
typeConverter = str(None)
|
2015-05-12 15:17:05 -04:00
|
|
|
return template \
|
|
|
|
.replace("$name", name) \
|
|
|
|
.replace("$Name", Name) \
|
|
|
|
.replace("$doc", doc) \
|
2016-01-06 13:43:03 -05:00
|
|
|
.replace("$defaultValueStr", str(defaultValueStr)) \
|
2016-03-23 14:20:44 -04:00
|
|
|
.replace("$typeConverter", typeConverter)
|
2015-01-28 20:14:23 -05:00
|
|
|
|
2015-05-12 15:17:05 -04:00
|
|
|
|
|
|
|
def _gen_param_code(name, doc, defaultValueStr):
|
|
|
|
"""
|
|
|
|
Generates Python code for a shared param class.
|
|
|
|
|
|
|
|
:param name: param name
|
|
|
|
:param doc: param doc
|
|
|
|
:param defaultValueStr: string representation of the default value
|
|
|
|
:return: code string
|
|
|
|
"""
|
|
|
|
# TODO: How to correctly inherit instance attributes?
|
|
|
|
template = '''
|
2015-01-28 20:14:23 -05:00
|
|
|
def set$Name(self, value):
|
|
|
|
"""
|
|
|
|
Sets the value of :py:attr:`$name`.
|
|
|
|
"""
|
2016-05-03 10:46:13 -04:00
|
|
|
return self._set($name=value)
|
2015-01-28 20:14:23 -05:00
|
|
|
|
|
|
|
def get$Name(self):
|
|
|
|
"""
|
|
|
|
Gets the value of $name or its default value.
|
|
|
|
"""
|
2015-04-16 02:49:42 -04:00
|
|
|
return self.getOrDefault(self.$name)'''
|
2015-01-28 20:14:23 -05:00
|
|
|
|
2015-04-16 02:49:42 -04:00
|
|
|
Name = name[0].upper() + name[1:]
|
2015-01-28 20:14:23 -05:00
|
|
|
return template \
|
|
|
|
.replace("$name", name) \
|
2015-04-16 02:49:42 -04:00
|
|
|
.replace("$Name", Name) \
|
2015-01-28 20:14:23 -05:00
|
|
|
.replace("$doc", doc) \
|
2015-04-16 02:49:42 -04:00
|
|
|
.replace("$defaultValueStr", str(defaultValueStr))
|
2015-01-28 20:14:23 -05:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Emit the generated shared.py to stdout: license header, a warning
    # banner, the import line, then all generated param mixin classes.
    print(header)
    print("\n# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
    print("from pyspark.ml.param import *\n\n")
    # Each entry is (name, doc, defaultValueStr, typeConverter); a None
    # defaultValueStr means the param has no default.
    shared = [
        ("maxIter", "max number of iterations (>= 0).", None, "TypeConverters.toInt"),
        ("regParam", "regularization parameter (>= 0).", None, "TypeConverters.toFloat"),
        ("featuresCol", "features column name.", "'features'", "TypeConverters.toString"),
        ("labelCol", "label column name.", "'label'", "TypeConverters.toString"),
        ("predictionCol", "prediction column name.", "'prediction'", "TypeConverters.toString"),
        ("probabilityCol", "Column name for predicted class conditional probabilities. " +
         "Note: Not all models output well-calibrated probability estimates! These probabilities " +
         "should be treated as confidences, not precise probabilities.", "'probability'",
         "TypeConverters.toString"),
        ("rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", "'rawPrediction'",
         "TypeConverters.toString"),
        ("inputCol", "input column name.", None, "TypeConverters.toString"),
        ("inputCols", "input column names.", None, "TypeConverters.toListString"),
        ("outputCol", "output column name.", "self.uid + '__output'", "TypeConverters.toString"),
        ("numFeatures", "number of features.", None, "TypeConverters.toInt"),
        ("checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). " +
         "E.g. 10 means that the cache will get checkpointed every 10 iterations.", None,
         "TypeConverters.toInt"),
        ("seed", "random seed.", "hash(type(self).__name__)", "TypeConverters.toInt"),
        ("tol", "the convergence tolerance for iterative algorithms (>= 0).", None,
         "TypeConverters.toFloat"),
        ("stepSize", "Step size to be used for each iteration of optimization (>= 0).", None,
         "TypeConverters.toFloat"),
        ("handleInvalid", "how to handle invalid entries. Options are skip (which will filter " +
         "out rows with bad values), or error (which will throw an error). More options may be " +
         "added later.", None, "TypeConverters.toString"),
        ("elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, " +
         "the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", "0.0",
         "TypeConverters.toFloat"),
        ("fitIntercept", "whether to fit an intercept term.", "True", "TypeConverters.toBoolean"),
        ("standardization", "whether to standardize the training features before fitting the " +
         "model.", "True", "TypeConverters.toBoolean"),
        ("thresholds", "Thresholds in multi-class classification to adjust the probability of " +
         "predicting each class. Array must have length equal to the number of classes, with " +
         "values > 0, excepting that at most one value may be 0. " +
         "The class with largest value p/t is predicted, where p is the original " +
         "probability of that class and t is the class's threshold.", None,
         "TypeConverters.toListFloat"),
        ("threshold", "threshold in binary classification prediction, in range [0, 1]",
         "0.5", "TypeConverters.toFloat"),
        ("weightCol", "weight column name. If this is not set or empty, we treat " +
         "all instance weights as 1.0.", None, "TypeConverters.toString"),
        ("solver", "the solver algorithm for optimization. If this is not set or empty, " +
         "default value is 'auto'.", "'auto'", "TypeConverters.toString"),
        ("varianceCol", "column name for the biased sample variance of prediction.",
         None, "TypeConverters.toString"),
        ("aggregationDepth", "suggested depth for treeAggregate (>= 2).", "2",
         "TypeConverters.toInt"),
        ("parallelism", "the number of threads to use when running parallel algorithms (>= 1).",
         "1", "TypeConverters.toInt"),
        ("loss", "the loss function to be optimized.", None, "TypeConverters.toString")]

    # Generate one Has<Name> mixin class (header + setter/getter) per param.
    code = []
    for name, doc, defaultValueStr, typeConverter in shared:
        param_code = _gen_param_header(name, doc, defaultValueStr, typeConverter)
        code.append(param_code + "\n" + _gen_param_code(name, doc, defaultValueStr))

    # Decision-tree params share one mixin class; entries are
    # (name, doc, typeConverterStr) — no default values here.
    decisionTreeParams = [
        ("maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; " +
         "depth 1 means 1 internal node + 2 leaf nodes.", "TypeConverters.toInt"),
        ("maxBins", "Max number of bins for" +
         " discretizing continuous features. Must be >=2 and >= number of categories for any" +
         " categorical feature.", "TypeConverters.toInt"),
        ("minInstancesPerNode", "Minimum number of instances each child must have after split. " +
         "If a split causes the left or right child to have fewer than minInstancesPerNode, the " +
         "split will be discarded as invalid. Should be >= 1.", "TypeConverters.toInt"),
        ("minInfoGain", "Minimum information gain for a split to be considered at a tree node.",
         "TypeConverters.toFloat"),
        ("maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation. If too small," +
         " then 1 node will be split per iteration, and its aggregates may exceed this size.",
         "TypeConverters.toInt"),
        ("cacheNodeIds", "If false, the algorithm will pass trees to executors to match " +
         "instances with nodes. If true, the algorithm will cache node IDs for each instance. " +
         "Caching can speed up training of deeper trees. Users can set how often should the " +
         "cache be checkpointed or disable it by setting checkpointInterval.",
         "TypeConverters.toBoolean")]

    decisionTreeCode = '''class DecisionTreeParams(Params):
    """
    Mixin for Decision Tree parameters.
    """

    $dummyPlaceHolders

    def __init__(self):
        super(DecisionTreeParams, self).__init__()'''
    dtParamMethods = ""
    dummyPlaceholders = ""
    paramTemplate = """$name = Param($owner, "$name", "$doc", typeConverter=$typeConverterStr)"""
    for name, doc, typeConverterStr in decisionTreeParams:
        if typeConverterStr is None:
            typeConverterStr = str(None)
        variable = paramTemplate.replace("$name", name).replace("$doc", doc) \
            .replace("$typeConverterStr", typeConverterStr)
        # Accumulate the Param declarations (class body) and the
        # setter/getter methods separately, then splice them together.
        dummyPlaceholders += variable.replace("$owner", "Params._dummy()") + "\n    "
        dtParamMethods += _gen_param_code(name, doc, None) + "\n"
    code.append(decisionTreeCode.replace("$dummyPlaceHolders", dummyPlaceholders) + "\n" +
                dtParamMethods)
    # Separate every generated class by two blank lines (PEP 8).
    print("\n\n\n".join(code))
|