#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import py4j.protocol
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_collections import JavaArray, JavaList

from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext

# Hack to support float('inf') in Py4j
_old_smart_decode = py4j.protocol.smart_decode

_float_str_mapping = {
    'nan': 'NaN',
    'inf': 'Infinity',
    '-inf': '-Infinity',
}


def _new_smart_decode(obj):
    if isinstance(obj, float):
        s = str(obj)
        return _float_str_mapping.get(s, s)
    return _old_smart_decode(obj)


py4j.protocol.smart_decode = _new_smart_decode

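# A minimal illustration (not part of the original module) of what the patch
# provides: the special IEEE float values decode to strings the JVM can parse,
# while ordinary floats pass through unchanged.
#
#     >>> _new_smart_decode(float('inf'))
#     'Infinity'
#     >>> _new_smart_decode(float('nan'))
#     'NaN'
#     >>> _new_smart_decode(2.5)
#     '2.5'
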
_picklable_classes = [
    'LinkedList',
    'SparseVector',
    'DenseVector',
    'DenseMatrix',
    'Rating',
    'LabeledPoint',
]


# this will call the MLlib version of pythonToJava()
def _to_java_object_rdd(rdd):
    """ Return a JavaRDD of Object by unpickling

    It will convert each Python object into a Java object via Pyrolite,
    whether or not the RDD is serialized in batches.
    """
    rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
    return rdd.ctx._jvm.org.apache.spark.mllib.api.python.SerDe.pythonToJava(rdd._jrdd, True)


def _py2java(sc, obj):
    """ Convert a Python object into a Java object """
    if isinstance(obj, RDD):
        obj = _to_java_object_rdd(obj)
    elif isinstance(obj, DataFrame):
        obj = obj._jdf
    elif isinstance(obj, SparkContext):
        obj = obj._jsc
    elif isinstance(obj, list):
        obj = [_py2java(sc, x) for x in obj]
    elif isinstance(obj, JavaObject):
        pass
    elif isinstance(obj, (int, float, bool, bytes, str)):
        pass
    else:
        data = bytearray(PickleSerializer().dumps(obj))
        obj = sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(data)
    return obj

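# Hypothetical usage sketch (illustration only; assumes `sc` is a live
# SparkContext and `model` is some picklable Python object):
#
#     _py2java(sc, 42)          # -> 42, primitives pass through untouched
#     _py2java(sc, [1.5, 2.5])  # -> [1.5, 2.5], converted element-wise
#     _py2java(sc, model)       # -> JavaObject rebuilt JVM-side by SerDe.loads
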
def _java2py(sc, r, encoding="bytes"):
    if isinstance(r, JavaObject):
        clsName = r.getClass().getSimpleName()
        # convert RDD into JavaRDD
        if clsName != 'JavaRDD' and clsName.endswith("RDD"):
            r = r.toJavaRDD()
            clsName = 'JavaRDD'

        if clsName == 'JavaRDD':
            jrdd = sc._jvm.org.apache.spark.mllib.api.python.SerDe.javaToPython(r)
            return RDD(jrdd, sc)

        if clsName == 'Dataset':
            return DataFrame(r, SQLContext.getOrCreate(sc))

        if clsName in _picklable_classes:
            r = sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(r)
        elif isinstance(r, (JavaArray, JavaList)):
            try:
                r = sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(r)
            except Py4JJavaError:
                pass  # not picklable

    if isinstance(r, (bytearray, bytes)):
        r = PickleSerializer().loads(bytes(r), encoding=encoding)
    return r

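# Illustrative note (hypothetical objects, not from the original file): the
# Java-side class name decides the Python-side wrapper, so a JavaObject whose
# getSimpleName() is 'Dataset' comes back as a pyspark DataFrame:
#
#     df = _java2py(sc, jdataset)   # assuming `jdataset` wraps a JVM Dataset
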
def callJavaFunc(sc, func, *args):
    """ Call a Java function """
    args = [_py2java(sc, a) for a in args]
    return _java2py(sc, func(*args))


def callMLlibFunc(name, *args):
    """ Call an API in PythonMLLibAPI """
    sc = SparkContext.getOrCreate()
    api = getattr(sc._jvm.PythonMLLibAPI(), name)
    return callJavaFunc(sc, api, *args)

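# Hypothetical usage sketch (the method name below is illustrative; the actual
# PythonMLLibAPI entry points and their signatures live on the Scala side):
# each argument is converted by _py2java, the JVM method runs, and the result
# comes back through _java2py.
#
#     stats = callMLlibFunc("colStats", vectors_rdd)
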
class JavaModelWrapper(object):
    """
    Wrapper for the model in the JVM
    """
    def __init__(self, java_model):
        self._sc = SparkContext.getOrCreate()
        self._java_model = java_model

    def __del__(self):
        self._sc._gateway.detach(self._java_model)

    def call(self, name, *a):
        """Call a method of the underlying java_model"""
        return callJavaFunc(self._sc, getattr(self._java_model, name), *a)

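# Minimal sketch of how MLlib model classes build on this wrapper (the
# subclass and the "predict" method name are hypothetical):
#
#     class MyModel(JavaModelWrapper):
#         def predict(self, x):
#             # delegates to the wrapped Java model's predict(...) method
#             return self.call("predict", x)
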
def inherit_doc(cls):
    """
    A decorator that makes a class inherit documentation from its parents.
    """
    for name, func in vars(cls).items():
        # only inherit docstring for public functions
        if name.startswith("_"):
            continue
        if not func.__doc__:
            for parent in cls.__bases__:
                parent_func = getattr(parent, name, None)
                if parent_func and getattr(parent_func, "__doc__", None):
                    func.__doc__ = parent_func.__doc__
                    break
    return cls

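# Illustrative example (assumption, not part of the original module): a public
# method that lacks its own docstring picks one up from its parent class.
#
#     class Base(object):
#         def transform(self, x):
#             """Apply the transformation to x."""
#             raise NotImplementedError
#
#     @inherit_doc
#     class Child(Base):
#         def transform(self, x):
#             return x
#
#     Child.transform.__doc__   # -> "Apply the transformation to x."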