ee8d661058
### What changes were proposed in this pull request?

This PR proposes to move pandas-related functionality into a pandas package. Namely:

```bash
pyspark/sql/pandas
├── __init__.py
├── conversion.py   # Conversion between pandas <> PySpark DataFrames
├── functions.py    # pandas_udf
├── group_ops.py    # Grouped UDF / Cogrouped UDF + groupby.apply, groupby.cogroup.apply
├── map_ops.py      # Map Iter UDF + mapInPandas
├── serializers.py  # pandas <> PyArrow serializers
├── types.py        # Type utils between pandas <> PyArrow
└── utils.py        # Version requirement checks
```

In order to locate `groupby.apply`, `groupby.cogroup.apply`, `mapInPandas`, `toPandas`, and `createDataFrame(pdf)` separately under the `pandas` sub-package, I had to use a mixin approach, which the Scala side often uses via `trait`s; pandas itself also uses this approach to group related functionality (see `IndexOpsMixin` as an example). You can think of it as Scala's self-typed trait. See the structure below:

```python
class PandasMapOpsMixin(object):
    def mapInPandas(self, ...):
        ...
        return ...

    # other pandas <> PySpark APIs
```

```python
class DataFrame(PandasMapOpsMixin):
    # other DataFrame APIs equivalent to the Scala side.
```

Yes, this is a big PR, but the changes are mostly just moving code around, except for one case, `createDataFrame`, where I had to split the method.

### Why are the changes needed?

pandas functionality is scattered here and there, and I myself get lost trying to find it. Also, making a change common to all pandas-related features is almost impossible now. After this change, `DataFrame` and `SparkSession` also become more consistent with the Scala side, since pandas is specific to Python; this change separates the pandas-specific APIs from `DataFrame` and `SparkSession`.

### Does this PR introduce any user-facing change?

No.

### How was this patch tested?

Existing tests should cover this. I also manually built the PySpark API documentation and checked it.

Closes #27109 from HyukjinKwon/pandas-refactoring.

Authored-by: HyukjinKwon <gurwls223@apache.org>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
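For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of how such a mixin behaves like a self-typed trait; the names (`map_rows`, `MiniDataFrame`, `_rows`) are hypothetical stand-ins, not the actual PySpark internals:

```python
# Illustrative sketch of the mixin pattern above (hypothetical names, not
# PySpark internals). Like a Scala self-typed trait, the mixin's methods
# assume attributes that only the host class defines.
class PandasMapOpsMixin(object):
    def map_rows(self, func):
        # `self._rows` is provided by the class this mixin is mixed into;
        # the mixin is not usable on its own.
        return [func(row) for row in self._rows]


class MiniDataFrame(PandasMapOpsMixin):
    def __init__(self, rows):
        self._rows = rows


if __name__ == "__main__":
    df = MiniDataFrame([1, 2, 3])
    print(df.map_rows(lambda x: x * 10))  # [10, 20, 30]
```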
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import *

__all__ = ["GroupedData"]


def dfapi(f):
    # Wraps a no-argument GroupedData method so that it delegates to the
    # same-named JVM method and wraps the result in a DataFrame.
    def _api(self):
        name = f.__name__
        jdf = getattr(self._jgd, name)()
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api


def df_varargs_api(f):
    # Same as dfapi, but for methods taking a variable number of column names.
    def _api(self, *cols):
        name = f.__name__
        jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
        return DataFrame(jdf, self.sql_ctx)
    _api.__name__ = f.__name__
    _api.__doc__ = f.__doc__
    return _api


class GroupedData(PandasGroupedOpsMixin):
    """
    A set of methods for aggregations on a :class:`DataFrame`,
    created by :func:`DataFrame.groupBy`.

    .. versionadded:: 1.3
    """

    def __init__(self, jgd, df):
        self._jgd = jgd
        self._df = df
        self.sql_ctx = df.sql_ctx

    @ignore_unicode_prefix
    @since(1.3)
    def agg(self, *exprs):
        """Computes aggregates and returns the result as a :class:`DataFrame`.

        The available aggregate functions can be:

        1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`

        2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`

           .. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
               a full shuffle is required. Also, all the data of a group will be loaded into
               memory, so the user should be aware of the potential OOM risk if data is skewed
               and certain groups are too large to fit in memory.

           .. seealso:: :func:`pyspark.sql.functions.pandas_udf`

        If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
        is the column to perform aggregation on, and the value is the aggregate function.

        Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.

        .. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
            in a single call to this function.

        :param exprs: a dict mapping from column name (string) to aggregate functions (string),
            or a list of :class:`Column`.

        >>> gdf = df.groupBy(df.name)
        >>> sorted(gdf.agg({"*": "count"}).collect())
        [Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]

        >>> from pyspark.sql import functions as F
        >>> sorted(gdf.agg(F.min(df.age)).collect())
        [Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]

        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> @pandas_udf('int', PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
        ... def min_udf(v):
        ...     return v.min()
        >>> sorted(gdf.agg(min_udf(df.age)).collect())  # doctest: +SKIP
        [Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
        """
        assert exprs, "exprs should not be empty"
        if len(exprs) == 1 and isinstance(exprs[0], dict):
            jdf = self._jgd.agg(exprs[0])
        else:
            # Columns
            assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
            jdf = self._jgd.agg(exprs[0]._jc,
                                _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
        return DataFrame(jdf, self.sql_ctx)

    @dfapi
    @since(1.3)
    def count(self):
        """Counts the number of records for each group.

        >>> sorted(df.groupBy(df.age).count().collect())
        [Row(age=2, count=1), Row(age=5, count=1)]
        """

    @df_varargs_api
    @since(1.3)
    def mean(self, *cols):
        """Computes the average value for each numeric column for each group.

        :func:`mean` is an alias for :func:`avg`.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().mean('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().mean('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """

    @df_varargs_api
    @since(1.3)
    def avg(self, *cols):
        """Computes the average value for each numeric column for each group.

        :func:`mean` is an alias for :func:`avg`.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().avg('age').collect()
        [Row(avg(age)=3.5)]
        >>> df3.groupBy().avg('age', 'height').collect()
        [Row(avg(age)=3.5, avg(height)=82.5)]
        """

    @df_varargs_api
    @since(1.3)
    def max(self, *cols):
        """Computes the max value for each numeric column for each group.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().max('age').collect()
        [Row(max(age)=5)]
        >>> df3.groupBy().max('age', 'height').collect()
        [Row(max(age)=5, max(height)=85)]
        """

    @df_varargs_api
    @since(1.3)
    def min(self, *cols):
        """Computes the min value for each numeric column for each group.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().min('age').collect()
        [Row(min(age)=2)]
        >>> df3.groupBy().min('age', 'height').collect()
        [Row(min(age)=2, min(height)=80)]
        """

    @df_varargs_api
    @since(1.3)
    def sum(self, *cols):
        """Computes the sum for each numeric column for each group.

        :param cols: list of column names (string). Non-numeric columns are ignored.

        >>> df.groupBy().sum('age').collect()
        [Row(sum(age)=7)]
        >>> df3.groupBy().sum('age', 'height').collect()
        [Row(sum(age)=7, sum(height)=165)]
        """

    @since(1.6)
    def pivot(self, pivot_col, values=None):
        """
        Pivots a column of the current :class:`DataFrame` and performs the specified aggregation.
        There are two versions of the pivot function: one that requires the caller to specify
        the list of distinct values to pivot on, and one that does not. The latter is more
        concise but less efficient, because Spark needs to first compute the list of distinct
        values internally.

        :param pivot_col: Name of the column to pivot.
        :param values: List of values that will be translated to columns in the output DataFrame.

        # Compute the sum of earnings for each year by course with each course as a separate column

        >>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
        [Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]

        # Or without specifying column values (less efficient)

        >>> df4.groupBy("year").pivot("course").sum("earnings").collect()
        [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
        >>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
        [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
        """
        if values is None:
            jgd = self._jgd.pivot(pivot_col)
        else:
            jgd = self._jgd.pivot(pivot_col, values)
        return GroupedData(jgd, self._df)


def _test():
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.group tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
                                   Row(name='Bob', age=5, height=85)]).toDF()
    globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
                                   Row(course="Java", year=2012, earnings=20000),
                                   Row(course="dotNET", year=2012, earnings=5000),
                                   Row(course="dotNET", year=2013, earnings=48000),
                                   Row(course="Java", year=2013, earnings=30000)]).toDF()
    globs['df5'] = sc.parallelize([
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
        Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
        Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
        Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()

    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()