spark-instrumented-optimizer/python/pyspark/pandas/tests/test_numpy_compat.py
Xinrong Meng 47d62af2a9 [SPARK-35035][PYTHON] Port Koalas internal implementation unit tests into PySpark
### What changes were proposed in this pull request?
Now that we merged the Koalas main code into the PySpark code base (#32036), we should port the Koalas internal implementation unit tests to PySpark.

### Why are the changes needed?
Currently, the pandas-on-Spark modules are not fully tested. We should enable the internal implementation unit tests.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
Enable internal implementation unit tests.

Closes #32137 from xinrong-databricks/port.test_internal_impl.

Lead-authored-by: Xinrong Meng <xinrong.meng@databricks.com>
Co-authored-by: xinrong-databricks <47337188+xinrong-databricks@users.noreply.github.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
2021-04-14 13:59:33 +09:00

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion

import numpy as np
import pandas as pd

from pyspark import pandas as ps
from pyspark.pandas import set_option, reset_option
from pyspark.pandas.numpy_compat import unary_np_spark_mappings, binary_np_spark_mappings
from pyspark.pandas.testing.utils import ReusedSQLTestCase, SQLTestUtils
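

# NumPy ufuncs applied to pandas-on-Spark objects are dispatched through NumPy's
# ``__array_ufunc__`` protocol: the ufunc's name is looked up in
# ``unary_np_spark_mappings``/``binary_np_spark_mappings`` and translated into an
# equivalent Spark column expression. The tests below sweep those mapping tables
# and compare the results against plain pandas.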
class NumPyCompatTest(ReusedSQLTestCase, SQLTestUtils):
blacklist = [
# Koalas does not currently support
"conj",
"conjugate",
"isnat",
"matmul",
"frexp",
# Values are close enough but tests failed.
"arccos",
"exp",
"expm1",
"log", # flaky
"log10", # flaky
"log1p", # flaky
"modf",
"floor_divide", # flaky
        # Results seem inconsistent depending on, I (Hyukjin) suspect, the PyArrow
        # version. From PyArrow 0.15, it seems to return the correct results via
        # PySpark. We can probably enable it once Koalas switches to PyArrow 0.15
        # completely.
"left_shift",
    ]

@property
def pdf(self):
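        # A small fixture with a non-consecutive, non-unique index (9 repeats three times).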
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
        )

@property
def kdf(self):
        return ps.from_pandas(self.pdf)

def test_np_add_series(self):
kdf = self.kdf
pdf = self.pdf
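        # Under pandas < 0.25 the ufunc result keeps a Series name, while
        # pandas-on-Spark returns a nameless Series; rename() drops the name so
        # the two sides compare equal.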
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
self.assert_eq(np.add(kdf.a, kdf.b), np.add(pdf.a, pdf.b).rename())
else:
self.assert_eq(np.add(kdf.a, kdf.b), np.add(pdf.a, pdf.b))
        self.assert_eq(np.add(kdf.a, 1), np.add(pdf.a, 1))

def test_np_add_index(self):
k_index = self.kdf.index
p_index = self.pdf.index
        self.assert_eq(np.add(k_index, k_index), np.add(p_index, p_index))

def test_np_unsupported_series(self):
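        # The second positional argument to a unary ufunc such as np.sqrt is
        # NumPy's ``out`` parameter, which pandas-on-Spark does not support.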
kdf = self.kdf
with self.assertRaisesRegex(NotImplementedError, "Koalas.*not.*support.*sqrt.*"):
            np.sqrt(kdf.a, kdf.b)

def test_np_unsupported_frame(self):
kdf = self.kdf
with self.assertRaisesRegex(NotImplementedError, "Koalas.*not.*support.*sqrt.*"):
            np.sqrt(kdf, kdf)

def test_np_spark_compat_series(self):
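        # Sweep every ufunc in the mapping tables and check that the pandas-on-Spark
        # result matches plain pandas; almost=True tolerates small floating-point
        # differences.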
        # Use a randomly generated DataFrame
pdf = pd.DataFrame(
np.random.randint(-100, 100, size=(np.random.randint(100), 2)), columns=["a", "b"]
)
pdf2 = pd.DataFrame(
np.random.randint(-100, 100, size=(len(pdf), len(pdf.columns))), columns=["a", "b"]
)
kdf = ps.from_pandas(pdf)
kdf2 = ps.from_pandas(pdf2)
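        # pdf2 matches pdf's shape (and thus its default integer index) so that
        # binary ufuncs over the two frames align elementwise.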
for np_name, spark_func in unary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# unary ufunc
self.assert_eq(np_func(pdf.a), np_func(kdf.a), almost=True)
except Exception as e:
                    raise AssertionError("Test in '%s' function failed." % np_name) from e
for np_name, spark_func in binary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
self.assert_eq(
np_func(pdf.a, pdf.b).rename(), np_func(kdf.a, kdf.b), almost=True
)
else:
self.assert_eq(np_func(pdf.a, pdf.b), np_func(kdf.a, kdf.b), almost=True)
self.assert_eq(np_func(pdf.a, 1), np_func(kdf.a, 1), almost=True)
except Exception as e:
                    raise AssertionError("Test in '%s' function failed." % np_name) from e
        # Test only the top 5 mappings for now: 'compute.ops_on_diff_frames' must be
        # enabled to combine Series from different DataFrames, and it increases test
        # time too much.
try:
set_option("compute.ops_on_diff_frames", True)
for np_name, spark_func in list(binary_np_spark_mappings.items())[:5]:
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
self.assert_eq(
np_func(pdf.a, pdf2.b).sort_index().rename(),
np_func(kdf.a, kdf2.b).sort_index(),
almost=True,
)
else:
self.assert_eq(
np_func(pdf.a, pdf2.b).sort_index(),
np_func(kdf.a, kdf2.b).sort_index(),
almost=True,
)
except Exception as e:
                        raise AssertionError("Test in '%s' function failed." % np_name) from e
finally:
            reset_option("compute.ops_on_diff_frames")

def test_np_spark_compat_frame(self):
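        # Same sweep as test_np_spark_compat_series, applied to whole DataFrames
        # instead of single Series.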
        # Use a randomly generated DataFrame
pdf = pd.DataFrame(
np.random.randint(-100, 100, size=(np.random.randint(100), 2)), columns=["a", "b"]
)
pdf2 = pd.DataFrame(
np.random.randint(-100, 100, size=(len(pdf), len(pdf.columns))), columns=["a", "b"]
)
kdf = ps.from_pandas(pdf)
kdf2 = ps.from_pandas(pdf2)
for np_name, spark_func in unary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# unary ufunc
self.assert_eq(np_func(pdf), np_func(kdf), almost=True)
except Exception as e:
                    raise AssertionError("Test in '%s' function failed." % np_name) from e
for np_name, spark_func in binary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
self.assert_eq(np_func(pdf, pdf), np_func(kdf, kdf), almost=True)
self.assert_eq(np_func(pdf, 1), np_func(kdf, 1), almost=True)
except Exception as e:
                    raise AssertionError("Test in '%s' function failed." % np_name) from e
        # Test only the top 5 mappings for now: 'compute.ops_on_diff_frames' must be
        # enabled to combine different DataFrames, and it increases test time too much.
try:
set_option("compute.ops_on_diff_frames", True)
for np_name, spark_func in list(binary_np_spark_mappings.items())[:5]:
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
self.assert_eq(
np_func(pdf, pdf2).sort_index(),
np_func(kdf, kdf2).sort_index(),
almost=True,
)
except Exception as e:
                        raise AssertionError("Test in '%s' function failed." % np_name) from e
finally:
            reset_option("compute.ops_on_diff_frames")


if __name__ == "__main__":
import unittest
    from pyspark.pandas.tests.test_numpy_compat import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)