e11a24c1ba
### What changes were proposed in this pull request?

This PR proposes to fix PySpark to officially support Python 3.9. The main code already works; we should just note that we support Python 3.9. This PR also makes some minor fixes to the test code:

- `Thread.isAlive` was removed in Python 3.9, while `Thread.is_alive` exists in Python 3.6+; see https://docs.python.org/3/whatsnew/3.9.html#removed. (A minimal sketch of this rename follows at the end of this commit message.)
- Fixed `TaskContextTestsWithWorkerReuse.test_barrier_with_python_worker_reuse` and `TaskContextTests.test_barrier` to be less flaky. They become flakier in Python 3.9 for some reason.

NOTE that PyArrow does not support Python 3.9 yet.

### Why are the changes needed?

To officially support Python 3.9.

### Does this PR introduce _any_ user-facing change?

Yes, it officially supports Python 3.9.

### How was this patch tested?

Manually ran the tests:

```
$ ./run-tests --python-executable=python
Running PySpark tests. Output is in /.../spark/python/unit-tests.log
Will test against the following Python executables: ['python']
Will test the following Python modules: ['pyspark-core', 'pyspark-ml', 'pyspark-mllib', 'pyspark-resource', 'pyspark-sql', 'pyspark-streaming']
python python_implementation is CPython
python version is: Python 3.9.0
Starting test(python): pyspark.ml.tests.test_base
Starting test(python): pyspark.ml.tests.test_evaluation
Starting test(python): pyspark.ml.tests.test_algorithms
Starting test(python): pyspark.ml.tests.test_feature
Finished test(python): pyspark.ml.tests.test_base (12s)
Starting test(python): pyspark.ml.tests.test_image
Finished test(python): pyspark.ml.tests.test_evaluation (15s)
Starting test(python): pyspark.ml.tests.test_linalg
Finished test(python): pyspark.ml.tests.test_feature (25s)
Starting test(python): pyspark.ml.tests.test_param
Finished test(python): pyspark.ml.tests.test_image (17s)
Starting test(python): pyspark.ml.tests.test_persistence
Finished test(python): pyspark.ml.tests.test_param (17s)
Starting test(python): pyspark.ml.tests.test_pipeline
Finished test(python): pyspark.ml.tests.test_linalg (30s)
Starting test(python): pyspark.ml.tests.test_stat
Finished test(python): pyspark.ml.tests.test_pipeline (6s)
Starting test(python): pyspark.ml.tests.test_training_summary
Finished test(python): pyspark.ml.tests.test_stat (12s)
Starting test(python): pyspark.ml.tests.test_tuning
Finished test(python): pyspark.ml.tests.test_algorithms (68s)
Starting test(python): pyspark.ml.tests.test_wrapper
Finished test(python): pyspark.ml.tests.test_persistence (51s)
Starting test(python): pyspark.mllib.tests.test_algorithms
Finished test(python): pyspark.ml.tests.test_training_summary (33s)
Starting test(python): pyspark.mllib.tests.test_feature
Finished test(python): pyspark.ml.tests.test_wrapper (19s)
Starting test(python): pyspark.mllib.tests.test_linalg
Finished test(python): pyspark.mllib.tests.test_feature (26s)
Starting test(python): pyspark.mllib.tests.test_stat
Finished test(python): pyspark.mllib.tests.test_stat (22s)
Starting test(python): pyspark.mllib.tests.test_streaming_algorithms
Finished test(python): pyspark.mllib.tests.test_algorithms (53s)
Starting test(python): pyspark.mllib.tests.test_util
Finished test(python): pyspark.mllib.tests.test_linalg (54s)
Starting test(python): pyspark.sql.tests.test_arrow
Finished test(python): pyspark.sql.tests.test_arrow (0s) ... 61 tests were skipped
Starting test(python): pyspark.sql.tests.test_catalog
Finished test(python): pyspark.mllib.tests.test_util (11s)
Starting test(python): pyspark.sql.tests.test_column
Finished test(python): pyspark.sql.tests.test_catalog (16s)
Starting test(python): pyspark.sql.tests.test_conf
Finished test(python): pyspark.sql.tests.test_column (17s)
Starting test(python): pyspark.sql.tests.test_context
Finished test(python): pyspark.sql.tests.test_context (6s) ... 3 tests were skipped
Starting test(python): pyspark.sql.tests.test_dataframe
Finished test(python): pyspark.sql.tests.test_conf (11s)
Starting test(python): pyspark.sql.tests.test_datasources
Finished test(python): pyspark.sql.tests.test_datasources (19s)
Starting test(python): pyspark.sql.tests.test_functions
Finished test(python): pyspark.sql.tests.test_dataframe (35s) ... 3 tests were skipped
Starting test(python): pyspark.sql.tests.test_group
Finished test(python): pyspark.sql.tests.test_functions (32s)
Starting test(python): pyspark.sql.tests.test_pandas_cogrouped_map
Finished test(python): pyspark.sql.tests.test_pandas_cogrouped_map (1s) ... 15 tests were skipped
Starting test(python): pyspark.sql.tests.test_pandas_grouped_map
Finished test(python): pyspark.sql.tests.test_group (19s)
Starting test(python): pyspark.sql.tests.test_pandas_map
Finished test(python): pyspark.sql.tests.test_pandas_grouped_map (0s) ... 21 tests were skipped
Starting test(python): pyspark.sql.tests.test_pandas_udf
Finished test(python): pyspark.sql.tests.test_pandas_map (0s) ... 6 tests were skipped
Starting test(python): pyspark.sql.tests.test_pandas_udf_grouped_agg
Finished test(python): pyspark.sql.tests.test_pandas_udf (0s) ... 6 tests were skipped
Starting test(python): pyspark.sql.tests.test_pandas_udf_scalar
Finished test(python): pyspark.sql.tests.test_pandas_udf_grouped_agg (0s) ... 13 tests were skipped
Starting test(python): pyspark.sql.tests.test_pandas_udf_typehints
Finished test(python): pyspark.sql.tests.test_pandas_udf_scalar (0s) ... 50 tests were skipped
Starting test(python): pyspark.sql.tests.test_pandas_udf_window
Finished test(python): pyspark.sql.tests.test_pandas_udf_typehints (0s) ... 10 tests were skipped
Starting test(python): pyspark.sql.tests.test_readwriter
Finished test(python): pyspark.sql.tests.test_pandas_udf_window (0s) ... 14 tests were skipped
Starting test(python): pyspark.sql.tests.test_serde
Finished test(python): pyspark.sql.tests.test_serde (19s)
Starting test(python): pyspark.sql.tests.test_session
Finished test(python): pyspark.mllib.tests.test_streaming_algorithms (120s)
Starting test(python): pyspark.sql.tests.test_streaming
Finished test(python): pyspark.sql.tests.test_readwriter (25s)
Starting test(python): pyspark.sql.tests.test_types
Finished test(python): pyspark.ml.tests.test_tuning (208s)
Starting test(python): pyspark.sql.tests.test_udf
Finished test(python): pyspark.sql.tests.test_session (31s)
Starting test(python): pyspark.sql.tests.test_utils
Finished test(python): pyspark.sql.tests.test_streaming (35s)
Starting test(python): pyspark.streaming.tests.test_context
Finished test(python): pyspark.sql.tests.test_types (34s)
Starting test(python): pyspark.streaming.tests.test_dstream
Finished test(python): pyspark.sql.tests.test_utils (14s)
Starting test(python): pyspark.streaming.tests.test_kinesis
Finished test(python): pyspark.streaming.tests.test_kinesis (0s) ... 2 tests were skipped
Starting test(python): pyspark.streaming.tests.test_listener
Finished test(python): pyspark.streaming.tests.test_listener (11s)
Starting test(python): pyspark.tests.test_appsubmit
Finished test(python): pyspark.sql.tests.test_udf (39s)
Starting test(python): pyspark.tests.test_broadcast
Finished test(python): pyspark.streaming.tests.test_context (23s)
Starting test(python): pyspark.tests.test_conf
Finished test(python): pyspark.tests.test_conf (15s)
Starting test(python): pyspark.tests.test_context
Finished test(python): pyspark.tests.test_broadcast (33s)
Starting test(python): pyspark.tests.test_daemon
Finished test(python): pyspark.tests.test_daemon (5s)
Starting test(python): pyspark.tests.test_install_spark
Finished test(python): pyspark.tests.test_context (44s)
Starting test(python): pyspark.tests.test_join
Finished test(python): pyspark.tests.test_appsubmit (68s)
Starting test(python): pyspark.tests.test_profiler
Finished test(python): pyspark.tests.test_join (7s)
Starting test(python): pyspark.tests.test_rdd
Finished test(python): pyspark.tests.test_profiler (9s)
Starting test(python): pyspark.tests.test_rddbarrier
Finished test(python): pyspark.tests.test_rddbarrier (7s)
Starting test(python): pyspark.tests.test_readwrite
Finished test(python): pyspark.streaming.tests.test_dstream (107s)
Starting test(python): pyspark.tests.test_serializers
Finished test(python): pyspark.tests.test_serializers (8s)
Starting test(python): pyspark.tests.test_shuffle
Finished test(python): pyspark.tests.test_readwrite (14s)
Starting test(python): pyspark.tests.test_taskcontext
Finished test(python): pyspark.tests.test_install_spark (65s)
Starting test(python): pyspark.tests.test_util
Finished test(python): pyspark.tests.test_shuffle (8s)
Starting test(python): pyspark.tests.test_worker
Finished test(python): pyspark.tests.test_util (5s)
Starting test(python): pyspark.accumulators
Finished test(python): pyspark.accumulators (5s)
Starting test(python): pyspark.broadcast
Finished test(python): pyspark.broadcast (6s)
Starting test(python): pyspark.conf
Finished test(python): pyspark.tests.test_worker (14s)
Starting test(python): pyspark.context
Finished test(python): pyspark.conf (4s)
Starting test(python): pyspark.ml.classification
Finished test(python): pyspark.tests.test_rdd (60s)
Starting test(python): pyspark.ml.clustering
Finished test(python): pyspark.context (21s)
Starting test(python): pyspark.ml.evaluation
Finished test(python): pyspark.tests.test_taskcontext (69s)
Starting test(python): pyspark.ml.feature
Finished test(python): pyspark.ml.evaluation (26s)
Starting test(python): pyspark.ml.fpm
Finished test(python): pyspark.ml.clustering (45s)
Starting test(python): pyspark.ml.functions
Finished test(python): pyspark.ml.fpm (24s)
Starting test(python): pyspark.ml.image
Finished test(python): pyspark.ml.functions (17s)
Starting test(python): pyspark.ml.linalg.__init__
Finished test(python): pyspark.ml.linalg.__init__ (0s)
Starting test(python): pyspark.ml.recommendation
Finished test(python): pyspark.ml.classification (74s)
Starting test(python): pyspark.ml.regression
Finished test(python): pyspark.ml.image (8s)
Starting test(python): pyspark.ml.stat
Finished test(python): pyspark.ml.stat (29s)
Starting test(python): pyspark.ml.tuning
Finished test(python): pyspark.ml.regression (53s)
Starting test(python): pyspark.mllib.classification
Finished test(python): pyspark.ml.tuning (35s)
Starting test(python): pyspark.mllib.clustering
Finished test(python): pyspark.ml.feature (103s)
Starting test(python): pyspark.mllib.evaluation
Finished test(python): pyspark.mllib.classification (33s)
Starting test(python): pyspark.mllib.feature
Finished test(python): pyspark.mllib.evaluation (21s)
Starting test(python): pyspark.mllib.fpm
Finished test(python): pyspark.ml.recommendation (103s)
Starting test(python): pyspark.mllib.linalg.__init__
Finished test(python): pyspark.mllib.linalg.__init__ (1s)
Starting test(python): pyspark.mllib.linalg.distributed
Finished test(python): pyspark.mllib.feature (26s)
Starting test(python): pyspark.mllib.random
Finished test(python): pyspark.mllib.fpm (23s)
Starting test(python): pyspark.mllib.recommendation
Finished test(python): pyspark.mllib.clustering (50s)
Starting test(python): pyspark.mllib.regression
Finished test(python): pyspark.mllib.random (13s)
Starting test(python): pyspark.mllib.stat.KernelDensity
Finished test(python): pyspark.mllib.stat.KernelDensity (1s)
Starting test(python): pyspark.mllib.stat._statistics
Finished test(python): pyspark.mllib.linalg.distributed (42s)
Starting test(python): pyspark.mllib.tree
Finished test(python): pyspark.mllib.stat._statistics (19s)
Starting test(python): pyspark.mllib.util
Finished test(python): pyspark.mllib.regression (33s)
Starting test(python): pyspark.profiler
Finished test(python): pyspark.mllib.recommendation (36s)
Starting test(python): pyspark.rdd
Finished test(python): pyspark.profiler (9s)
Starting test(python): pyspark.resource.tests.test_resources
Finished test(python): pyspark.mllib.tree (19s)
Starting test(python): pyspark.serializers
Finished test(python): pyspark.mllib.util (21s)
Starting test(python): pyspark.shuffle
Finished test(python): pyspark.resource.tests.test_resources (9s)
Starting test(python): pyspark.sql.avro.functions
Finished test(python): pyspark.shuffle (1s)
Starting test(python): pyspark.sql.catalog
Finished test(python): pyspark.rdd (22s)
Starting test(python): pyspark.sql.column
Finished test(python): pyspark.serializers (12s)
Starting test(python): pyspark.sql.conf
Finished test(python): pyspark.sql.conf (6s)
Starting test(python): pyspark.sql.context
Finished test(python): pyspark.sql.catalog (14s)
Starting test(python): pyspark.sql.dataframe
Finished test(python): pyspark.sql.avro.functions (15s)
Starting test(python): pyspark.sql.functions
Finished test(python): pyspark.sql.column (24s)
Starting test(python): pyspark.sql.group
Finished test(python): pyspark.sql.context (20s)
Starting test(python): pyspark.sql.pandas.conversion
Finished test(python): pyspark.sql.pandas.conversion (13s)
Starting test(python): pyspark.sql.pandas.group_ops
Finished test(python): pyspark.sql.group (36s)
Starting test(python): pyspark.sql.pandas.map_ops
Finished test(python): pyspark.sql.pandas.group_ops (21s)
Starting test(python): pyspark.sql.pandas.serializers
Finished test(python): pyspark.sql.pandas.serializers (0s)
Starting test(python): pyspark.sql.pandas.typehints
Finished test(python): pyspark.sql.pandas.typehints (0s)
Starting test(python): pyspark.sql.pandas.types
Finished test(python): pyspark.sql.pandas.types (0s)
Starting test(python): pyspark.sql.pandas.utils
Finished test(python): pyspark.sql.pandas.utils (0s)
Starting test(python): pyspark.sql.readwriter
Finished test(python): pyspark.sql.dataframe (56s)
Starting test(python): pyspark.sql.session
Finished test(python): pyspark.sql.functions (57s)
Starting test(python): pyspark.sql.streaming
Finished test(python): pyspark.sql.pandas.map_ops (12s)
Starting test(python): pyspark.sql.types
Finished test(python): pyspark.sql.types (10s)
Starting test(python): pyspark.sql.udf
Finished test(python): pyspark.sql.streaming (16s)
Starting test(python): pyspark.sql.window
Finished test(python): pyspark.sql.session (19s)
Starting test(python): pyspark.streaming.util
Finished test(python): pyspark.streaming.util (0s)
Starting test(python): pyspark.util
Finished test(python): pyspark.util (0s)
Finished test(python): pyspark.sql.readwriter (24s)
Finished test(python): pyspark.sql.udf (13s)
Finished test(python): pyspark.sql.window (14s)
Tests passed in 780 seconds
```

Closes #30277 from HyukjinKwon/SPARK-33371.

Authored-by: HyukjinKwon <gurwls223@apache.org>
Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
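For reference, a minimal sketch of the `Thread.isAlive` → `Thread.is_alive` rename the first bullet describes (illustrative code, not the actual Spark test changes):

```python
import threading

worker = threading.Thread(target=lambda: None)
worker.start()
worker.join()

# Python 3.8 and earlier also accepted the camel-case alias:
#     worker.isAlive()
# Python 3.9 removed the alias, so only the snake-case form works,
# and it has been available since well before Python 3.6:
print(worker.is_alive())  # False once the thread has finished
```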
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import glob
import os
import sys
from setuptools import setup
from setuptools.command.install import install
from shutil import copyfile, copytree, rmtree

try:
    exec(open('pyspark/version.py').read())
except IOError:
    print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
          file=sys.stderr)
    sys.exit(-1)
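# A minimal sketch (an assumption, for illustration) of what pyspark/version.py
# contains; the exec() above runs its contents so that __version__ is defined
# in this module's namespace:
#
#     __version__ = "3.1.0.dev0"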
try:
    spec = importlib.util.spec_from_file_location("install", "pyspark/install.py")
    install_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(install_module)
except IOError:
    print("Failed to load the installing module (pyspark/install.py) which should be "
          "packaged together.",
          file=sys.stderr)
    sys.exit(-1)
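# The three importlib.util calls above are the standard pattern for loading a
# module from an explicit file path: build a spec for the file, create a fresh
# module object from that spec, then execute the file's code in the module's
# namespace. The result is usable like a regular import, e.g.
# install_module.install_spark(...) below.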
VERSION = __version__  # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")

# Provide guidance about how to use setup.py
|
|
incorrect_invocation_message = """
|
|
If you are installing pyspark from spark source, you must first build Spark and
|
|
run sdist.
|
|
|
|
To build Spark with maven you can run:
|
|
./build/mvn -DskipTests clean package
|
|
Building the source dist is done in the Python directory:
|
|
cd python
|
|
python setup.py sdist
|
|
pip install dist/*.tar.gz"""
|
|
|
|
# Figure out where the jars are that we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))

if len(JARS_PATH) == 1:
    JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
    # Release mode puts the jars in a jars directory
    JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
    print("Assembly jars exist for multiple Scala versions ({0}), please clean up assembly/target".format(
        JARS_PATH), file=sys.stderr)
    sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
    print(incorrect_invocation_message, file=sys.stderr)
    sys.exit(-1)
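# At this point, on a from-source build with a single Scala profile, JARS_PATH
# is one directory such as "assembly/target/scala-2.12/jars/" (an illustrative
# value); on a release build it is "<SPARK_HOME>/jars".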
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
USER_SCRIPTS_PATH = os.path.join(SPARK_HOME, "sbin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")

SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
USER_SCRIPTS_TARGET = os.path.join(TEMP_PATH, "sbin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")

# Check and see if we are under the spark path, in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark; otherwise we
# want to use the symlink farm. And if the symlink farm exists while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
            (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))


def _supports_symlinks():
    """Check if the system supports symlinks (e.g. *nix) or not."""
    return getattr(os, "symlink", None) is not None
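# Note that os.symlink also exists on Windows in Python 3, but creating a
# symlink there can fail without the right privileges, so a stricter probe
# (a sketch, not what this file does) would attempt a real symlink and catch
# the failure:
#
#     def _probe_symlinks(tmp_dir):
#         link = os.path.join(tmp_dir, "probe-link")
#         try:
#             os.symlink(tmp_dir, link)
#             os.remove(link)
#             return True
#         except (OSError, NotImplementedError):
#             return False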
if (in_spark):
    # Construct links for setup
    try:
        os.mkdir(TEMP_PATH)
    except OSError:
        print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
              file=sys.stderr)
        sys.exit(-1)

# If you are changing the versions here, please also change ./python/pyspark/sql/pandas/utils.py
# For Arrow, you should also check ./pom.xml and ensure there are no breaking changes in the
# binary format protocol with the Java version, see ARROW_HOME/format/* for specifications.
# Also don't forget to update python/docs/source/getting_started/install.rst.
_minimum_pandas_version = "0.23.2"
_minimum_pyarrow_version = "1.0.0"
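# For reference, pyspark/sql/pandas/utils.py enforces these minimums at runtime
# with checks along these lines (a simplified sketch, not the exact code):
#
#     def require_minimum_pandas_version():
#         from distutils.version import LooseVersion
#         import pandas
#         if LooseVersion(pandas.__version__) < LooseVersion(_minimum_pandas_version):
#             raise ImportError("Pandas >= %s must be installed; found %s."
#                               % (_minimum_pandas_version, pandas.__version__))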
class InstallCommand(install):
    # TODO(SPARK-32837) leverage pip's custom options

    def run(self):
        install.run(self)

        # Make sure the destination is always clean.
        spark_dist = os.path.join(self.install_lib, "pyspark", "spark-distribution")
        rmtree(spark_dist, ignore_errors=True)

        if ("HADOOP_VERSION" in os.environ) or ("HIVE_VERSION" in os.environ):
            # Note that the SPARK_VERSION environment variable is for testing purposes only.
            # The HIVE_VERSION environment variable is also internal for now, in case
            # we support another version of Hive in the future.
            spark_version, hadoop_version, hive_version = install_module.checked_versions(
                os.environ.get("SPARK_VERSION", VERSION).lower(),
                os.environ.get("HADOOP_VERSION", install_module.DEFAULT_HADOOP).lower(),
                os.environ.get("HIVE_VERSION", install_module.DEFAULT_HIVE).lower())

            if ("SPARK_VERSION" not in os.environ and
                    ((install_module.DEFAULT_HADOOP, install_module.DEFAULT_HIVE) ==
                        (hadoop_version, hive_version))):
                # Do not download and install if they are the same as the defaults.
                return

            install_module.install_spark(
                dest=spark_dist,
                spark_version=spark_version,
                hadoop_version=hadoop_version,
                hive_version=hive_version)
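# Usage sketch for the command above (the artifact name is illustrative): set
# HADOOP_VERSION (and/or HIVE_VERSION) at pip-install time so that run() above
# downloads a matching Spark distribution into pyspark/spark-distribution:
#
#     HADOOP_VERSION=2.7 pip install dist/pyspark-*.tar.gz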
try:
    # We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
    # can find it where expected. The rest of the files aren't copied because they are accessed
    # via Python imports instead, which resolve correctly.
    try:
        os.makedirs("pyspark/python/pyspark")
    except OSError:
        # Don't worry if the directory already exists.
        pass
    copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")

    if (in_spark):
        # Construct the symlink farm - this is necessary since we can't refer to the path above the
        # package root and we need to copy the jars and scripts which are up above the python root.
        if _supports_symlinks():
            os.symlink(JARS_PATH, JARS_TARGET)
            os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
            os.symlink(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
            os.symlink(DATA_PATH, DATA_TARGET)
            os.symlink(LICENSES_PATH, LICENSES_TARGET)
        else:
            # For Windows, fall back to the slower copytree.
            copytree(JARS_PATH, JARS_TARGET)
            copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
            copytree(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
            copytree(DATA_PATH, DATA_TARGET)
            copytree(LICENSES_PATH, LICENSES_TARGET)
    else:
        # If we are not inside of SPARK_HOME, verify we have the required symlink farm.
        if not os.path.exists(JARS_TARGET):
            print("To build the packaging you must be in the python directory under SPARK_HOME.",
                  file=sys.stderr)

    if not os.path.isdir(SCRIPTS_TARGET):
        print(incorrect_invocation_message, file=sys.stderr)
        sys.exit(-1)

    # The scripts directive requires a list of each script path and does not take wildcards.
    script_names = os.listdir(SCRIPTS_TARGET)
    scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
    # We add find_spark_home.py to the bin directory we install so that pip installed PySpark
    # will search for SPARK_HOME with Python.
    scripts.append("pyspark/find_spark_home.py")

    with open('README.md') as f:
        long_description = f.read()

    setup(
        name='pyspark',
        version=VERSION,
        description='Apache Spark Python API',
        long_description=long_description,
        long_description_content_type="text/markdown",
        author='Spark Developers',
        author_email='dev@spark.apache.org',
        url='https://github.com/apache/spark/tree/master/python',
        packages=['pyspark',
                  'pyspark.cloudpickle',
                  'pyspark.mllib',
                  'pyspark.mllib.linalg',
                  'pyspark.mllib.stat',
                  'pyspark.ml',
                  'pyspark.ml.linalg',
                  'pyspark.ml.param',
                  'pyspark.sql',
                  'pyspark.sql.avro',
                  'pyspark.sql.pandas',
                  'pyspark.streaming',
                  'pyspark.bin',
                  'pyspark.sbin',
                  'pyspark.jars',
                  'pyspark.python.pyspark',
                  'pyspark.python.lib',
                  'pyspark.data',
                  'pyspark.licenses',
                  'pyspark.resource',
                  'pyspark.examples.src.main.python'],
        include_package_data=True,
        package_dir={
            'pyspark.jars': 'deps/jars',
            'pyspark.bin': 'deps/bin',
            'pyspark.sbin': 'deps/sbin',
            'pyspark.python.lib': 'lib',
            'pyspark.data': 'deps/data',
            'pyspark.licenses': 'deps/licenses',
            'pyspark.examples.src.main.python': 'deps/examples',
        },
        package_data={
            'pyspark.jars': ['*.jar'],
            'pyspark.bin': ['*'],
            'pyspark.sbin': ['spark-config.sh', 'spark-daemon.sh',
                             'start-history-server.sh',
                             'stop-history-server.sh', ],
            'pyspark.python.lib': ['*.zip'],
            'pyspark.data': ['*.txt', '*.data'],
            'pyspark.licenses': ['*.txt'],
            'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
        scripts=scripts,
        license='http://www.apache.org/licenses/LICENSE-2.0',
        # Don't forget to update python/docs/source/getting_started/install.rst
        # if you're updating the versions or dependencies.
        install_requires=['py4j==0.10.9'],
        extras_require={
            'ml': ['numpy>=1.7'],
            'mllib': ['numpy>=1.7'],
            'sql': [
                'pandas>=%s' % _minimum_pandas_version,
                'pyarrow>=%s' % _minimum_pyarrow_version,
            ]
        },
        python_requires='>=3.6',
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Typing :: Typed'],
        cmdclass={
            'install': InstallCommand,
        },
    )
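    # Usage sketch: the extras declared above let users opt in to the optional
    # dependency sets, e.g. (illustrative command)
    #
    #     pip install "pyspark[sql]"
    #
    # which pulls in the pandas/pyarrow minimums defined earlier in this file.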
finally:
    # We only clean up the symlink farm if we were in Spark; otherwise we are installing rather
    # than packaging.
    if (in_spark):
        # Remove the symlink farm or the copied tree, depending on which was built.
        if _supports_symlinks():
            os.remove(os.path.join(TEMP_PATH, "jars"))
            os.remove(os.path.join(TEMP_PATH, "bin"))
            os.remove(os.path.join(TEMP_PATH, "sbin"))
            os.remove(os.path.join(TEMP_PATH, "examples"))
            os.remove(os.path.join(TEMP_PATH, "data"))
            os.remove(os.path.join(TEMP_PATH, "licenses"))
        else:
            rmtree(os.path.join(TEMP_PATH, "jars"))
            rmtree(os.path.join(TEMP_PATH, "bin"))
            rmtree(os.path.join(TEMP_PATH, "sbin"))
            rmtree(os.path.join(TEMP_PATH, "examples"))
            rmtree(os.path.join(TEMP_PATH, "data"))
            rmtree(os.path.join(TEMP_PATH, "licenses"))
        os.rmdir(TEMP_PATH)