#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import glob
import os
import struct
import sys
import unittest
from time import time, sleep

from pyspark import SparkContext, SparkConf


have_scipy = False
have_numpy = False
try:
    import scipy.sparse  # noqa: F401
    have_scipy = True
except ImportError:
    # No SciPy, but that's okay, we'll skip those tests
    pass
try:
    import numpy as np  # noqa: F401
    have_numpy = True
except ImportError:
    # No NumPy, but that's okay, we'll skip those tests
    pass
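
# A sketch of how these availability flags are typically consumed (the decorated
# test class below is illustrative, not part of this module): whole test cases
# are skipped when the optional dependency is missing.
#
#     @unittest.skipIf(not have_scipy, "SciPy not installed")
#     class SciPyTests(unittest.TestCase):
#         def test_serialize_sparse_vector(self):
#             ...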


SPARK_HOME = os.environ["SPARK_HOME"]


def read_int(b):
    return struct.unpack("!i", b)[0]


def write_int(i):
    return struct.pack("!i", i)


def eventually(condition, timeout=30.0, catch_assertions=False):
    """
    Wait a given amount of time for a condition to pass, else fail with an error.

    This is a helper utility for PySpark tests.

    Parameters
    ----------
    condition : function
        Function that checks for termination conditions. condition() can return:
            - True: Conditions met. Return without error.
            - other value: Conditions not met yet. Continue. Upon timeout,
              include last such value in error message.
              Note that this method may be called at any time during
              streaming execution (e.g., even before any results
              have been created).
    timeout : int
        Number of seconds to wait. Default 30 seconds.
    catch_assertions : bool
        If False (default), do not catch AssertionErrors.
        If True, catch AssertionErrors; continue, but save
        error to throw upon timeout.
    """
    start_time = time()
    lastValue = None
    while time() - start_time < timeout:
        if catch_assertions:
            try:
                lastValue = condition()
            except AssertionError as e:
                lastValue = e
        else:
            lastValue = condition()
        if lastValue is True:
            return
        sleep(0.01)
    if isinstance(lastValue, AssertionError):
        raise lastValue
    else:
        raise AssertionError(
            "Test failed due to timeout after %g sec, with last condition returning: %s"
            % (timeout, lastValue))
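
# A hedged usage sketch (the background-thread scenario is illustrative, not
# from this module, and assumes a live SparkContext named sc): poll an
# asynchronous result with eventually() instead of a fixed sleep, letting
# assertion failures retry until the timeout expires.
#
#     import threading
#     results = []
#     threading.Thread(
#         target=lambda: results.append(sc.parallelize([1, 2, 3]).sum())).start()
#
#     def condition():
#         assert results == [6], "background job not finished yet"
#         return True
#
#     eventually(condition, timeout=30.0, catch_assertions=True)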


class QuietTest(object):
    def __init__(self, sc):
        self.log4j = sc._jvm.org.apache.log4j

    def __enter__(self):
        self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
        self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
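
# A minimal usage sketch (assuming a live SparkContext named sc and a function
# `fail` that raises; both are placeholders): QuietTest lifts the root Log4j
# level to FATAL so an expected JVM-side error does not flood the test output,
# then restores the previous level on exit.
#
#     with QuietTest(sc):
#         self.assertRaises(Exception, lambda: sc.parallelize([1]).foreach(fail))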


class PySparkTestCase(unittest.TestCase):

    def setUp(self):
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        self.sc = SparkContext('local[4]', class_name)

    def tearDown(self):
        self.sc.stop()
        sys.path = self._old_sys_path
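
# A usage sketch (the subclass and test are illustrative): PySparkTestCase gives
# every test method its own SparkContext via setUp/tearDown, in contrast to
# ReusedPySparkTestCase below, which shares one context across a test class.
#
#     class MyIsolatedTests(PySparkTestCase):
#         def test_parallelize(self):
#             self.assertEqual(self.sc.parallelize([1, 2, 3]).count(), 3)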


class ReusedPySparkTestCase(unittest.TestCase):

    @classmethod
    def conf(cls):
        """
        Override this in subclasses to supply a more specific conf
        """
        return SparkConf()

    @classmethod
    def setUpClass(cls):
        cls.sc = SparkContext('local[4]', cls.__name__, conf=cls.conf())

    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()
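
# A sketch of the intended conf() override (the subclass and setting below are
# illustrative): subclasses customize the shared SparkContext by returning a
# more specific SparkConf from the classmethod.
#
#     class MyTunedTests(ReusedPySparkTestCase):
#         @classmethod
#         def conf(cls):
#             return SparkConf().set("spark.default.parallelism", "2")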


class ByteArrayOutput(object):
    def __init__(self):
        self.buffer = bytearray()

    def write(self, b):
        self.buffer += b

    def close(self):
        pass
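
# A small sketch of the write-only, file-like contract this class satisfies
# (enough for code that only calls write() and close()):
#
#     out = ByteArrayOutput()
#     out.write(write_int(42))
#     out.close()
#     assert read_int(out.buffer) == 42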


def search_jar(project_relative_path, sbt_jar_name_prefix, mvn_jar_name_prefix):
    # Note that 'sbt_jar_name_prefix' and 'mvn_jar_name_prefix' are used since the prefix can
    # vary for SBT or Maven specifically. See also SPARK-26856
    project_full_path = os.path.join(
        os.environ["SPARK_HOME"], project_relative_path)

    # We should ignore the following jars
    ignored_jar_suffixes = ("javadoc.jar", "sources.jar", "test-sources.jar", "tests.jar")

    # Search jar in the project dir using the jar name_prefix for both sbt build and maven
    # build because the artifact jars are in different directories.
    sbt_build = glob.glob(os.path.join(
        project_full_path, "target/scala-*/%s*.jar" % sbt_jar_name_prefix))
    maven_build = glob.glob(os.path.join(
        project_full_path, "target/%s*.jar" % mvn_jar_name_prefix))
    jar_paths = sbt_build + maven_build
    jars = [jar for jar in jar_paths if not jar.endswith(ignored_jar_suffixes)]

    if not jars:
        return None
    elif len(jars) > 1:
        raise Exception("Found multiple JARs: %s; please remove all but one" % (", ".join(jars)))
    else:
        return jars[0]
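
# A hedged usage sketch (the module path and jar prefixes below follow the
# naming scheme of Spark's assembly modules, but treat them as illustrative):
#
#     jar = search_jar(
#         "external/kinesis-asl-assembly",
#         "spark-streaming-kinesis-asl-assembly-",
#         "spark-streaming-kinesis-asl-assembly_")
#     if jar is None:
#         raise RuntimeError("Assembly jar not found; build it with sbt or maven first.")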