#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import gc
import os
import sys
from tempfile import NamedTemporaryFile
import threading

from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import ChunkedStream, pickle_protocol
from pyspark.util import _exception_message, print_exec

if sys.version < '3':
    import cPickle as pickle
else:
    import pickle
    unicode = str

__all__ = ['Broadcast']


# Holds broadcasted data received from Java, keyed by its id.
_broadcastRegistry = {}


def _from_id(bid):
    from pyspark.broadcast import _broadcastRegistry
    if bid not in _broadcastRegistry:
        raise Exception("Broadcast variable '%s' not loaded!" % bid)
    return _broadcastRegistry[bid]

class Broadcast(object):

    """
    A broadcast variable created with :meth:`SparkContext.broadcast`.
    Access its value through :attr:`value`.

    Examples:

    >>> from pyspark.context import SparkContext
    >>> sc = SparkContext('local', 'test')
    >>> b = sc.broadcast([1, 2, 3, 4, 5])
    >>> b.value
    [1, 2, 3, 4, 5]
    >>> sc.parallelize([0, 0]).flatMap(lambda x: b.value).collect()
    [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]
    >>> b.unpersist()

    >>> large_broadcast = sc.broadcast(range(10000))
    """

    def __init__(self, sc=None, value=None, pickle_registry=None, path=None,
                 sock_file=None):
        """
        Should not be called directly by users -- use :meth:`SparkContext.broadcast`
        instead.
        """
        if sc is not None:
            # we're on the driver.  We want the pickled data to end up in a file (maybe encrypted)
            f = NamedTemporaryFile(delete=False, dir=sc._temp_dir)
            self._path = f.name
            self._sc = sc
            self._python_broadcast = sc._jvm.PythonRDD.setupBroadcast(self._path)
            if sc._encryption_enabled:
                # with encryption, we ask the jvm to do the encryption for us, we send it data
                # over a socket
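                # (added note) ChunkedStream below frames the pickled bytes
                # into 8 KiB length-prefixed chunks so the JVM side can
                # stream-encrypt them as they arrive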
                port, auth_secret = self._python_broadcast.setupEncryptionServer()
                (encryption_sock_file, _) = local_connect_and_auth(port, auth_secret)
                broadcast_out = ChunkedStream(encryption_sock_file, 8192)
            else:
                # no encryption, we can just write pickled data directly to the file from python
                broadcast_out = f
            self.dump(value, broadcast_out)
            if sc._encryption_enabled:
                self._python_broadcast.waitTillDataReceived()
            self._jbroadcast = sc._jsc.broadcast(self._python_broadcast)
            self._pickle_registry = pickle_registry
        else:
            # we're on an executor
            self._jbroadcast = None
            self._sc = None
            self._python_broadcast = None
            if sock_file is not None:
                # the jvm is doing decryption for us.  Read the value
                # immediately from the sock_file
                self._value = self.load(sock_file)
            else:
                # the jvm just dumps the pickled data in path -- we'll unpickle lazily when
                # the value is requested
                assert path is not None
                self._path = path

    def dump(self, value, f):
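        """
        Pickle ``value`` into the file-like object ``f`` (a regular file, or
        an encrypted socket stream on the driver) and close it. Errors other
        than ``PickleError`` are re-raised as ``PicklingError`` with context.
        """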
        try:
            pickle.dump(value, f, pickle_protocol)
        except pickle.PickleError:
            raise
        except Exception as e:
            msg = "Could not serialize broadcast: %s: %s" \
                  % (e.__class__.__name__, _exception_message(e))
            print_exec(sys.stderr)
            raise pickle.PicklingError(msg)
        f.close()

    def load_from_path(self, path):
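        """
        Open the broadcast file at ``path`` with a 1 MiB read buffer
        (broadcast values can be large) and unpickle its contents.
        """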
        with open(path, 'rb', 1 << 20) as f:
            return self.load(f)

    def load(self, file):
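        # Note: disabling the garbage collector while unpickling is a common
        # CPython speedup -- it skips GC passes over the many temporary
        # container objects a large broadcast value creates.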
        # "file" could also be a socket
        gc.disable()
        try:
            return pickle.load(file)
        finally:
            gc.enable()

    @property
    def value(self):
        """ Return the broadcasted value
        """
        if not hasattr(self, "_value") and self._path is not None:
            # we only need to decrypt it here when encryption is enabled and
            # if it's on the driver, since executor decryption is handled already
            if self._sc is not None and self._sc._encryption_enabled:
                port, auth_secret = self._python_broadcast.setupDecryptionServer()
                (decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret)
                self._python_broadcast.waitTillBroadcastDataSent()
                return self.load(decrypted_sock_file)
            else:
                self._value = self.load_from_path(self._path)
        return self._value

    def unpersist(self, blocking=False):
        """
        Delete cached copies of this broadcast on the executors. If the
        broadcast is used after this is called, it will need to be
        re-sent to each executor.

        :param blocking: Whether to block until unpersisting has completed
        """
        if self._jbroadcast is None:
            raise Exception("Broadcast can only be unpersisted in driver")
        self._jbroadcast.unpersist(blocking)

    def destroy(self, blocking=False):
        """
        Destroy all data and metadata related to this broadcast variable.
        Use this with caution; once a broadcast variable has been destroyed,
        it cannot be used again.

        .. versionchanged:: 3.0.0
           Added optional argument `blocking` to specify whether to block until all
           blocks are deleted.
        """
        if self._jbroadcast is None:
            raise Exception("Broadcast can only be destroyed in driver")
        self._jbroadcast.destroy(blocking)
        os.unlink(self._path)

    def __reduce__(self):
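        # Pickling ships only the broadcast id; executors rebuild the variable
        # through _from_id() and the worker-populated _broadcastRegistry.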
        if self._jbroadcast is None:
            raise Exception("Broadcast can only be serialized in driver")
        self._pickle_registry.add(self)
        return _from_id, (self._jbroadcast.id(),)


class BroadcastPickleRegistry(threading.local):
    """ Thread-local registry for broadcast variables that have been pickled
    """

    def __init__(self):
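        # threading.local gives each thread its own _registry; __init__ runs
        # again for every new thread that touches this object, so setdefault
        # avoids clobbering a set that thread has already built.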
        self.__dict__.setdefault("_registry", set())

    def __iter__(self):
        for bcast in self._registry:
            yield bcast

    def add(self, bcast):
        self._registry.add(bcast)

    def clear(self):
        self._registry.clear()


if __name__ == "__main__":
    import doctest
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        sys.exit(-1)