#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import sys
from threading import Lock
from tempfile import NamedTemporaryFile
from collections import namedtuple

from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
    PairDeserializer
from pyspark.storagelevel import StorageLevel
from pyspark import rdd
from pyspark.rdd import RDD

from py4j.java_collections import ListConverter


class SparkContext(object):
    """
    Main entry point for Spark functionality. A SparkContext represents the
    connection to a Spark cluster, and can be used to create L{RDD}s and
    broadcast variables on that cluster.
    """

    _gateway = None
    _jvm = None
    _writeToFile = None
    _next_accum_id = 0
    _active_spark_context = None
    _lock = Lock()
    _python_includes = None  # zip and egg files that need to be added to PYTHONPATH

    def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
                 environment=None, batchSize=1024, serializer=PickleSerializer(), conf=None,
                 gateway=None):
        """
        Create a new SparkContext. At least the master and app name should be set,
        either through the named parameters here or through C{conf}.

        @param master: Cluster URL to connect to
               (e.g. mesos://host:port, spark://host:port, local[4]).
        @param appName: A name for your job, to display on the cluster web UI.
        @param sparkHome: Location where Spark is installed on cluster nodes.
        @param pyFiles: Collection of .zip or .py files to send to the cluster
               and add to PYTHONPATH.  These can be paths on the local file
               system or HDFS, HTTP, HTTPS, or FTP URLs.
        @param environment: A dictionary of environment variables to set on
               worker nodes.
        @param batchSize: The number of Python objects represented as a single
               Java object.  Set 1 to disable batching or -1 to use an
               unlimited batch size.
        @param serializer: The serializer for RDDs.
        @param conf: A L{SparkConf} object setting Spark properties.
        @param gateway: Use an existing gateway and JVM, otherwise a new JVM
               will be instantiated.


        >>> from pyspark.context import SparkContext
        >>> sc = SparkContext('local', 'test')

        >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...
        """
        if rdd._extract_concise_traceback() is not None:
            self._callsite = rdd._extract_concise_traceback()
        else:
            tempNamedTuple = namedtuple("Callsite", "function file linenum")
            self._callsite = tempNamedTuple(function=None, file=None, linenum=None)
        SparkContext._ensure_initialized(self, gateway=gateway)

        self.environment = environment or {}
        self._conf = conf or SparkConf(_jvm=self._jvm)
        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 1:
            self.serializer = self._unbatched_serializer
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.iteritems():
                self._conf.setExecutorEnv(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception("An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)
        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        # Create the Java SparkContext through Py4J
        self._jsc = self._initialize_context(self._conf._jconf)

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jsc.accumulator(
                self._jvm.java.util.ArrayList(),
                self._jvm.PythonAccumulatorParam(host, port))

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = set()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.append(root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                self._python_includes.append(filename)
                sys.path.append(path)
                if dirname not in sys.path:
                    sys.path.append(dirname)

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir).getAbsolutePath()

    def _initialize_context(self, jconf):
        """
        Initialize SparkContext in function to allow subclass specific initialization
        """
        return self._jvm.JavaSparkContext(jconf)

    @classmethod
    def _ensure_initialized(cls, instance=None, gateway=None):
        """
        Checks whether a SparkContext is initialized or not.
        Throws error if a SparkContext is already running.
        """
        with SparkContext._lock:
            if not SparkContext._gateway:
                SparkContext._gateway = gateway or launch_gateway()
                SparkContext._jvm = SparkContext._gateway.jvm
                SparkContext._writeToFile = SparkContext._jvm.PythonRDD.writeToFile

            if instance:
                if SparkContext._active_spark_context and SparkContext._active_spark_context != instance:
                    currentMaster = SparkContext._active_spark_context.master
                    currentAppName = SparkContext._active_spark_context.appName
                    callsite = SparkContext._active_spark_context._callsite

                    # Raise error if there is already a running Spark context
                    raise ValueError("Cannot run multiple SparkContexts at once; existing SparkContext(app=%s, master=%s)" \
                        " created by %s at %s:%s " \
                        % (currentAppName, currentMaster, callsite.function, callsite.file, callsite.linenum))
                else:
                    SparkContext._active_spark_context = instance

    @classmethod
    def setSystemProperty(cls, key, value):
        """
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
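
        For example (an illustrative sketch, not run as a doctest; the memory
        value is a placeholder)::

            SparkContext.setSystemProperty("spark.executor.memory", "2g")
            sc = SparkContext("local", "App Name")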
        """
        SparkContext._ensure_initialized()
        SparkContext._jvm.java.lang.System.setProperty(key, value)

    @property
    def defaultParallelism(self):
        """
        Default level of parallelism to use when not given by user (e.g. for
        reduce tasks)
        """
        return self._jsc.sc().defaultParallelism()

    @property
    def defaultMinPartitions(self):
        """
        Default min number of partitions for Hadoop RDDs when not given by user
        """
        return self._jsc.sc().defaultMinPartitions()

    def __del__(self):
        self.stop()

    def stop(self):
        """
        Shut down the SparkContext.
        """
        if self._jsc:
            self._jsc.stop()
            self._jsc = None
        if self._accumulatorServer:
            self._accumulatorServer.shutdown()
            self._accumulatorServer = None
        with SparkContext._lock:
            SparkContext._active_spark_context = None

    def parallelize(self, c, numSlices=None):
        """
        Distribute a local Python collection to form an RDD.

        >>> sc.parallelize(range(5), 5).glom().collect()
        [[0], [1], [2], [3], [4]]
        """
        numSlices = numSlices or self.defaultParallelism
        # Calling the Java parallelize() method with an ArrayList is too slow,
        # because it sends O(n) Py4J commands.  As an alternative, serialized
        # objects are written to a file and loaded through textFile().
        tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
        # Make sure we distribute data evenly if it's smaller than self.batchSize
        if "__len__" not in dir(c):
            c = list(c)    # Make it a list so we can compute its length
        batchSize = min(len(c) // numSlices, self._batchSize)
        if batchSize > 1:
            serializer = BatchedSerializer(self._unbatched_serializer,
                                           batchSize)
        else:
            serializer = self._unbatched_serializer
        serializer.dump_stream(c, tempFile)
        tempFile.close()
        readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
        jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
        return RDD(jrdd, self, serializer)

    def pickleFile(self, name, minPartitions=None):
        """
        Load an RDD previously saved using L{RDD.saveAsPickleFile} method.

        >>> tmpFile = NamedTemporaryFile(delete=True)
        >>> tmpFile.close()
        >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
        >>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.objectFile(name, minPartitions), self,
                   BatchedSerializer(PickleSerializer()))

    def textFile(self, name, minPartitions=None):
        """
        Read a text file from HDFS, a local file system (available on all
        nodes), or any Hadoop-supported file system URI, and return it as an
        RDD of Strings.

        >>> path = os.path.join(tempdir, "sample-text.txt")
        >>> with open(path, "w") as testFile:
        ...    testFile.write("Hello world!")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello world!']
        """
        minPartitions = minPartitions or min(self.defaultParallelism, 2)
        return RDD(self._jsc.textFile(name, minPartitions), self,
                   UTF8Deserializer())

    def wholeTextFiles(self, path, minPartitions=None):
        """
        Read a directory of text files from HDFS, a local file system
        (available on all nodes), or any Hadoop-supported file system
        URI. Each file is read as a single record and returned in a
        key-value pair, where the key is the path of each file, the
        value is the content of each file.

        For example, if you have the following files::

          hdfs://a-hdfs-path/part-00000
          hdfs://a-hdfs-path/part-00001
          ...
          hdfs://a-hdfs-path/part-nnnnn

        Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
        then C{rdd} contains::

          (a-hdfs-path/part-00000, its content)
          (a-hdfs-path/part-00001, its content)
          ...
          (a-hdfs-path/part-nnnnn, its content)

        NOTE: Small files are preferred, as each file will be loaded
        fully in memory.

        >>> dirPath = os.path.join(tempdir, "files")
        >>> os.mkdir(dirPath)
        >>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
        ...    file1.write("1")
        >>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
        ...    file2.write("2")
        >>> textFiles = sc.wholeTextFiles(dirPath)
        >>> sorted(textFiles.collect())
        [(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
                   PairDeserializer(UTF8Deserializer(), UTF8Deserializer()))

    def _dictToJavaMap(self, d):
        jm = self._jvm.java.util.HashMap()
        if not d:
            d = {}
        for k, v in d.iteritems():
            jm[k] = v
        return jm

    def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
                     valueConverter=None, minSplits=None):
        """
        Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is as follows:
            1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
               and value Writable classes
            2. Serialization is attempted via Pyrolite pickling
            3. If this fails, the fallback is to call 'toString' on each key and value
            4. C{PickleSerializer} is used to deserialize pickled objects on the Python side

        @param path: path to sequencefile
        @param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        @param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        @param keyConverter:
        @param valueConverter:
        @param minSplits: minimum splits in dataset
               (default min(2, sc.defaultParallelism))
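
        An illustrative call (a sketch, not run as a doctest; the path and the
        Writable class names are placeholders)::

            rdd = sc.sequenceFile("hdfs:///data/sample.seq",
                                  "org.apache.hadoop.io.Text",
                                  "org.apache.hadoop.io.LongWritable")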
        """
        minSplits = minSplits or min(self.defaultParallelism, 2)
        jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
                                                keyConverter, valueConverter, minSplits)
        return RDD(jrdd, self, PickleSerializer())

    def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
                         valueConverter=None, conf=None):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.

        @param path: path to Hadoop file
        @param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        @param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        @param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        @param keyConverter: (None by default)
        @param valueConverter: (None by default)
        @param conf: Hadoop configuration, passed in as a dict
               (None by default)
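
        An illustrative call (a sketch, not run as a doctest; the path is a
        placeholder)::

            rdd = sc.newAPIHadoopFile("hdfs:///data/input",
                                      "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
                                      "org.apache.hadoop.io.LongWritable",
                                      "org.apache.hadoop.io.Text")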
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
                                                    valueClass, keyConverter, valueConverter, jconf)
        return RDD(jrdd, self, PickleSerializer())

    def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
                        valueConverter=None, conf=None):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        @param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        @param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        @param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        @param keyConverter: (None by default)
        @param valueConverter: (None by default)
        @param conf: Hadoop configuration, passed in as a dict
               (None by default)
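
        For example, reading from Elasticsearch via elasticsearch-hadoop (an
        illustrative sketch, not run as a doctest; the resource name is a
        placeholder)::

            conf = {"es.resource": "index/type"}
            rdd = sc.newAPIHadoopRDD("org.elasticsearch.hadoop.mr.EsInputFormat",
                                     "org.apache.hadoop.io.NullWritable",
                                     "org.elasticsearch.hadoop.mr.LinkedMapWritable",
                                     conf=conf)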
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
                                                   valueClass, keyConverter, valueConverter, jconf)
        return RDD(jrdd, self, PickleSerializer())

    def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
                   valueConverter=None, conf=None):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.

        @param path: path to Hadoop file
        @param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        @param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        @param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        @param keyConverter: (None by default)
        @param valueConverter: (None by default)
        @param conf: Hadoop configuration, passed in as a dict
               (None by default)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
                                              valueClass, keyConverter, valueConverter, jconf)
        return RDD(jrdd, self, PickleSerializer())

    def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
                  valueConverter=None, conf=None):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        @param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        @param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        @param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        @param keyConverter: (None by default)
        @param valueConverter: (None by default)
        @param conf: Hadoop configuration, passed in as a dict
               (None by default)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass, valueClass,
                                             keyConverter, valueConverter, jconf)
        return RDD(jrdd, self, PickleSerializer())

    def _checkpointFile(self, name, input_deserializer):
        jrdd = self._jsc.checkpointFile(name)
        return RDD(jrdd, self, input_deserializer)

    def union(self, rdds):
        """
        Build the union of a list of RDDs.

        This supports unions() of RDDs with different serialized formats,
        although this forces them to be reserialized using the default
        serializer:

        >>> path = os.path.join(tempdir, "union-text.txt")
        >>> with open(path, "w") as testFile:
        ...    testFile.write("Hello")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello']
        >>> parallelized = sc.parallelize(["World!"])
        >>> sorted(sc.union([textFile, parallelized]).collect())
        [u'Hello', 'World!']
        """
        first_jrdd_deserializer = rdds[0]._jrdd_deserializer
        if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
            rdds = [x._reserialize() for x in rdds]
        first = rdds[0]._jrdd
        rest = [x._jrdd for x in rdds[1:]]
        rest = ListConverter().convert(rest, self._gateway._gateway_client)
        return RDD(self._jsc.union(first, rest), self,
                   rdds[0]._jrdd_deserializer)

    def broadcast(self, value):
        """
        Broadcast a read-only variable to the cluster, returning a
        L{Broadcast<pyspark.broadcast.Broadcast>}
        object for reading it in distributed functions. The variable will be
        sent to each cluster only once.
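
        A minimal illustrative usage (a sketch, not run as a doctest; assumes
        a running SparkContext C{sc})::

            b = sc.broadcast([1, 2, 3])
            sc.parallelize([0, 0]).flatMap(lambda x: b.value).collect()
            # -> [1, 2, 3, 1, 2, 3]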
        """
        pickleSer = PickleSerializer()
        pickled = pickleSer.dumps(value)
        jbroadcast = self._jsc.broadcast(bytearray(pickled))
        return Broadcast(jbroadcast.id(), value, jbroadcast,
                         self._pickled_broadcast_vars)

    def accumulator(self, value, accum_param=None):
        """
        Create an L{Accumulator} with the given initial value, using a given
        L{AccumulatorParam} helper object to define how to add values of the
        data type if provided. Default AccumulatorParams are used for integers
        and floating-point numbers if you do not provide one. For other types,
        a custom AccumulatorParam can be used.
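
        A minimal illustrative usage (a sketch, not run as a doctest; assumes
        a running SparkContext C{sc})::

            acc = sc.accumulator(0)
            sc.parallelize([1, 2, 3, 4]).foreach(lambda x: acc.add(x))
            acc.value  # -> 10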
        """
        if accum_param is None:
            if isinstance(value, int):
                accum_param = accumulators.INT_ACCUMULATOR_PARAM
            elif isinstance(value, float):
                accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
            elif isinstance(value, complex):
                accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
            else:
                raise Exception("No default accumulator param for type %s" % type(value))
        SparkContext._next_accum_id += 1
        return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)

    def addFile(self, path):
        """
        Add a file to be downloaded with this Spark job on every node.
        The C{path} passed can be either a local file, a file in HDFS
        (or other Hadoop-supported filesystems), or an HTTP, HTTPS or
        FTP URI.

        To access the file in Spark jobs, use
        L{SparkFiles.get(path)<pyspark.files.SparkFiles.get>} to find its
        download location.

        >>> from pyspark import SparkFiles
        >>> path = os.path.join(tempdir, "test.txt")
        >>> with open(path, "w") as testFile:
        ...    testFile.write("100")
        >>> sc.addFile(path)
        >>> def func(iterator):
        ...    with open(SparkFiles.get("test.txt")) as testFile:
        ...        fileVal = int(testFile.readline())
        ...        return [x * 100 for x in iterator]
        >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
        [100, 200, 300, 400]
        """
        self._jsc.sc().addFile(path)

    def clearFiles(self):
        """
        Clear the job's list of files added by L{addFile} or L{addPyFile} so
        that they do not get downloaded to any new nodes.
        """
        # TODO: remove added .py or .zip files from the PYTHONPATH?
        self._jsc.sc().clearFiles()

    def addPyFile(self, path):
        """
        Add a .py or .zip dependency for all tasks to be executed on this
        SparkContext in the future.  The C{path} passed can be either a local
        file, a file in HDFS (or other Hadoop-supported filesystems), or an
        HTTP, HTTPS or FTP URI.
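
        For example (an illustrative sketch, not run as a doctest; the egg
        path is a placeholder)::

            sc.addPyFile("/path/to/dependency.egg")
            # modules inside the egg can then be imported within tasks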
        """
        self.addFile(path)
        (dirname, filename) = os.path.split(path)  # dirname may be directory or HDFS/S3 prefix

        if filename.endswith('.zip') or filename.endswith('.ZIP') or filename.endswith('.egg'):
            self._python_includes.append(filename)
            sys.path.append(os.path.join(SparkFiles.getRootDirectory(), filename))  # for tests in local mode

    def setCheckpointDir(self, dirName):
        """
        Set the directory under which RDDs are going to be checkpointed. The
        directory must be an HDFS path if running on a cluster.
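
        For example (an illustrative sketch, not run as a doctest; the
        directory is a placeholder)::

            sc.setCheckpointDir("hdfs:///tmp/checkpoints")
            rdd = sc.parallelize(range(100))
            rdd.checkpoint()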
        """
        self._jsc.sc().setCheckpointDir(dirName)

    def _getJavaStorageLevel(self, storageLevel):
        """
        Returns a Java StorageLevel based on a pyspark.StorageLevel.
        """
        if not isinstance(storageLevel, StorageLevel):
            raise Exception("storageLevel must be of type pyspark.StorageLevel")

        newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
        return newStorageLevel(storageLevel.useDisk,
                               storageLevel.useMemory,
                               storageLevel.useOffHeap,
                               storageLevel.deserialized,
                               storageLevel.replication)

    def setJobGroup(self, groupId, description, interruptOnCancel=False):
        """
        Assigns a group ID to all the jobs started by this thread until the group ID is set to a
        different value or cleared.

        Often, a unit of execution in an application consists of multiple Spark actions or jobs.
        Application programmers can use this method to group all those jobs together and give a
        group description. Once set, the Spark web UI will associate such jobs with this group.

        The application can use L{SparkContext.cancelJobGroup} to cancel all
        running jobs in this group.

        >>> import thread, threading
        >>> from time import sleep
        >>> result = "Not Set"
        >>> lock = threading.Lock()
        >>> def map_func(x):
        ...     sleep(100)
        ...     raise Exception("Task should have been cancelled")
        >>> def start_job(x):
        ...     global result
        ...     try:
        ...         sc.setJobGroup("job_to_cancel", "some description")
        ...         result = sc.parallelize(range(x)).map(map_func).collect()
        ...     except Exception as e:
        ...         result = "Cancelled"
        ...     lock.release()
        >>> def stop_job():
        ...     sleep(5)
        ...     sc.cancelJobGroup("job_to_cancel")
        >>> suppress = lock.acquire()
        >>> suppress = thread.start_new_thread(start_job, (10,))
        >>> suppress = thread.start_new_thread(stop_job, tuple())
        >>> suppress = lock.acquire()
        >>> print result
        Cancelled

        If interruptOnCancel is set to true for the job group, then job cancellation will result
        in Thread.interrupt() being called on the job's executor threads. This is useful to help
        ensure that the tasks are actually stopped in a timely manner, but is off by default due
        to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
        """
        self._jsc.setJobGroup(groupId, description, interruptOnCancel)

    def setLocalProperty(self, key, value):
        """
        Set a local property that affects jobs submitted from this thread, such as the
        Spark fair scheduler pool.
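
        For example, to submit work into a fair scheduler pool (an
        illustrative sketch, not run as a doctest; the pool name is a
        placeholder)::

            sc.setLocalProperty("spark.scheduler.pool", "my_pool")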
        """
        self._jsc.setLocalProperty(key, value)

    def getLocalProperty(self, key):
        """
        Get a local property set in this thread, or None if it is missing. See
        L{setLocalProperty}
        """
        return self._jsc.getLocalProperty(key)

    def sparkUser(self):
        """
        Get SPARK_USER for user who is running SparkContext.
        """
        return self._jsc.sc().sparkUser()

    def cancelJobGroup(self, groupId):
        """
        Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
        for more information.
        """
        self._jsc.sc().cancelJobGroup(groupId)

    def cancelAllJobs(self):
        """
        Cancel all jobs that have been scheduled or are running.
        """
        self._jsc.sc().cancelAllJobs()

    def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
        """
        Executes the given partitionFunc on the specified set of partitions,
        returning the result as an array of elements.

        If 'partitions' is not specified, this will run over all partitions.

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part])
        [0, 1, 4, 9, 16, 25]

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
        [0, 1, 16, 25]
        """
        if partitions is None:
            partitions = range(rdd._jrdd.splits().size())
        javaPartitions = ListConverter().convert(partitions, self._gateway._gateway_client)

        # Implementation note: This is implemented as a mapPartitions followed
        # by runJob() in order to avoid having to pass a Python lambda into
        # SparkContext#runJob.
        mappedRDD = rdd.mapPartitions(partitionFunc)
        it = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, javaPartitions, allowLocal)
        return list(mappedRDD._collect_iterator_through_file(it))


def _test():
    import atexit
    import doctest
    import tempfile
    globs = globals().copy()
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    globs['tempdir'] = tempfile.mkdtemp()
    atexit.register(lambda: shutil.rmtree(globs['tempdir']))
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        exit(-1)


if __name__ == "__main__":
    _test()