Merge branch 'scripts-reorg' of github.com:shane-huang/incubator-spark into spark-915-segregate-scripts

Conflicts:
	bin/spark-shell
	core/pom.xml
	core/src/main/scala/org/apache/spark/SparkContext.scala
	core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
	core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
	core/src/test/scala/org/apache/spark/DriverSuite.scala
	python/run-tests
	sbin/compute-classpath.sh
	sbin/spark-class
	sbin/stop-slaves.sh
commit 980afd280a
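The hunks below all implement one reorganization (SPARK-915, per the branch name): administrative and daemon scripts move out of the repository root and bin/ into a new sbin/ directory, and every caller is repointed. A hedged illustration of the effect, with paths taken from the doc hunks below and assumed to run from SPARK_HOME:

    # before this merge, admin scripts lived at the repository root:
    #   ./spark-class org.apache.spark.deploy.worker.Worker spark://IP:PORT
    # after it, the same launcher lives under sbin/:
    ./sbin/spark-class org.apache.spark.deploy.worker.Worker spark://IP:PORT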
@@ -39,23 +39,20 @@
     </fileSet>
     <fileSet>
       <directory>
-        ${project.parent.basedir}/bin/
+        ${project.parent.basedir}/sbin/
       </directory>
-      <outputDirectory>/bin</outputDirectory>
+      <outputDirectory>/sbin</outputDirectory>
       <includes>
         <include>**/*</include>
       </includes>
     </fileSet>
     <fileSet>
       <directory>
-        ${project.parent.basedir}
+        ${project.parent.basedir}/bin/
       </directory>
       <outputDirectory>/bin</outputDirectory>
       <includes>
-        <include>run-example*</include>
-        <include>spark-class*</include>
-        <include>spark-shell*</include>
-        <include>spark-executor*</include>
+        <include>**/*</include>
       </includes>
     </fileSet>
   </fileSets>

@@ -18,7 +18,7 @@
 #
 
 # Figure out where the Scala framework is installed
-FWDIR="$(cd `dirname $0`; pwd)"
+FWDIR="$(cd `dirname $0`/..; pwd)"
 
 # Export this as SPARK_HOME
 export SPARK_HOME="$FWDIR"
@@ -37,7 +37,7 @@ if [ ! -f "$FWDIR/RELEASE" ]; then
 fi
 
 # Load environment variables from conf/spark-env.sh, if it exists
-if [ -e $FWDIR/conf/spark-env.sh ] ; then
+if [ -e "$FWDIR/conf/spark-env.sh" ] ; then
   . $FWDIR/conf/spark-env.sh
 fi
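Several scripts below repeat the same relocation fix: FWDIR used to be the script's own directory, but a script that now lives in sbin/ must look one level up for the framework root. A minimal standalone sketch of the idiom (the echo is illustrative only, not part of any file in this diff):

    #!/usr/bin/env bash
    # `dirname $0` is the directory holding this script; cd'ing to its
    # parent and printing pwd yields an absolute path, so a script in
    # sbin/ resolves FWDIR to the Spark home directory.
    FWDIR="$(cd `dirname $0`/..; pwd)"
    export SPARK_HOME="$FWDIR"
    echo "SPARK_HOME resolved to $SPARK_HOME"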
@@ -20,7 +20,7 @@ rem
 set SCALA_VERSION=2.10
 
 rem Figure out where the Spark framework is installed
-set FWDIR=%~dp0
+set FWDIR=%~dp0..\
 
 rem Export this as SPARK_HOME
 set SPARK_HOME=%FWDIR%

@@ -25,13 +25,13 @@ esac
 SCALA_VERSION=2.10
 
 # Figure out where the Scala framework is installed
-FWDIR="$(cd `dirname $0`; pwd)"
+FWDIR="$(cd `dirname $0`/..; pwd)"
 
 # Export this as SPARK_HOME
 export SPARK_HOME="$FWDIR"
 
 # Load environment variables from conf/spark-env.sh, if it exists
-if [ -e $FWDIR/conf/spark-env.sh ] ; then
+if [ -e "$FWDIR/conf/spark-env.sh" ] ; then
   . $FWDIR/conf/spark-env.sh
 fi
@@ -61,7 +61,7 @@ fi
 
 # Since the examples JAR ideally shouldn't include spark-core (that dependency should be
 # "provided"), also add our standard Spark classpath, built using compute-classpath.sh.
-CLASSPATH=`$FWDIR/bin/compute-classpath.sh`
+CLASSPATH=`$FWDIR/sbin/compute-classpath.sh`
 CLASSPATH="$SPARK_EXAMPLES_JAR:$CLASSPATH"
 
 if $cygwin; then

@@ -20,7 +20,7 @@ rem
 set SCALA_VERSION=2.10
 
 rem Figure out where the Spark framework is installed
-set FWDIR=%~dp0
+set FWDIR=%~dp0..\
 
 rem Export this as SPARK_HOME
 set SPARK_HOME=%FWDIR%
@@ -49,7 +49,7 @@ if "x%SPARK_EXAMPLES_JAR%"=="x" (
 
 rem Compute Spark classpath using external script
 set DONT_PRINT_CLASSPATH=1
-call "%FWDIR%bin\compute-classpath.cmd"
+call "%FWDIR%sbin\compute-classpath.cmd"
 set DONT_PRINT_CLASSPATH=0
 set CLASSPATH=%SPARK_EXAMPLES_JAR%;%CLASSPATH%

@@ -32,7 +32,7 @@ esac
 # Enter posix mode for bash
 set -o posix
 
-FWDIR="`dirname $0`"
+FWDIR="$(cd `dirname $0`/..; pwd)"
 
 for o in "$@"; do
   if [ "$1" = "-c" -o "$1" = "--cores" ]; then
@@ -90,10 +90,10 @@ if $cygwin; then
     # "Backspace sends ^H" setting in "Keys" section of the Mintty options
     # (see https://github.com/sbt/sbt/issues/562).
     stty -icanon min 1 -echo > /dev/null 2>&1
-    $FWDIR/spark-class -Djline.terminal=unix $OPTIONS org.apache.spark.repl.Main "$@"
+    $FWDIR/sbin/spark-class -Djline.terminal=unix $OPTIONS org.apache.spark.repl.Main "$@"
    stty icanon echo > /dev/null 2>&1
 else
-    $FWDIR/spark-class $OPTIONS org.apache.spark.repl.Main "$@"
+    $FWDIR/sbin/spark-class $OPTIONS org.apache.spark.repl.Main "$@"
 fi
 
 # record the exit status lest it be overwritten:
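The trailing context line ("record the exit status lest it be overwritten") names a real shell hazard: $? holds only the most recent command's status, so a cleanup command after the REPL would clobber the REPL's exit code. A hedged sketch of the pattern, not the file's verbatim tail:

    "$FWDIR"/sbin/spark-class $OPTIONS org.apache.spark.repl.Main "$@"
    exit_status=$?                      # capture before anything else runs
    stty icanon echo > /dev/null 2>&1   # would overwrite $? if read later
    exit $exit_status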
@@ -17,6 +17,7 @@ rem See the License for the specific language governing permissions and
 rem limitations under the License.
 rem
 
-set FWDIR=%~dp0
+rem Find the path of sbin
+set SBIN=%~dp0..\sbin\
 
-cmd /V /E /C %FWDIR%spark-class2.cmd org.apache.spark.repl.Main %*
+cmd /V /E /C %SBIN%spark-class2.cmd org.apache.spark.repl.Main %*

@@ -122,7 +122,7 @@ private[spark] class ExecutorRunner(
     // Figure out our classpath with the external compute-classpath script
     val ext = if (System.getProperty("os.name").startsWith("Windows")) ".cmd" else ".sh"
     val classPath = Utils.executeAndGetOutput(
-        Seq(sparkHome + "/bin/compute-classpath" + ext),
+        Seq(sparkHome + "/sbin/compute-classpath" + ext),
         extraEnvironment=appDesc.command.environment)
 
     Seq("-cp", classPath) ++ libraryOpts ++ workerLocalOpts ++ userOpts ++ memoryOpts

@@ -127,7 +127,7 @@ private[spark] class CoarseMesosSchedulerBackend(
       CoarseGrainedSchedulerBackend.ACTOR_NAME)
     val uri = conf.get("spark.executor.uri", null)
     if (uri == null) {
-      val runScript = new File(sparkHome, "spark-class").getCanonicalPath
+      val runScript = new File(sparkHome, "./sbin/spark-class").getCanonicalPath
       command.setValue(
         "\"%s\" org.apache.spark.executor.CoarseGrainedExecutorBackend %s %s %s %d".format(
           runScript, driverUrl, offer.getSlaveId.getValue, offer.getHostname, numCores))
@@ -136,7 +136,7 @@ private[spark] class CoarseMesosSchedulerBackend(
       // glob the directory "correctly".
       val basename = uri.split('/').last.split('.').head
       command.setValue(
-        "cd %s*; ./spark-class org.apache.spark.executor.CoarseGrainedExecutorBackend %s %s %s %d"
+        "cd %s*; ./sbin/spark-class org.apache.spark.executor.CoarseGrainedExecutorBackend %s %s %s %d"
           .format(basename, driverUrl, offer.getSlaveId.getValue, offer.getHostname, numCores))
       command.addUris(CommandInfo.URI.newBuilder().setValue(uri))
     }
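The "glob the directory correctly" comment refers to the shell command this Scala code emits: Mesos extracts the executor archive into a directory whose exact name is not known here, so the generated command enters it via a prefix glob. A self-contained demo with invented directory names:

    # basename is the URI's file name up to its first '.', e.g. "spark-0.9.0"
    # for spark-0.9.0-incubating.tar.gz; the trailing '*' then matches the
    # extracted directory whatever suffix it kept.
    mkdir -p /tmp/glob-demo/spark-0.9.0-incubating && cd /tmp/glob-demo
    basename=spark-0.9.0
    cd ${basename}*    # glob resolves to spark-0.9.0-incubating
    pwd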
@@ -102,12 +102,12 @@ private[spark] class MesosSchedulerBackend(
       .setEnvironment(environment)
     val uri = sc.conf.get("spark.executor.uri", null)
     if (uri == null) {
-      command.setValue(new File(sparkHome, "spark-executor").getCanonicalPath)
+      command.setValue(new File(sparkHome, "/sbin/spark-executor").getCanonicalPath)
     } else {
       // Grab everything to the first '.'. We'll use that and '*' to
       // glob the directory "correctly".
       val basename = uri.split('/').last.split('.').head
-      command.setValue("cd %s*; ./spark-executor".format(basename))
+      command.setValue("cd %s*; ./sbin/spark-executor".format(basename))
       command.addUris(CommandInfo.URI.newBuilder().setValue(uri))
     }
     val memory = Resource.newBuilder()

@@ -36,7 +36,7 @@ private[spark] object UIWorkloadGenerator {
 
   def main(args: Array[String]) {
     if (args.length < 2) {
-      println("usage: ./spark-class org.apache.spark.ui.UIWorkloadGenerator [master] [FIFO|FAIR]")
+      println("usage: ./sbin/spark-class org.apache.spark.ui.UIWorkloadGenerator [master] [FIFO|FAIR]")
       System.exit(1)
     }
 

@@ -35,7 +35,7 @@ class DriverSuite extends FunSuite with Timeouts {
     val masters = Table(("master"), ("local"), ("local-cluster[2,1,512]"))
     forAll(masters) { (master: String) =>
       failAfter(60 seconds) {
-        Utils.execute(Seq("./spark-class", "org.apache.spark.DriverWithoutCleanup", master),
+        Utils.execute(Seq("./sbin/spark-class", "org.apache.spark.DriverWithoutCleanup", master),
           new File(System.getenv("SPARK_HOME")))
       }
     }

@@ -54,7 +54,7 @@ There are two scheduler mode that can be used to launch spark application on YARN
 
 The command to launch the YARN Client is as follows:
 
-    SPARK_JAR=<SPARK_ASSEMBLY_JAR_FILE> ./spark-class org.apache.spark.deploy.yarn.Client \
+    SPARK_JAR=<SPARK_ASSEMBLY_JAR_FILE> ./sbin/spark-class org.apache.spark.deploy.yarn.Client \
       --jar <YOUR_APP_JAR_FILE> \
       --class <APP_MAIN_CLASS> \
       --args <APP_MAIN_ARGUMENTS> \
@@ -79,7 +79,7 @@ For example:
 
     # Submit Spark's ApplicationMaster to YARN's ResourceManager, and instruct Spark to run the SparkPi example
     $ SPARK_JAR=./assembly/target/scala-{{site.SCALA_VERSION}}/spark-assembly-{{site.SPARK_VERSION}}-hadoop2.0.5-alpha.jar \
-        ./spark-class org.apache.spark.deploy.yarn.Client \
+        ./sbin/spark-class org.apache.spark.deploy.yarn.Client \
           --jar examples/target/scala-{{site.SCALA_VERSION}}/spark-examples-assembly-{{site.SPARK_VERSION}}.jar \
           --class org.apache.spark.examples.SparkPi \
           --args yarn-standalone \

@@ -28,7 +28,7 @@ the master's web UI, which is [http://localhost:8080](http://localhost:8080) by
 
 Similarly, you can start one or more workers and connect them to the master via:
 
-    ./spark-class org.apache.spark.deploy.worker.Worker spark://IP:PORT
+    ./sbin/spark-class org.apache.spark.deploy.worker.Worker spark://IP:PORT
 
 Once you have started a worker, look at the master's web UI ([http://localhost:8080](http://localhost:8080) by default).
 You should see the new node listed there, along with its number of CPUs and memory (minus one gigabyte left for the OS).
@@ -70,12 +70,12 @@ To launch a Spark standalone cluster with the launch scripts, you need to create
 
 Once you've set up this file, you can launch or stop your cluster with the following shell scripts, based on Hadoop's deploy scripts, and available in `SPARK_HOME/bin`:
 
-- `bin/start-master.sh` - Starts a master instance on the machine the script is executed on.
-- `bin/start-slaves.sh` - Starts a slave instance on each machine specified in the `conf/slaves` file.
-- `bin/start-all.sh` - Starts both a master and a number of slaves as described above.
-- `bin/stop-master.sh` - Stops the master that was started via the `bin/start-master.sh` script.
-- `bin/stop-slaves.sh` - Stops the slave instances that were started via `bin/start-slaves.sh`.
-- `bin/stop-all.sh` - Stops both the master and the slaves as described above.
+- `sbin/start-master.sh` - Starts a master instance on the machine the script is executed on.
+- `sbin/start-slaves.sh` - Starts a slave instance on each machine specified in the `conf/slaves` file.
+- `sbin/start-all.sh` - Starts both a master and a number of slaves as described above.
+- `sbin/stop-master.sh` - Stops the master that was started via the `bin/start-master.sh` script.
+- `sbin/stop-slaves.sh` - Stops the slave instances that were started via `bin/start-slaves.sh`.
+- `sbin/stop-all.sh` - Stops both the master and the slaves as described above.
 
 Note that these scripts must be executed on the machine you want to run the Spark master on, not your local machine.
 
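A brief usage note for the doc change above (assumed to run on the master machine, from SPARK_HOME, with conf/slaves populated):

    ./sbin/start-all.sh   # start a master here plus one worker per conf/slaves entry
    ./sbin/stop-all.sh    # stop that same set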
@@ -98,10 +98,7 @@ mkdir "$DISTDIR"/conf
 cp "$FWDIR"/conf/*.template "$DISTDIR"/conf
 cp -r "$FWDIR/bin" "$DISTDIR"
 cp -r "$FWDIR/python" "$DISTDIR"
-cp "$FWDIR/spark-class" "$DISTDIR"
-cp "$FWDIR/spark-shell" "$DISTDIR"
-cp "$FWDIR/spark-executor" "$DISTDIR"
-cp "$FWDIR/pyspark" "$DISTDIR"
+cp -r "$FWDIR/sbin" "$DISTDIR"
 
 
 if [ "$MAKE_TGZ" == "true" ]; then

@@ -31,7 +31,7 @@ def launch_gateway():
     # Launch the Py4j gateway using Spark's run command so that we pick up the
     # proper classpath and SPARK_MEM settings from spark-env.sh
     on_windows = platform.system() == "Windows"
-    script = "spark-class.cmd" if on_windows else "spark-class"
+    script = "./sbin/spark-class.cmd" if on_windows else "./sbin/spark-class"
     command = [os.path.join(SPARK_HOME, script), "py4j.GatewayServer",
                "--die-on-broken-pipe", "0"]
     if not on_windows:

@@ -29,7 +29,7 @@ FAILED=0
 rm -f unit-tests.log
 
 function run_test() {
-    SPARK_TESTING=0 $FWDIR/pyspark $1 2>&1 | tee -a unit-tests.log
+    SPARK_TESTING=0 $FWDIR/bin/pyspark $1 2>&1 | tee -a unit-tests.log
     FAILED=$((PIPESTATUS[0]||$FAILED))
 }
 
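The run-tests hunk leans on a bash subtlety worth spelling out: a pipeline exits with its last command's status, so `pyspark ... | tee` reports tee's status (almost always 0). ${PIPESTATUS[0]} recovers the first command's status instead. A self-contained demo:

    # the pipeline "succeeds" because tee does, but PIPESTATUS[0] keeps the
    # first command's failure; both expansions on one line see the same pipeline
    false | tee -a /tmp/unit-tests.log
    echo "pipeline: $?  first command: ${PIPESTATUS[0]}"   # pipeline: 0  first command: 1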
@@ -48,8 +48,7 @@ fi
 export JAVA_OPTS
 
 # Build up classpath
-CLASSPATH="$SPARK_CLASSPATH"
-CLASSPATH+=":$FWDIR/conf"
+CLASSPATH=":$FWDIR/conf"
 for jar in `find $FWDIR -name '*jar'`; do
   CLASSPATH+=":$jar"
 done

@@ -127,7 +127,6 @@
           <environmentVariables>
             <SPARK_HOME>${basedir}/..</SPARK_HOME>
             <SPARK_TESTING>1</SPARK_TESTING>
-            <SPARK_CLASSPATH>${spark.classpath}</SPARK_CLASSPATH>
           </environmentVariables>
         </configuration>
       </plugin>

@@ -29,7 +29,7 @@ rem Load environment variables from conf\spark-env.cmd, if it exists
 if exist "%FWDIR%conf\spark-env.cmd" call "%FWDIR%conf\spark-env.cmd"
 
 rem Build up classpath
-set CLASSPATH=%SPARK_CLASSPATH%;%FWDIR%conf
+set CLASSPATH=%FWDIR%conf
 if exist "%FWDIR%RELEASE" (
   for %%d in ("%FWDIR%jars\spark-assembly*.jar") do (
     set ASSEMBLY_JAR=%%d

@@ -26,7 +26,7 @@ SCALA_VERSION=2.10
 FWDIR="$(cd `dirname $0`/..; pwd)"
 
 # Load environment variables from conf/spark-env.sh, if it exists
-if [ -e $FWDIR/conf/spark-env.sh ] ; then
+if [ -e "$FWDIR/conf/spark-env.sh" ] ; then
   . $FWDIR/conf/spark-env.sh
 fi
 

@@ -36,10 +36,10 @@ if [ $# -le 0 ]; then
   exit 1
 fi
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
 # If the slaves file is specified in the command line,
 # then it takes precedence over the definition in

@@ -25,13 +25,13 @@ esac
 SCALA_VERSION=2.10
 
 # Figure out where the Scala framework is installed
-FWDIR="$(cd `dirname $0`; pwd)"
+FWDIR="$(cd `dirname $0`/..; pwd)"
 
 # Export this as SPARK_HOME
 export SPARK_HOME="$FWDIR"
 
 # Load environment variables from conf/spark-env.sh, if it exists
-if [ -e $FWDIR/conf/spark-env.sh ] ; then
+if [ -e "$FWDIR/conf/spark-env.sh" ] ; then
   . $FWDIR/conf/spark-env.sh
 fi
 
@@ -92,7 +92,7 @@ JAVA_OPTS="$OUR_JAVA_OPTS"
 JAVA_OPTS="$JAVA_OPTS -Djava.library.path=$SPARK_LIBRARY_PATH"
 JAVA_OPTS="$JAVA_OPTS -Xms$SPARK_MEM -Xmx$SPARK_MEM"
 # Load extra JAVA_OPTS from conf/java-opts, if it exists
-if [ -e $FWDIR/conf/java-opts ] ; then
+if [ -e "$FWDIR/conf/java-opts" ] ; then
   JAVA_OPTS="$JAVA_OPTS `cat $FWDIR/conf/java-opts`"
 fi
 export JAVA_OPTS
@@ -128,7 +128,7 @@ if [ -e "$TOOLS_DIR"/target/spark-tools*[0-9Tg].jar ]; then
 fi
 
 # Compute classpath using external script
-CLASSPATH=`$FWDIR/bin/compute-classpath.sh`
+CLASSPATH=`$FWDIR/sbin/compute-classpath.sh`
 
 if [ "$1" == "org.apache.spark.tools.JavaAPICompletenessChecker" ]; then
   CLASSPATH="$CLASSPATH:$SPARK_TOOLS_JAR"

@@ -20,7 +20,7 @@ rem
 set SCALA_VERSION=2.10
 
 rem Figure out where the Spark framework is installed
-set FWDIR=%~dp0
+set FWDIR=%~dp0..\
 
 rem Export this as SPARK_HOME
 set SPARK_HOME=%FWDIR%
@@ -73,7 +73,7 @@ for %%d in ("%TOOLS_DIR%\target\scala-%SCALA_VERSION%\spark-tools*assembly*.jar"
 
 rem Compute classpath using external script
 set DONT_PRINT_CLASSPATH=1
-call "%FWDIR%bin\compute-classpath.cmd"
+call "%FWDIR%sbin\compute-classpath.cmd"
 set DONT_PRINT_CLASSPATH=0
 set CLASSPATH=%CLASSPATH%;%SPARK_TOOLS_JAR%

@@ -37,10 +37,10 @@ if [ $# -le 1 ]; then
   exit 1
 fi
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
 # get arguments
 
@@ -147,7 +147,7 @@ case $startStop in
     spark_rotate_log "$log"
     echo starting $command, logging to $log
     cd "$SPARK_PREFIX"
-    nohup nice -n $SPARK_NICENESS "$SPARK_PREFIX"/spark-class $command "$@" >> "$log" 2>&1 < /dev/null &
+    nohup nice -n $SPARK_NICENESS "$SPARK_PREFIX"/sbin/spark-class $command "$@" >> "$log" 2>&1 < /dev/null &
     newpid=$!
     echo $newpid > $pid
     sleep 2

@@ -27,9 +27,9 @@ if [ $# -le 1 ]; then
   exit 1
 fi
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
-exec "$bin/slaves.sh" cd "$SPARK_HOME" \; "$bin/spark-daemon.sh" "$@"
+exec "$sbin/slaves.sh" cd "$SPARK_HOME" \; "$sbin/spark-daemon.sh" "$@"

@@ -17,6 +17,7 @@
 # limitations under the License.
 #
 
-FWDIR="`dirname $0`"
+FWDIR="$(cd `dirname $0`/..; pwd)"
+
 echo "Running spark-executor with framework dir = $FWDIR"
-exec $FWDIR/spark-class org.apache.spark.executor.MesosExecutorBackend
+exec $FWDIR/sbin/spark-class org.apache.spark.executor.MesosExecutorBackend

@@ -21,14 +21,14 @@
 # Starts the master on this node.
 # Starts a worker on each node specified in conf/slaves
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
 # Load the Spark configuration
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
 # Start Master
-"$bin"/start-master.sh
+"$sbin"/start-master.sh
 
 # Start Workers
-"$bin"/start-slaves.sh
+"$sbin"/start-slaves.sh

@@ -19,10 +19,10 @@
 
 # Starts the master on the machine this script is executed on.
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
 if [ -f "${SPARK_CONF_DIR}/spark-env.sh" ]; then
   . "${SPARK_CONF_DIR}/spark-env.sh"
@@ -49,4 +49,4 @@ if [ "$SPARK_PUBLIC_DNS" = "" ]; then
   fi
 fi
 
-"$bin"/spark-daemon.sh start org.apache.spark.deploy.master.Master 1 --ip $SPARK_MASTER_IP --port $SPARK_MASTER_PORT --webui-port $SPARK_MASTER_WEBUI_PORT
+"$sbin"/spark-daemon.sh start org.apache.spark.deploy.master.Master 1 --ip $SPARK_MASTER_IP --port $SPARK_MASTER_PORT --webui-port $SPARK_MASTER_WEBUI_PORT

@@ -20,8 +20,8 @@
 # Usage: start-slave.sh <worker#> <master-spark-URL>
 #   where <master-spark-URL> is like "spark://localhost:7077"
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
 # Set SPARK_PUBLIC_DNS so slaves can be linked in master web UI
 if [ "$SPARK_PUBLIC_DNS" = "" ]; then
@@ -32,4 +32,4 @@ if [ "$SPARK_PUBLIC_DNS" = "" ]; then
   fi
 fi
 
-"$bin"/spark-daemon.sh start org.apache.spark.deploy.worker.Worker "$@"
+"$sbin"/spark-daemon.sh start org.apache.spark.deploy.worker.Worker "$@"

@@ -17,10 +17,10 @@
 # limitations under the License.
 #
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
 if [ -f "${SPARK_CONF_DIR}/spark-env.sh" ]; then
   . "${SPARK_CONF_DIR}/spark-env.sh"
@@ -37,12 +37,12 @@ fi
 
 # Launch the slaves
 if [ "$SPARK_WORKER_INSTANCES" = "" ]; then
-  exec "$bin/slaves.sh" cd "$SPARK_HOME" \; "$bin/start-slave.sh" 1 spark://$SPARK_MASTER_IP:$SPARK_MASTER_PORT
+  exec "$sbin/slaves.sh" cd "$SPARK_HOME" \; "$sbin/start-slave.sh" 1 spark://$SPARK_MASTER_IP:$SPARK_MASTER_PORT
 else
   if [ "$SPARK_WORKER_WEBUI_PORT" = "" ]; then
     SPARK_WORKER_WEBUI_PORT=8081
   fi
   for ((i=0; i<$SPARK_WORKER_INSTANCES; i++)); do
-    "$bin/slaves.sh" cd "$SPARK_HOME" \; "$bin/start-slave.sh" $(( $i + 1 )) spark://$SPARK_MASTER_IP:$SPARK_MASTER_PORT --webui-port $(( $SPARK_WORKER_WEBUI_PORT + $i ))
+    "$sbin/slaves.sh" cd "$SPARK_HOME" \; "$sbin/start-slave.sh" $(( $i + 1 )) spark://$SPARK_MASTER_IP:$SPARK_MASTER_PORT --webui-port $(( $SPARK_WORKER_WEBUI_PORT + $i ))
   done
 fi
@@ -21,12 +21,12 @@
 # Run this on the master nde
 
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
 # Load the Spark configuration
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
 # Stop the slaves, then the master
-"$bin"/stop-slaves.sh
-"$bin"/stop-master.sh
+"$sbin"/stop-slaves.sh
+"$sbin"/stop-master.sh

@@ -19,9 +19,9 @@
 
 # Starts the master on the machine this script is executed on.
 
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+sbin=`dirname "$0"`
+sbin=`cd "$sbin"; pwd`
 
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
-"$bin"/spark-daemon.sh stop org.apache.spark.deploy.master.Master 1
+"$sbin"/spark-daemon.sh stop org.apache.spark.deploy.master.Master 1

@@ -18,18 +18,18 @@
 #
 
 bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
+bin=`cd "$sbin"; pwd`
 
-. "$bin/spark-config.sh"
+. "$sbin/spark-config.sh"
 
 if [ -f "${SPARK_CONF_DIR}/spark-env.sh" ]; then
   . "${SPARK_CONF_DIR}/spark-env.sh"
 fi
 
 if [ "$SPARK_WORKER_INSTANCES" = "" ]; then
-  "$bin"/spark-daemons.sh stop org.apache.spark.deploy.worker.Worker 1
+  "$sbin"/spark-daemons.sh stop org.apache.spark.deploy.worker.Worker 1
 else
   for ((i=0; i<$SPARK_WORKER_INSTANCES; i++)); do
-    "$bin"/spark-daemons.sh stop org.apache.spark.deploy.worker.Worker $(( $i + 1 ))
+    "$sbin"/spark-daemons.sh stop org.apache.spark.deploy.worker.Worker $(( $i + 1 ))
   done
 fi