added spark-class and spark-executor to sbin
Signed-off-by: shane-huang <shengsheng.huang@intel.com>

commit dfbdc9ddb7
parent 834686b108

@@ -39,23 +39,20 @@
     </fileSet>
     <fileSet>
       <directory>
-        ${project.parent.basedir}/bin/
+        ${project.parent.basedir}/sbin/
       </directory>
-      <outputDirectory>/bin</outputDirectory>
+      <outputDirectory>/sbin</outputDirectory>
       <includes>
         <include>**/*</include>
       </includes>
     </fileSet>
     <fileSet>
       <directory>
-        ${project.parent.basedir}
+        ${project.parent.basedir}/bin/
       </directory>
       <outputDirectory>/bin</outputDirectory>
       <includes>
-        <include>run-example*</include>
-        <include>spark-class*</include>
-        <include>spark-shell*</include>
-        <include>spark-executor*</include>
+        <include>**/*</include>
       </includes>
     </fileSet>
   </fileSets>

@@ -128,7 +128,7 @@ case $startStop in
     spark_rotate_log "$log"
     echo starting $command, logging to $log
     cd "$SPARK_PREFIX"
-    nohup nice -n $SPARK_NICENESS "$SPARK_PREFIX"/spark-class $command "$@" >> "$log" 2>&1 < /dev/null &
+    nohup nice -n $SPARK_NICENESS "$SPARK_PREFIX"/sbin/spark-class $command "$@" >> "$log" 2>&1 < /dev/null &
     newpid=$!
     echo $newpid > $pid
     sleep 2
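
The daemon wrapper's launch line packs several things together: nice for scheduling priority, nohup-style detachment, log redirection, and backgrounding. A rough Python equivalent may help in reading the one-liner; every concrete value below is a hypothetical stand-in for the script's $SPARK_PREFIX, $SPARK_NICENESS, $command, and $log variables:

    import subprocess

    spark_prefix = "/opt/spark"                            # hypothetical $SPARK_PREFIX
    niceness = "0"                                         # hypothetical $SPARK_NICENESS
    command = "org.apache.spark.deploy.master.Master"      # hypothetical $command
    log = "/opt/spark/logs/spark-master.out"               # hypothetical $log

    with open(log, "ab") as out:
        # nice -n $SPARK_NICENESS .../sbin/spark-class $command >> $log 2>&1 < /dev/null &
        proc = subprocess.Popen(
            ["nice", "-n", niceness, spark_prefix + "/sbin/spark-class", command],
            stdout=out, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL,
            start_new_session=True)  # detach from the session, like nohup + &
        print(proc.pid)              # the value the script writes to its pid file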

@@ -125,7 +125,7 @@ private[spark] class CoarseMesosSchedulerBackend(
       StandaloneSchedulerBackend.ACTOR_NAME)
     val uri = System.getProperty("spark.executor.uri")
     if (uri == null) {
-      val runScript = new File(sparkHome, "spark-class").getCanonicalPath
+      val runScript = new File(sparkHome, "/sbin/spark-class").getCanonicalPath
       command.setValue(
         "\"%s\" org.apache.spark.executor.StandaloneExecutorBackend %s %s %s %d".format(
           runScript, driverUrl, offer.getSlaveId.getValue, offer.getHostname, numCores))

@@ -134,7 +134,7 @@ private[spark] class CoarseMesosSchedulerBackend(
       // glob the directory "correctly".
       val basename = uri.split('/').last.split('.').head
       command.setValue(
-        "cd %s*; ./spark-class org.apache.spark.executor.StandaloneExecutorBackend %s %s %s %d".format(
+        "cd %s*; ./sbin/spark-class org.apache.spark.executor.StandaloneExecutorBackend %s %s %s %d".format(
           basename, driverUrl, offer.getSlaveId.getValue, offer.getHostname, numCores))
       command.addUris(CommandInfo.URI.newBuilder().setValue(uri))
     }

@@ -101,12 +101,12 @@ private[spark] class MesosSchedulerBackend(
       .setEnvironment(environment)
     val uri = System.getProperty("spark.executor.uri")
     if (uri == null) {
-      command.setValue(new File(sparkHome, "spark-executor").getCanonicalPath)
+      command.setValue(new File(sparkHome, "/sbin/spark-executor").getCanonicalPath)
     } else {
       // Grab everything to the first '.'. We'll use that and '*' to
       // glob the directory "correctly".
       val basename = uri.split('/').last.split('.').head
-      command.setValue("cd %s*; ./spark-executor".format(basename))
+      command.setValue("cd %s*; ./sbin/spark-executor".format(basename))
       command.addUris(CommandInfo.URI.newBuilder().setValue(uri))
     }
     val memory = Resource.newBuilder()
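
Both Mesos backends derive a glob prefix from spark.executor.uri so they can cd into whatever directory the fetched archive unpacked to. The Scala expression uri.split('/').last.split('.').head keeps everything before the first '.' of the last path segment. A Python rendering of the same logic, using a made-up URI:

    # Hypothetical executor URI; the slicing mirrors the Scala above.
    uri = "hdfs://namenode:9000/dist/spark-0.8.0-incubating.tgz"

    basename = uri.split("/")[-1].split(".")[0]    # -> "spark-0"
    print("cd %s*; ./sbin/spark-executor" % basename)
    # -> cd spark-0*; ./sbin/spark-executor

The trailing '*' glob is what the inline comment means by matching the unpacked directory "correctly" without knowing its exact name.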

@@ -35,7 +35,7 @@ private[spark] object UIWorkloadGenerator {

   def main(args: Array[String]) {
     if (args.length < 2) {
-      println("usage: ./spark-class spark.ui.UIWorkloadGenerator [master] [FIFO|FAIR]")
+      println("usage: ./sbin/spark-class spark.ui.UIWorkloadGenerator [master] [FIFO|FAIR]")
       System.exit(1)
     }
     val master = args(0)

@@ -35,7 +35,7 @@ class DriverSuite extends FunSuite with Timeouts {
     val masters = Table(("master"), ("local"), ("local-cluster[2,1,512]"))
     forAll(masters) { (master: String) =>
       failAfter(30 seconds) {
-        Utils.execute(Seq("./spark-class", "org.apache.spark.DriverWithoutCleanup", master),
+        Utils.execute(Seq("./sbin/spark-class", "org.apache.spark.DriverWithoutCleanup", master),
           new File(System.getenv("SPARK_HOME")))
       }
     }
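
The test shells out to the launcher with SPARK_HOME as the working directory, so the relative ./sbin/spark-class path must exist there after this change. In Python terms, roughly (the class name and env var come from the test itself; the master string is one of the table's values):

    import os
    import subprocess

    # The relative script path resolves against cwd, i.e. against $SPARK_HOME:
    subprocess.check_call(
        ["./sbin/spark-class", "org.apache.spark.DriverWithoutCleanup", "local"],
        cwd=os.environ["SPARK_HOME"])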

@@ -42,7 +42,7 @@ This would be used to connect to the cluster, write to the dfs and submit jobs t

 The command to launch the YARN Client is as follows:

-    SPARK_JAR=<SPARK_ASSEMBLY_JAR_FILE> ./spark-class org.apache.spark.deploy.yarn.Client \
+    SPARK_JAR=<SPARK_ASSEMBLY_JAR_FILE> ./sbin/spark-class org.apache.spark.deploy.yarn.Client \
       --jar <YOUR_APP_JAR_FILE> \
       --class <APP_MAIN_CLASS> \
       --args <APP_MAIN_ARGUMENTS> \

@@ -62,7 +62,7 @@ For example:

     # Submit Spark's ApplicationMaster to YARN's ResourceManager, and instruct Spark to run the SparkPi example
     $ SPARK_JAR=./assembly/target/scala-{{site.SCALA_VERSION}}/spark-assembly-{{site.SPARK_VERSION}}-hadoop2.0.5-alpha.jar \
-        ./spark-class org.apache.spark.deploy.yarn.Client \
+        ./sbin/spark-class org.apache.spark.deploy.yarn.Client \
         --jar examples/target/scala-{{site.SCALA_VERSION}}/spark-examples-assembly-{{site.SPARK_VERSION}}.jar \
         --class org.apache.spark.examples.SparkPi \
         --args yarn-standalone \

@@ -25,7 +25,7 @@ the master's web UI, which is [http://localhost:8080](http://localhost:8080) by

 Similarly, you can start one or more workers and connect them to the master via:

-    ./spark-class org.apache.spark.deploy.worker.Worker spark://IP:PORT
+    ./sbin/spark-class org.apache.spark.deploy.worker.Worker spark://IP:PORT

 Once you have started a worker, look at the master's web UI ([http://localhost:8080](http://localhost:8080) by default).
 You should see the new node listed there, along with its number of CPUs and memory (minus one gigabyte left for the OS).
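
For scripted setups, the same documented command can be launched from code. A minimal sketch, with a hypothetical master URL standing in for spark://IP:PORT:

    import subprocess

    # Popen rather than check_call: the worker runs until stopped.
    worker = subprocess.Popen(
        ["./sbin/spark-class", "org.apache.spark.deploy.worker.Worker",
         "spark://192.168.1.10:7077"])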

@@ -98,10 +98,7 @@ mkdir "$DISTDIR"/conf
 cp "$FWDIR/conf/*.template" "$DISTDIR"/conf
 cp -r "$FWDIR/bin" "$DISTDIR"
 cp -r "$FWDIR/python" "$DISTDIR"
-cp "$FWDIR/spark-class" "$DISTDIR"
-cp "$FWDIR/spark-shell" "$DISTDIR"
-cp "$FWDIR/spark-executor" "$DISTDIR"
-cp "$FWDIR/pyspark" "$DISTDIR"
+cp -r "$FWDIR/sbin" "$DISTDIR"


 if [ "$MAKE_TGZ" == "true" ]; then
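
With the four per-script cp calls collapsed into one cp -r of sbin/, a quick sanity check on a freshly built distribution could look like this. It assumes the output lands in dist/ and that, per this commit, all four scripts now live under sbin/:

    import os

    DISTDIR = "dist"  # assumed output directory of make-distribution.sh
    for script in ("spark-class", "spark-shell", "spark-executor", "pyspark"):
        path = os.path.join(DISTDIR, "sbin", script)
        print(path, "OK" if os.path.exists(path) else "MISSING")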

@@ -31,7 +31,7 @@ def launch_gateway():
     # Launch the Py4j gateway using Spark's run command so that we pick up the
     # proper classpath and SPARK_MEM settings from spark-env.sh
     on_windows = platform.system() == "Windows"
-    script = "spark-class.cmd" if on_windows else "spark-class"
+    script = "/sbin/spark-class.cmd" if on_windows else "/sbin/spark-class"
     command = [os.path.join(SPARK_HOME, script), "py4j.GatewayServer",
                "--die-on-broken-pipe", "0"]
     if not on_windows:
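
One caveat with the java_gateway.py hunk: os.path.join discards all earlier components when a later component is absolute, so the new leading slash changes the joined result. Unlike java.io.File(parent, child), which resolves a child with a leading separator under the parent (as the Scala hunks above rely on), the Python call would escape SPARK_HOME entirely:

    import os.path

    SPARK_HOME = "/opt/spark"  # hypothetical install location

    # An absolute second argument makes os.path.join restart from it:
    print(os.path.join(SPARK_HOME, "/sbin/spark-class"))
    # -> /sbin/spark-class  (SPARK_HOME is discarded)

    # Without the leading slash the script stays under SPARK_HOME:
    print(os.path.join(SPARK_HOME, "sbin/spark-class"))
    # -> /opt/spark/sbin/spark-class

So "sbin/spark-class" without the leading slash, or os.path.join(SPARK_HOME, "sbin", script), is likely the intended value here.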