Merge pull request #565 from pwendell/dev-scripts. Closes #565.

SPARK-1066: Add developer scripts to repository.

These are some developer scripts I've been maintaining in a separate public repo. This patch adds them to the Spark repository so they can evolve here and be clearly accessible to all committers.

I may do some small additional clean-up in this PR, but I wanted to put the scripts up here in case others want to review them. There are a few types of scripts here:

1. A tool to merge pull requests.
2. A script for packaging releases.
3. A script for auditing release candidates.

Author: Patrick Wendell <pwendell@gmail.com>

== Merge branch commits ==

commit 5d5d331d01f6fd59c2eb830f652955119b012173
Author: Patrick Wendell <pwendell@gmail.com>
Date:   Sat Feb 8 22:11:47 2014 -0800

    SPARK-1066: Add developer scripts to repository.
Patrick Wendell 2014-02-08 23:13:34 -08:00 committed by Reynold Xin
parent c2341c92bb
commit f892da8716
17 changed files with 984 additions and 0 deletions

dev/README.md Normal file

@@ -0,0 +1,5 @@
# Spark Developer Scripts
This directory contains scripts useful to developers when packaging,
testing, or committing to Spark.
Many of these scripts require Apache credentials to work correctly.

dev/audit-release/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
project/
spark_audit*

dev/audit-release/audit_release.py Executable file

@@ -0,0 +1,227 @@
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Audits binary and maven artifacts for a Spark release.
# Requires GPG and Maven.
# usage:
# python audit_release.py
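#
# Example workflow (hypothetical values): edit the release constants below to
# point at the candidate under audit, then run:
#   python audit_release.py
# Test results are printed to stdout; full command output is written to the
# spark_audit_* log file created in the current directory.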
import os
import re
import shutil
import subprocess
import sys
import time
import urllib2
## Fill in release details here:
RELEASE_URL = "http://people.apache.org/~pwendell/spark-0.9.0-incubating-rc5/"
RELEASE_KEY = "9E4FE3AF"
RELEASE_REPOSITORY = "https://repository.apache.org/content/repositories/orgapachespark-1006/"
RELEASE_VERSION = "0.9.0-incubating"
SCALA_VERSION = "2.10.3"
SCALA_BINARY_VERSION = "2.10"
##
LOG_FILE_NAME = "spark_audit_%s" % time.strftime("%h_%m_%Y_%I_%M_%S")
LOG_FILE = open(LOG_FILE_NAME, 'w')
WORK_DIR = "/tmp/audit_%s" % int(time.time())
MAVEN_CMD = "mvn"
GPG_CMD = "gpg"
print "Starting tests, log output in %s. Test results printed below:" % LOG_FILE_NAME
# Track failures
failures = []
def clean_work_files():
    print "OK to delete scratch directory '%s'? (y/N): " % WORK_DIR
    response = raw_input()
    if response == "y":
        shutil.rmtree(WORK_DIR)
    print "Should I delete the log output file '%s'? (y/N): " % LOG_FILE_NAME
    response = raw_input()
    if response == "y":
        os.unlink(LOG_FILE_NAME)

def run_cmd(cmd, exit_on_failure=True):
    print >> LOG_FILE, "Running command: %s" % cmd
    ret = subprocess.call(cmd, shell=True, stdout=LOG_FILE, stderr=LOG_FILE)
    if ret != 0 and exit_on_failure:
        print "Command failed: %s" % cmd
        clean_work_files()
        sys.exit(-1)
    return ret

def run_cmd_with_output(cmd):
    print >> sys.stderr, "Running command: %s" % cmd
    return subprocess.check_output(cmd, shell=True, stderr=LOG_FILE)

def test(cond, msg):
    if cond:
        return passed(msg)
    failed(msg)

def passed(msg):
    print "[PASSED] %s" % msg

def failed(msg):
    failures.append(msg)
    print "[**FAILED**] %s" % msg

def get_url(url):
    return urllib2.urlopen(url).read()
original_dir = os.getcwd()
# For each of these modules, we'll test an 'empty' application in sbt and
# maven that links against them. This will catch issues with messed up
# dependencies within those projects.
modules = ["spark-core", "spark-bagel", "spark-mllib", "spark-streaming", "spark-repl",
"spark-graphx", "spark-streaming-flume", "spark-streaming-kafka",
"spark-streaming-mqtt", "spark-streaming-twitter", "spark-streaming-zeromq"]
modules = map(lambda m: "%s_%s" % (m, SCALA_BINARY_VERSION), modules)
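# e.g. "spark-core" becomes "spark-core_2.10" when SCALA_BINARY_VERSION is "2.10"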
# Check for directories that might interfere with tests
local_ivy_spark = "~/.ivy2/local/org.apache.spark"
cache_ivy_spark = "~/.ivy2/cache/org.apache.spark"
local_maven_kafka = "~/.m2/repository/org/apache/kafka"
local_maven_spark = "~/.m2/repository/org/apache/spark"

def ensure_path_not_present(x):
    if os.path.exists(os.path.expanduser(x)):
        print "Please remove %s, it can interfere with testing published artifacts." % x
        sys.exit(-1)

map(ensure_path_not_present, [local_ivy_spark, cache_ivy_spark, local_maven_kafka, local_maven_spark])
# SBT build tests
os.chdir("blank_sbt_build")
os.environ["SPARK_VERSION"] = RELEASE_VERSION
os.environ["SCALA_VERSION"] = SCALA_VERSION
os.environ["SPARK_RELEASE_REPOSITORY"] = RELEASE_REPOSITORY
for module in modules:
    os.environ["SPARK_MODULE"] = module
    ret = run_cmd("sbt clean update", exit_on_failure=False)
    test(ret == 0, "sbt build against '%s' module" % module)
os.chdir(original_dir)
# SBT application tests
for app in ["sbt_app_core", "sbt_app_graphx", "sbt_app_streaming"]:
os.chdir(app)
ret = run_cmd("sbt clean run", exit_on_failure=False)
test(ret == 0, "sbt application (%s)" % app)
os.chdir(original_dir)
# Maven build tests
os.chdir("blank_maven_build")
for module in modules:
    cmd = ('%s --update-snapshots -Dspark.release.repository="%s" -Dspark.version="%s" '
           '-Dspark.module="%s" clean compile' %
           (MAVEN_CMD, RELEASE_REPOSITORY, RELEASE_VERSION, module))
    ret = run_cmd(cmd, exit_on_failure=False)
    test(ret == 0, "maven build against '%s' module" % module)
os.chdir(original_dir)
os.chdir("maven_app_core")
mvn_exec_cmd = ('%s --update-snapshots -Dspark.release.repository="%s" -Dspark.version="%s" '
                '-Dscala.binary.version="%s" clean compile '
                'exec:java -Dexec.mainClass="SimpleApp"' %
                (MAVEN_CMD, RELEASE_REPOSITORY, RELEASE_VERSION, SCALA_BINARY_VERSION))
ret = run_cmd(mvn_exec_cmd, exit_on_failure=False)
test(ret == 0, "maven application (core)")
os.chdir(original_dir)
# Binary artifact tests
if os.path.exists(WORK_DIR):
    print "Working directory '%s' already exists" % WORK_DIR
    sys.exit(-1)
os.mkdir(WORK_DIR)
os.chdir(WORK_DIR)
index_page = get_url(RELEASE_URL)
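# Collect the names of all .tgz artifacts linked from the release index page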
artifact_regex = re.compile("<a href=\"(.*.tgz)\">")
artifacts = artifact_regex.findall(index_page)
for artifact in artifacts:
    print "==== Verifying download integrity for artifact: %s ====" % artifact
    artifact_url = "%s/%s" % (RELEASE_URL, artifact)
    run_cmd("wget %s" % artifact_url)
    key_file = "%s.asc" % artifact
    run_cmd("wget %s/%s" % (RELEASE_URL, key_file))
    run_cmd("wget %s%s" % (artifact_url, ".sha"))

    # Verify signature
    run_cmd("%s --keyserver pgp.mit.edu --recv-key %s" % (GPG_CMD, RELEASE_KEY))
    run_cmd("%s %s" % (GPG_CMD, key_file))
    passed("Artifact signature verified.")

    # Verify md5
    my_md5 = run_cmd_with_output("%s --print-md MD5 %s" % (GPG_CMD, artifact)).strip()
    release_md5 = get_url("%s.md5" % artifact_url).strip()
    test(my_md5 == release_md5, "Artifact MD5 verified.")

    # Verify sha
    my_sha = run_cmd_with_output("%s --print-md SHA512 %s" % (GPG_CMD, artifact)).strip()
    release_sha = get_url("%s.sha" % artifact_url).strip()
    test(my_sha == release_sha, "Artifact SHA verified.")

    # Verify Apache required files
    dir_name = artifact.replace(".tgz", "")
    run_cmd("tar xvzf %s" % artifact)
    base_files = os.listdir(dir_name)
    test("CHANGES.txt" in base_files, "Tarball contains CHANGES.txt file")
    test("NOTICE" in base_files, "Tarball contains NOTICE file")
    test("LICENSE" in base_files, "Tarball contains LICENSE file")

    os.chdir(os.path.join(WORK_DIR, dir_name))
    readme = "".join(open("README.md").readlines())
    disclaimer_part = "is an effort undergoing incubation"
    test(disclaimer_part in readme, "README file contains disclaimer")
    os.chdir(WORK_DIR)
for artifact in artifacts:
    print "==== Verifying build and tests for artifact: %s ====" % artifact
    # Directory extracted from this artifact in the previous step
    dir_name = artifact.replace(".tgz", "")
    os.chdir(os.path.join(WORK_DIR, dir_name))

    os.environ["MAVEN_OPTS"] = "-Xmx3g -XX:MaxPermSize=1g -XX:ReservedCodeCacheSize=1g"

    # Verify build
    print "==> Running build"
    run_cmd("sbt assembly")
    passed("sbt build successful")
    run_cmd("%s package -DskipTests" % MAVEN_CMD)
    passed("Maven build successful")

    # Verify tests
    print "==> Performing unit tests"
    run_cmd("%s test" % MAVEN_CMD)
    passed("Tests successful")
    os.chdir(WORK_DIR)
clean_work_files()

if len(failures) == 0:
    print "ALL TESTS PASSED"
else:
    print "SOME TESTS DID NOT PASS"
    for f in failures:
        print f

os.chdir(original_dir)


@@ -0,0 +1,47 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project>
  <groupId>spark.audit</groupId>
  <artifactId>spark-audit</artifactId>
  <modelVersion>4.0.0</modelVersion>
  <name>Spark Release Auditor</name>
  <packaging>jar</packaging>
  <version>1.0</version>

  <repositories>
    <repository>
      <id>Spray.cc repository</id>
      <url>http://repo.spray.cc</url>
    </repository>
    <repository>
      <id>Akka repository</id>
      <url>http://repo.akka.io/releases</url>
    </repository>
    <repository>
      <id>Spark Staging Repo</id>
      <url>${spark.release.repository}</url>
    </repository>
  </repositories>

  <dependencies>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>${spark.module}</artifactId>
      <version>${spark.version}</version>
    </dependency>
  </dependencies>
</project>


@@ -0,0 +1,29 @@
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
name := "Spark Release Auditor"
version := "1.0"
scalaVersion := "2.9.3"
libraryDependencies += "org.apache.spark" % System.getenv.get("SPARK_MODULE") % System.getenv.get("SPARK_VERSION")
resolvers ++= Seq(
"Spark Release Repository" at System.getenv.get("SPARK_RELEASE_REPOSITORY"),
"Akka Repository" at "http://repo.akka.io/releases/",
"Spray Repository" at "http://repo.spray.cc/")


@@ -0,0 +1,8 @@
a
b
c
d
a
b
c
d


@@ -0,0 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project>
  <groupId>spark.audit</groupId>
  <artifactId>spark-audit</artifactId>
  <modelVersion>4.0.0</modelVersion>
  <name>Simple Project</name>
  <packaging>jar</packaging>
  <version>1.0</version>

  <repositories>
    <repository>
      <id>Spray.cc repository</id>
      <url>http://repo.spray.cc</url>
    </repository>
    <repository>
      <id>Akka repository</id>
      <url>http://repo.akka.io/releases</url>
    </repository>
    <repository>
      <id>Spark Staging Repo</id>
      <url>${spark.release.repository}</url>
    </repository>
  </repositories>

  <dependencies>
    <dependency> <!-- Spark dependency -->
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_${scala.binary.version}</artifactId>
      <version>${spark.version}</version>
    </dependency>
  </dependencies>

  <!-- Makes sure we get a fairly recent compiler plugin. -->
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>2.3.2</version>
      </plugin>
    </plugins>
  </build>
</project>


@@ -0,0 +1,41 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.Function;
public class SimpleApp {
  public static void main(String[] args) {
    String logFile = "input.txt";
    JavaSparkContext sc = new JavaSparkContext("local", "Simple App");
    JavaRDD<String> logData = sc.textFile(logFile).cache();

    long numAs = logData.filter(new Function<String, Boolean>() {
      public Boolean call(String s) { return s.contains("a"); }
    }).count();

    long numBs = logData.filter(new Function<String, Boolean>() {
      public Boolean call(String s) { return s.contains("b"); }
    }).count();

    if (numAs != 2 || numBs != 2) {
      System.out.println("Failed to parse log files with Spark");
      System.exit(-1);
    }
    System.out.println("Test succeeded");
  }
}


@@ -0,0 +1,29 @@
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
name := "Simple Project"
version := "1.0"
scalaVersion := System.getenv.get("SCALA_VERSION")
libraryDependencies += "org.apache.spark" %% "spark-core" % System.getenv.get("SPARK_VERSION")
resolvers ++= Seq(
"Spark Release Repository" at System.getenv.get("SPARK_RELEASE_REPOSITORY"),
"Akka Repository" at "http://repo.akka.io/releases/",
"Spray Repository" at "http://repo.spray.cc/")


@@ -0,0 +1,8 @@
a
b
c
d
a
b
c
d


@@ -0,0 +1,36 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main.scala
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
object SimpleApp {
  def main(args: Array[String]) {
    val logFile = "input.txt"
    val sc = new SparkContext("local", "Simple App")
    val logData = sc.textFile(logFile, 2).cache()

    val numAs = logData.filter(line => line.contains("a")).count()
    val numBs = logData.filter(line => line.contains("b")).count()

    if (numAs != 2 || numBs != 2) {
      println("Failed to parse log files with Spark")
      System.exit(-1)
    }
    println("Test succeeded")
  }
}


@@ -0,0 +1,29 @@
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
name := "Simple Project"
version := "1.0"
scalaVersion := System.getenv.get("SCALA_VERSION")
libraryDependencies += "org.apache.spark" %% "spark-graphx" % System.getenv.get("SPARK_VERSION")
resolvers ++= Seq(
"Spark Release Repository" at System.getenv.get("SPARK_RELEASE_REPOSITORY"),
"Akka Repository" at "http://repo.akka.io/releases/",
"Spray Repository" at "http://repo.spray.cc/")


@@ -0,0 +1,47 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main.scala
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD
object GraphXApp {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "Simple GraphX App")
    val users: RDD[(VertexId, (String, String))] =
      sc.parallelize(Array((3L, ("rxin", "student")), (7L, ("jgonzal", "postdoc")),
        (5L, ("franklin", "prof")), (2L, ("istoica", "prof")),
        (4L, ("peter", "student"))))
    val relationships: RDD[Edge[String]] =
      sc.parallelize(Array(Edge(3L, 7L, "collab"), Edge(5L, 3L, "advisor"),
        Edge(2L, 5L, "colleague"), Edge(5L, 7L, "pi"),
        Edge(4L, 0L, "student"), Edge(5L, 0L, "colleague")))
    val defaultUser = ("John Doe", "Missing")
    val graph = Graph(users, relationships, defaultUser)

    // Notice that there is a user 0 (for which we have no information) connected to users
    // 4 (peter) and 5 (franklin).
    val triplets = graph.triplets.map(e => (e.srcAttr._1, e.dstAttr._1)).collect
    if (!triplets.exists(_ == ("peter", "John Doe"))) {
      println("Failed to run GraphX")
      System.exit(-1)
    }
    println("Test succeeded")
  }
}


@@ -0,0 +1,29 @@
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
name := "Simple Project"
version := "1.0"
scalaVersion := System.getenv.get("SCALA_VERSION")
libraryDependencies += "org.apache.spark" %% "spark-streaming" % System.getenv.get("SPARK_VERSION")
resolvers ++= Seq(
"Spark Release Repository" at System.getenv.get("SPARK_RELEASE_REPOSITORY"),
"Akka Repository" at "http://repo.akka.io/releases/",
"Spray Repository" at "http://repo.spray.cc/")


@@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main.scala
import scala.collection.mutable.{ListBuffer, Queue}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming._
object SparkStreamingExample {
  def main(args: Array[String]) {
    val conf = new SparkConf(true)
      .setMaster("local[2]")
      .setAppName("Streaming test")
    val ssc = new StreamingContext(conf, Seconds(1))

    val seen = ListBuffer[RDD[Int]]()

    val rdd1 = ssc.sparkContext.makeRDD(1 to 100, 10)
    val rdd2 = ssc.sparkContext.makeRDD(1 to 1000, 10)
    val rdd3 = ssc.sparkContext.makeRDD(1 to 10000, 10)

    val queue = Queue(rdd1, rdd2, rdd3)
    val stream = ssc.queueStream(queue)
    stream.foreachRDD(rdd => seen += rdd)

    ssc.start()
    Thread.sleep(5000)

    def test(f: => Boolean, failureMsg: String) = {
      if (!f) {
        println(failureMsg)
        System.exit(-1)
      }
    }

    val rddCounts = seen.map(rdd => rdd.count()).filter(_ > 0)
    test(rddCounts.length == 3, "Did not collect three RDDs from the stream")
    test(rddCounts.toSet == Set(100, 1000, 10000), "Did not find expected streams")

    println("Test succeeded")
    ssc.stop()
  }
}


@@ -0,0 +1,132 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Quick-and-dirty automation of making maven and binary releases. Not robust at all.
# Publishes releases to Maven and packages/copies binary release artifacts.
# Expects to be run in a totally empty directory.
#
# Would be nice to add:
# - Send output to stderr and have useful logging in stdout
# - Have this use sbt rather than the Maven release plug-in
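#
# Example (hypothetical paths): run from an empty scratch directory after
# filling in the credentials below, e.g.
#   mkdir /tmp/spark-release && cd /tmp/spark-release
#   /path/to/this-release-script.sh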
GIT_USERNAME=pwendell
GIT_PASSWORD=XXX
GPG_PASSPHRASE=XXX
GIT_BRANCH=branch-0.9
RELEASE_VERSION=0.9.0-incubating
RC_NAME=rc2
USER_NAME=pwendell
set -e
GIT_TAG=v$RELEASE_VERSION
# Artifact publishing
git clone https://git-wip-us.apache.org/repos/asf/incubator-spark.git -b $GIT_BRANCH
cd incubator-spark
export MAVEN_OPTS="-Xmx3g -XX:MaxPermSize=1g -XX:ReservedCodeCacheSize=1g"
mvn -Pyarn release:clean
mvn -DskipTests \
-Darguments="-DskipTests=true -Dhadoop.version=2.2.0 -Dyarn.version=2.2.0 -Dgpg.passphrase=${GPG_PASSPHRASE}" \
-Dusername=$GIT_USERNAME -Dpassword=$GIT_PASSWORD \
-Dhadoop.version=2.2.0 -Dyarn.version=2.2.0 \
-Pyarn \
-Dtag=$GIT_TAG -DautoVersionSubmodules=true \
--batch-mode release:prepare
mvn -DskipTests \
-Darguments="-DskipTests=true -Dhadoop.version=2.2.0 -Dyarn.version=2.2.0 -Dgpg.passphrase=${GPG_PASSPHRASE}" \
-Dhadoop.version=2.2.0 -Dyarn.version=2.2.0 \
-Pyarn \
release:perform
rm -rf incubator-spark
# Source and binary tarballs
git clone https://git-wip-us.apache.org/repos/asf/incubator-spark.git
cd incubator-spark
git checkout --force $GIT_TAG
release_hash=`git rev-parse HEAD`
rm .gitignore
rm -rf .git
cd ..
cp -r incubator-spark spark-$RELEASE_VERSION
tar cvzf spark-$RELEASE_VERSION.tgz spark-$RELEASE_VERSION
echo $GPG_PASSPHRASE | gpg --passphrase-fd 0 --armour --output spark-$RELEASE_VERSION.tgz.asc \
--detach-sig spark-$RELEASE_VERSION.tgz
echo $GPG_PASSPHRASE | gpg --passphrase-fd 0 --print-md MD5 spark-$RELEASE_VERSION.tgz > \
spark-$RELEASE_VERSION.tgz.md5
echo $GPG_PASSPHRASE | gpg --passphrase-fd 0 --print-md SHA512 spark-$RELEASE_VERSION.tgz > \
spark-$RELEASE_VERSION.tgz.sha
rm -rf spark-$RELEASE_VERSION
make_binary_release() {
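  # $1: name suffix for this binary distribution (e.g. "hadoop1")
  # $2: extra Maven flags selecting the matching Hadoop/YARN build profile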
  NAME=$1
  MAVEN_FLAGS=$2

  cp -r incubator-spark spark-$RELEASE_VERSION-bin-$NAME
  cd spark-$RELEASE_VERSION-bin-$NAME
  export MAVEN_OPTS="-Xmx3g -XX:MaxPermSize=1g -XX:ReservedCodeCacheSize=1g"
  mvn $MAVEN_FLAGS -DskipTests clean package
  find . -name test-classes -type d | xargs rm -rf
  find . -name classes -type d | xargs rm -rf
  cd ..
  tar cvzf spark-$RELEASE_VERSION-bin-$NAME.tgz spark-$RELEASE_VERSION-bin-$NAME
  echo $GPG_PASSPHRASE | gpg --passphrase-fd 0 --armour \
    --output spark-$RELEASE_VERSION-bin-$NAME.tgz.asc \
    --detach-sig spark-$RELEASE_VERSION-bin-$NAME.tgz
  echo $GPG_PASSPHRASE | gpg --passphrase-fd 0 --print-md \
    MD5 spark-$RELEASE_VERSION-bin-$NAME.tgz > \
    spark-$RELEASE_VERSION-bin-$NAME.tgz.md5
  echo $GPG_PASSPHRASE | gpg --passphrase-fd 0 --print-md \
    SHA512 spark-$RELEASE_VERSION-bin-$NAME.tgz > \
    spark-$RELEASE_VERSION-bin-$NAME.tgz.sha
  rm -rf spark-$RELEASE_VERSION-bin-$NAME
}
make_binary_release "hadoop1" "-Dhadoop.version=1.0.4"
make_binary_release "cdh4" "-Dhadoop.version=2.0.0-mr1-cdh4.2.0"
make_binary_release "hadoop2" "-Pyarn -Dhadoop.version=2.2.0 -Dyarn.version=2.2.0"
# Copy data
echo "Copying release tarballs"
ssh $USER_NAME@people.apache.org \
mkdir /home/$USER_NAME/public_html/spark-$RELEASE_VERSION-$RC_NAME
rc_folder=spark-$RELEASE_VERSION-$RC_NAME
scp spark* \
$USER_NAME@people.apache.org:/home/$USER_NAME/public_html/$rc_folder/
# Docs
cd incubator-spark
cd docs
jekyll build
echo "Copying release documentation"
rc_docs_folder=${rc_folder}-docs
rsync -r _site/* $USER_NAME@people.apache.org:/home/$USER_NAME/public_html/$rc_docs_folder
echo "Release $RELEASE_VERSION completed:"
echo "Git tag:\t $GIT_TAG"
echo "Release commit:\t $release_hash"
echo "Binary location:\t http://people.apache.org/~$USER_NAME/$rc_folder"
echo "Doc location:\t http://people.apache.org/~$USER_NAME/$rc_docs_folder"

dev/merge_spark_pr.py Executable file

@@ -0,0 +1,197 @@
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./merge_spark_pr.py (see config env vars below)
#
# This utility assumes you already have a local Spark git clone and that you
# have added remotes corresponding to both (i) the Apache Spark GitHub
# mirror and (ii) the Apache git repo.
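#
# For example, a setup matching the defaults below (the GitHub URL is inferred
# from GIT_API_BASE; the Apache URL is the one used by the release script):
#   git remote add apache-github https://github.com/apache/incubator-spark.git
#   git remote add apache https://git-wip-us.apache.org/repos/asf/incubator-spark.git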
import json
import os
import subprocess
import sys
import tempfile
import urllib2
# Location of your Spark git development area
SPARK_HOME = os.environ.get("SPARK_HOME", "/home/patrick/Documents/spark")
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
GIT_API_BASE = "https://api.github.com/repos/apache/incubator-spark"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(SPARK_HOME)
def get_json(url):
    try:
        return json.load(urllib2.urlopen(url))
    except urllib2.HTTPError as e:
        print "Unable to fetch URL, exiting: %s" % url
        sys.exit(-1)

def fail(msg):
    print msg
    clean_up()
    sys.exit(-1)

def run_cmd(cmd):
    if isinstance(cmd, list):
        return subprocess.check_output(cmd)
    else:
        return subprocess.check_output(cmd.split(" "))

def continue_maybe(prompt):
    result = raw_input("\n%s (y/n): " % prompt)
    if result.lower() != "y":
        fail("Okay, exiting")
original_head = run_cmd("git rev-parse HEAD")[:8]
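# clean_up() restores this original HEAD and deletes any PR_TOOL temporary branches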
def clean_up():
    print "Restoring head pointer to %s" % original_head
    run_cmd("git checkout %s" % original_head)

    branches = run_cmd("git branch").replace(" ", "").split("\n")
    for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
        print "Deleting local branch %s" % branch
        run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)

    run_cmd(['git', 'merge', pr_branch_name, '--squash'])

    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                              '--pretty=format:%an <%ae>']).split("\n")
    distinct_authors = sorted(set(commit_authors), key=lambda x: commit_authors.count(x),
                              reverse=True)
    primary_author = distinct_authors[0]
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name]).split("\n\n")

    merge_message = "Merge pull request #%s from %s. Closes #%s.\n\n%s\n\n%s" % (
        pr_num, pr_repo_desc, pr_num, title, body)
    merge_message_parts = merge_message.split("\n\n")
    merge_message_flags = []

    for p in merge_message_parts:
        merge_message_flags = merge_message_flags + ["-m", p]
    authors = "\n".join(["Author: %s" % a for a in distinct_authors])
    merge_message_flags = merge_message_flags + ["-m", authors]
    merge_message_flags = merge_message_flags + ["-m", "== Merge branch commits =="]
    for c in commits:
        merge_message_flags = merge_message_flags + ["-m", c]

    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)

    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()

    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def maybe_cherry_pick(pr_num, merge_hash, default_branch):
    continue_maybe("Would you like to pick %s into another branch?" % merge_hash)
    pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
    if pick_ref == "":
        pick_ref = default_branch

    pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())

    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
    run_cmd("git checkout %s" % pick_branch_name)
run_cmd("git cherry-pick -sx %s" % merge_hash)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
branches = get_json("%s/branches" % GIT_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
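# (note: "branch-0.9" sorts after "branch-0.10" lexicographically, so this
# needs revisiting once minor version numbers reach two digits)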
latest_branch = sorted(branch_names, reverse=True)[0]
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GIT_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] == True:
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
merge_commit_desc = run_cmd(['git', 'log', '--merges', '--first-parent',
'--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
if merge_commit_desc == "":
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
merge_hash = merge_commit_desc[:7]
message = merge_commit_desc[8:]
print "Found: %s" % message
maybe_cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if bool(pr["mergeable"]) == False:
fail("Pull request %s is not mergeable in its current form" % pr_num)
print ("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merge_hash = merge_pr(pr_num, target_ref)
while True:
    maybe_cherry_pick(pr_num, merge_hash, latest_branch)