spark-instrumented-optimizer/python/run-tests
jerryshao 3290d2d13b [SPARK-6211][Streaming] Add Python Kafka API unit test
Refactor the Kafka unit test and add Python API support. CC tdas davies please help to review, thanks a lot.

Author: jerryshao <saisai.shao@intel.com>
Author: Saisai Shao <saisai.shao@intel.com>

Closes #4961 from jerryshao/SPARK-6211 and squashes the following commits:

ee4b919 [jerryshao] Fixed newly merged issue
82c756e [jerryshao] Address the comments
92912d1 [jerryshao] Address the commits
0708bb1 [jerryshao] Fix rebase issue
40b47a3 [Saisai Shao] Style fix
f889657 [Saisai Shao] Update the code according
8a2f3e2 [jerryshao] Address the issues
0f1b7ce [jerryshao] Still fix the bug
61a04f0 [jerryshao] Fix bugs and address the issues
64d9877 [jerryshao] Fix rebase bugs
8ad442f [jerryshao] Add kafka-assembly in run-tests
6020b00 [jerryshao] Add more debug info in Shell
8102d6e [jerryshao] Fix bug in Jenkins test
fde1213 [jerryshao] Code style changes
5536f95 [jerryshao] Refactor the Kafka unit test and add Python Kafka unittest support
2015-04-09 23:14:24 -07:00

#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
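
# Runs the PySpark test suites (core, sql, mllib, ml, streaming) under each
# Python interpreter found on the PATH (python/python2.6, then pypy).
# Typical invocation (on Jenkins this is normally driven by dev/run-tests):
#
#   ./python/run-tests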
# Figure out where the Spark framework is installed
FWDIR="$(cd "`dirname "$0"`"; cd ../; pwd)"

. "$FWDIR"/bin/load-spark-env.sh

# CD into the python directory to find things on the right path
cd "$FWDIR/python"

FAILED=0
LOG_FILE=unit-tests.log

rm -f $LOG_FILE

# Remove the metastore and warehouse directory created by the HiveContext tests in Spark SQL
rm -rf metastore warehouse

# Run a single test file through bin/pyspark. Each run truncates $LOG_FILE,
# so on failure the log contains only the output of the test that failed.
function run_test() {
    echo "Running test: $1" | tee -a $LOG_FILE

    SPARK_TESTING=1 time "$FWDIR"/bin/pyspark "$1" > $LOG_FILE 2>&1

    # ${PIPESTATUS[0]} is the exit status of the test command itself; OR-ing
    # it with $FAILED keeps any earlier failure sticky.
    FAILED=$((PIPESTATUS[0]||$FAILED))

    # Fail and exit on the first test failure.
    if [[ $FAILED != 0 ]]; then
        # Filter out lines starting with a digit (e.g. timestamped log output)
        # so only test results and tracebacks are shown.
        grep -v "^[0-9][0-9]*" $LOG_FILE
        echo -en "\033[31m"  # Red
        echo "Had test failures; see logs."
        echo -en "\033[0m"  # No color
        exit 1
    fi
}
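
# For reference, a single suite can be exercised by hand the same way -- a
# minimal sketch (run from the python/ directory of a built Spark checkout):
#
#   SPARK_TESTING=1 ../bin/pyspark pyspark/rdd.py; echo "exit: $?"
#
# The nonzero exit status on failure is what gets folded into FAILED above.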
function run_core_tests() {
    echo "Run core tests ..."
    run_test "pyspark/rdd.py"
    run_test "pyspark/context.py"
    run_test "pyspark/conf.py"
    PYSPARK_DOC_TEST=1 run_test "pyspark/broadcast.py"
    PYSPARK_DOC_TEST=1 run_test "pyspark/accumulators.py"
    run_test "pyspark/serializers.py"
    run_test "pyspark/profiler.py"
    run_test "pyspark/shuffle.py"
    run_test "pyspark/tests.py"
}

function run_sql_tests() {
    echo "Run sql tests ..."
    run_test "pyspark/sql/types.py"
    run_test "pyspark/sql/context.py"
    run_test "pyspark/sql/dataframe.py"
    run_test "pyspark/sql/functions.py"
    run_test "pyspark/sql/tests.py"
}

function run_mllib_tests() {
    echo "Run mllib tests ..."
    run_test "pyspark/mllib/classification.py"
    run_test "pyspark/mllib/clustering.py"
    run_test "pyspark/mllib/evaluation.py"
    run_test "pyspark/mllib/feature.py"
    run_test "pyspark/mllib/fpm.py"
    run_test "pyspark/mllib/linalg.py"
    run_test "pyspark/mllib/rand.py"
    run_test "pyspark/mllib/recommendation.py"
    run_test "pyspark/mllib/regression.py"
    run_test "pyspark/mllib/stat/_statistics.py"
    run_test "pyspark/mllib/tree.py"
    run_test "pyspark/mllib/util.py"
    run_test "pyspark/mllib/tests.py"
}

function run_ml_tests() {
    echo "Run ml tests ..."
    run_test "pyspark/ml/feature.py"
    run_test "pyspark/ml/classification.py"
    run_test "pyspark/ml/tests.py"
}

function run_streaming_tests() {
    echo "Run streaming tests ..."

    KAFKA_ASSEMBLY_DIR="$FWDIR"/external/kafka-assembly
    JAR_PATH="${KAFKA_ASSEMBLY_DIR}/target/scala-${SPARK_SCALA_VERSION}"
    # If the glob matches nothing, $f stays the literal pattern, the -e test
    # fails, and we bail out with a build hint.
    for f in "${JAR_PATH}"/spark-streaming-kafka-assembly-*.jar; do
        if [[ ! -e "$f" ]]; then
            echo "Failed to find Spark Streaming Kafka assembly jar in $KAFKA_ASSEMBLY_DIR" 1>&2
            echo "You need to build Spark with " \
                "'build/sbt assembly/assembly streaming-kafka-assembly/assembly' or" \
                "'build/mvn package' before running this program" 1>&2
            exit 1
        fi
        KAFKA_ASSEMBLY_JAR="$f"
    done

    export PYSPARK_SUBMIT_ARGS="--jars ${KAFKA_ASSEMBLY_JAR} pyspark-shell"
    run_test "pyspark/streaming/util.py"
    run_test "pyspark/streaming/tests.py"
}
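
# PYSPARK_SUBMIT_ARGS above is picked up by PySpark's launcher and forwarded
# to spark-submit, which is how the Kafka assembly jar lands on the test
# classpath. A roughly equivalent manual run (a sketch; the actual jar name
# varies by version) would be:
#
#   PYSPARK_SUBMIT_ARGS="--jars /path/to/spark-streaming-kafka-assembly-<version>.jar pyspark-shell" \
#       SPARK_TESTING=1 ../bin/pyspark pyspark/streaming/tests.py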
echo "Running PySpark tests. Output is in python/$LOG_FILE."
export PYSPARK_PYTHON="python"
# Try to test with Python 2.6, since that's the minimum version that we support:
if command -v python2.6 > /dev/null; then
    export PYSPARK_PYTHON="python2.6"
fi
echo "Testing with Python version:"
$PYSPARK_PYTHON --version
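
# PYSPARK_PYTHON is honored by bin/pyspark when it launches the interpreter,
# so a suite can be tried under a specific Python by hand -- a sketch (not
# used by this script, which pins the interpreter above):
#
#   PYSPARK_PYTHON=python2.7 SPARK_TESTING=1 ../bin/pyspark pyspark/tests.py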
run_core_tests
run_sql_tests
run_mllib_tests
run_ml_tests
run_streaming_tests
# Try to test with PyPy
if command -v pypy > /dev/null; then
    export PYSPARK_PYTHON="pypy"
    echo "Testing with PyPy version:"
    $PYSPARK_PYTHON --version

    run_core_tests
    run_sql_tests
    run_streaming_tests
fi
if [[ $FAILED == 0 ]]; then
    echo -en "\033[32m"  # Green
    echo "Tests passed."
    echo -en "\033[0m"  # No color
fi
# TODO: in the long-run, it would be nice to use a test runner like `nose`.
# The doctest fixtures are the current barrier to doing this.