spark-instrumented-optimizer/python/run-tests
Andrew Or fe78b8b6f7 HOTFIX: A few PySpark tests were not actually run
This is a hot fix for the hot fix in fb499be1ac. The changes in that commit did not actually cause the `doctest` module in Python to be loaded for the following tests:
- pyspark/broadcast.py
- pyspark/accumulators.py
- pyspark/serializers.py

(@pwendell I might have told you the wrong thing)
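
For context on the mechanism being fixed: when SPARK_TESTING is set, bin/pyspark special-cases test invocations, and PYSPARK_DOC_TEST selects Python's doctest runner for the given file. A minimal sketch of that dispatch, assuming the bin/pyspark of the time (illustrative, not the verbatim script):

    # Sketch of the test dispatch in bin/pyspark (assumed, for illustration)
    if [[ -n "$SPARK_TESTING" ]]; then
      if [[ -n "$PYSPARK_DOC_TEST" ]]; then
        exec "$PYSPARK_PYTHON" -m doctest "$1"   # run the module's docstring tests
      else
        exec "$PYSPARK_PYTHON" "$1"              # execute the file directly
      fi
      exit
    fi

Without PYSPARK_DOC_TEST exported, the three files above were executed directly, so their module-level doctests never ran; run-tests now accounts for this.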

Author: Andrew Or <andrewor14@gmail.com>

Closes #1053 from andrewor14/python-test-fix and squashes the following commits:

d2e5401 [Andrew Or] Explain why these tests are handled differently
0bd6fdd [Andrew Or] Fix 3 pyspark tests not being invoked
2014-06-11 12:11:46 -07:00


#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Figure out where the Spark framework is installed
FWDIR="$(cd "$(dirname "$0")"; cd ..; pwd)"

# CD into the python directory to find things on the right path
cd "$FWDIR/python"

FAILED=0

rm -f unit-tests.log

# Remove the metastore and warehouse directory created by the HiveContext tests in Spark SQL
rm -rf metastore warehouse

function run_test() {
    echo "Running test: $1"
    SPARK_TESTING=1 "$FWDIR"/bin/pyspark "$1" 2>&1 | tee -a unit-tests.log
    # PIPESTATUS[0] is the exit status of pyspark itself (the first command
    # in the pipeline), not of tee; OR-ing it with $FAILED latches any failure.
    FAILED=$((PIPESTATUS[0]||$FAILED))

    # Fail and exit on the first test failure.
    if [[ $FAILED != 0 ]]; then
        grep -v "^[0-9][0-9]*" unit-tests.log  # filter all lines starting with a number.
        echo -en "\033[31m"  # Red
        echo "Had test failures; see logs."
        echo -en "\033[0m"   # No color
        exit 1
    fi
}
echo "Running PySpark tests. Output is in python/unit-tests.log."
run_test "pyspark/rdd.py"
run_test "pyspark/context.py"
run_test "pyspark/conf.py"
if [ -n "$_RUN_SQL_TESTS" ]; then
run_test "pyspark/sql.py"
fi

# These tests are included in the module-level docs, and so must
# be handled on a higher level rather than within the python file.
export PYSPARK_DOC_TEST=1
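# (With PYSPARK_DOC_TEST set, bin/pyspark is expected to hand each file to
# Python's doctest runner, roughly `python -m doctest <file>`, instead of
# executing it directly; see the sketch in the commit message above.)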
run_test "pyspark/broadcast.py"
run_test "pyspark/accumulators.py"
run_test "pyspark/serializers.py"
unset PYSPARK_DOC_TEST
run_test "pyspark/tests.py"
run_test "pyspark/mllib/_common.py"
run_test "pyspark/mllib/classification.py"
run_test "pyspark/mllib/clustering.py"
run_test "pyspark/mllib/linalg.py"
run_test "pyspark/mllib/recommendation.py"
run_test "pyspark/mllib/regression.py"
run_test "pyspark/mllib/tests.py"

if [[ $FAILED == 0 ]]; then
    echo -en "\033[32m"  # Green
    echo "Tests passed."
    echo -en "\033[0m"   # No color
fi

# TODO: in the long run, it would be nice to use a test runner like `nose`.
# The doctest fixtures are the current barrier to doing this.