spark-instrumented-optimizer/python/run-tests

#!/usr/bin/env bash
# Figure out where Spark is installed (the parent of this script's directory)
FWDIR="$(cd "$(dirname "$0")/.."; pwd)"
FAILED=0
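# Run each test module under pyspark; OR each exit status into FAILED so a
# failure in any one module is remembered even if later modules succeed.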
$FWDIR/pyspark pyspark/rdd.py
FAILED=$(($?||$FAILED))
$FWDIR/pyspark pyspark/context.py
FAILED=$(($?||$FAILED))
$FWDIR/pyspark -m doctest pyspark/broadcast.py
FAILED=$(($?||$FAILED))
$FWDIR/pyspark -m doctest pyspark/accumulators.py
FAILED=$(($?||$FAILED))
$FWDIR/pyspark -m unittest pyspark.tests
FAILED=$(($?||$FAILED))
if [[ $FAILED -ne 0 ]]; then
    echo -en "\033[31m"  # Red
    echo "Had test failures; see logs."
    echo -en "\033[0m"   # No color
    exit 1
else
    echo -en "\033[32m"  # Green
    echo "Tests passed."
    echo -en "\033[0m"   # No color
fi
# TODO: in the long run, it would be nice to use a test runner like `nose`.
# The doctest fixtures are the current barrier to doing this.
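# A minimal sketch of what that might look like, assuming nose is installed
# and the doctest fixtures have been ported (--with-doctest enables nose's
# doctest plugin; the invocation below is illustrative, not part of this
# script yet):
#
#   $FWDIR/pyspark -m nose --with-doctest pyspark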