#!/usr/bin/env bash

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

#
# Creates a Spark release candidate. The script will update versions, tag the branch,
# build Spark binary packages and documentation, and upload maven artifacts to a staging
# repository. There is also a dry run mode where only local builds are performed, and
# nothing is uploaded to the ASF repos.
#
# Run with "-h" for options.
#

set -e
SELF=$(cd $(dirname $0) && pwd)
. "$SELF/release-util.sh"

function usage {
  local NAME=$(basename $0)
  cat <<EOF
Usage: $NAME [options]

This script runs the release scripts inside a docker image. The image is hardcoded to be called
"spark-rm" and will be re-generated (as needed) on every invocation of this script.

Options are:

  -d [path]    : required: working directory (output will be written to an "output" directory in
                 the working directory).
  -n           : dry run mode. Performs checks and local builds, but does not upload anything.
  -t [tag]     : tag for the spark-rm docker image to use for building (default: "latest").
  -j [path]    : path to a local JDK installation to use for building. By default the script
                 uses the JDK installed in the docker image.
EOF
}

WORKDIR=
IMGTAG=latest
JAVA=
while getopts ":d:hj:nt:" opt; do
  case $opt in
    d) WORKDIR="$OPTARG" ;;
    n) DRY_RUN=1 ;;
    t) IMGTAG="$OPTARG" ;;
    j) JAVA="$OPTARG" ;;
    h) usage; exit 0 ;;
    \?) error "Invalid option. Run with -h for help." ;;
  esac
done

if [ -z "$WORKDIR" ] || [ ! -d "$WORKDIR" ]; then
  error "Work directory (-d) must be defined and exist. Run with -h for help."
fi

if [ -d "$WORKDIR/output" ]; then
  read -p "Output directory already exists. Overwrite and continue? [y/n] " ANSWER
  if [ "$ANSWER" != "y" ]; then
    error "Exiting."
  fi
fi

cd "$WORKDIR"
rm -rf "$WORKDIR/output"
mkdir "$WORKDIR/output"

get_release_info
export GPG_PASSPHRASE

# Place all RM scripts and necessary data in a local directory that must be defined in the
# command line. This directory is mounted into the image.
for f in "$SELF"/*; do
  if [ -f "$f" ]; then
    cp "$f" "$WORKDIR"
  fi
done

GPG_KEY_FILE="$WORKDIR/gpg.key"
fcreate_secure "$GPG_KEY_FILE"
$GPG --export-secret-key --armor "$GPG_KEY" > "$GPG_KEY_FILE"

run_silent "Building spark-rm image with tag $IMGTAG..." "docker-build.log" \
  docker build -t "spark-rm:$IMGTAG" --build-arg UID=$UID "$SELF/spark-rm"

# Write the release information to a file with environment variables to be used when running the
# image.
ENVFILE="$WORKDIR/env.list"
fcreate_secure "$ENVFILE"

function cleanup {
  rm -f "$ENVFILE"
  rm -f "$GPG_KEY_FILE"
}

trap cleanup EXIT

cat > $ENVFILE <<EOF
DRY_RUN=$DRY_RUN
RUNNING_IN_DOCKER=1
GIT_BRANCH=$GIT_BRANCH
GIT_REF=$GIT_REF
NEXT_VERSION=$NEXT_VERSION
RELEASE_VERSION=$RELEASE_VERSION
RELEASE_TAG=$RELEASE_TAG
SPARK_PACKAGE_VERSION=$SPARK_PACKAGE_VERSION
ASF_USERNAME=$ASF_USERNAME
ASF_PASSWORD=$ASF_PASSWORD
GIT_NAME=$GIT_NAME
GIT_EMAIL=$GIT_EMAIL
GPG_KEY=$GPG_KEY
GPG_PASSPHRASE=$GPG_PASSPHRASE
USER=$USER
EOF

JAVA_VOL=
if [ -n "$JAVA" ]; then
  echo "JAVA_HOME=/opt/spark-java" >> $ENVFILE
  JAVA_VOL="--volume $JAVA:/opt/spark-java"
fi

echo "Building $RELEASE_TAG; output will be at $WORKDIR/output"
docker run -ti \
  --env-file "$ENVFILE" \
  --volume "$WORKDIR:/opt/spark-rm" \
  $JAVA_VOL \
  "spark-rm:$IMGTAG"
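
# Example invocation (a sketch, not part of the release procedure itself). The working
# directory, image tag, and JDK path below are placeholders, and the command assumes this
# script is saved as do-release-docker.sh:
#
#   mkdir -p "$HOME/spark-rc-work"
#   ./do-release-docker.sh -d "$HOME/spark-rc-work" -n            # dry run: checks and local builds only
#   ./do-release-docker.sh -d "$HOME/spark-rc-work" -t rc-build -j /usr/lib/jvm/java-8-openjdk
#
# With -n nothing is uploaded to the ASF repos; without it the script tags the branch, builds
# packages and docs, and uploads maven artifacts to a staging repository.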