spark-instrumented-optimizer/dev/create-release/do-release-docker.sh
Wenchen Fan 95ba000279 [SPARK-35872][INFRA] Automatize some steps to finalize the release
### What changes were proposed in this pull request?

After the RC vote passes, the release manager still needs to do a lot of manual work to finalize the release. This PR updates the script to automate some of these steps (example invocation below):
1. create the final git tag
2. publish to PyPI
3. publish docs to spark-website
4. move the release binaries from the dev directory to the release directory
5. update the KEYS file
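
For example, once the vote has passed, the new finalize step can be run on its own with the `-s` flag (the working directory path here is illustrative):

```shell
# Run only the finalize step against an existing working directory.
# The script asks for confirmation (PMC members only) and prompts for the
# PyPI password if PYPI_PASSWORD is not already set in the environment.
./dev/create-release/do-release-docker.sh -d /path/to/release-workdir -s finalize
```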

### Why are the changes needed?

It eases the work of the release manager.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Tested with the recent 3.0.3 release.

Closes #33055 from cloud-fan/release.

Authored-by: Wenchen Fan <wenchen@databricks.com>
Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
2021-06-24 13:25:41 -07:00

#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Creates a Spark release candidate. The script will update versions, tag the branch,
# build Spark binary packages and documentation, and upload maven artifacts to a staging
# repository. There is also a dry run mode where only local builds are performed, and
# nothing is uploaded to the ASF repos.
#
# Run with "-h" for options.
#
set -e
SELF=$(cd "$(dirname "$0")" && pwd)
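# Shared helpers such as error, run_silent, fcreate_secure and
# get_release_info are defined in release-util.sh.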
. "$SELF/release-util.sh"
function usage {
  local NAME=$(basename "$0")
  cat <<EOF
Usage: $NAME [options]

This script runs the release scripts inside a docker image. The image is hardcoded to be called
"spark-rm" and will be re-generated (as needed) on every invocation of this script.

Options are:

  -d [path]  : required: working directory (output will be written to an "output" directory in
               the working directory).
  -n         : dry run mode. Performs checks and local builds, but does not upload anything.
  -t [tag]   : tag for the spark-rm docker image to use for building (default: "latest").
  -j [path]  : path to a local JDK installation to use for building. By default the script will
               use openjdk8 installed in the docker image.
  -s [step]  : runs a single step of the process; valid steps are: tag, build, docs, publish, finalize.
EOF
}
WORKDIR=
IMGTAG=latest
JAVA=
RELEASE_STEP=
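# Parse command-line flags. The leading ':' in the option spec puts getopts
# in silent error mode, so unrecognized options reach the \? case below
# instead of getopts printing its own error message.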
while getopts ":d:hj:ns:t:" opt; do
  case $opt in
    d) WORKDIR="$OPTARG" ;;
    n) DRY_RUN=1 ;;
    t) IMGTAG="$OPTARG" ;;
    j) JAVA="$OPTARG" ;;
    s) RELEASE_STEP="$OPTARG" ;;
    h) usage ;;
    \?) error "Invalid option. Run with -h for help." ;;
  esac
done
if [ -z "$WORKDIR" ] || [ ! -d "$WORKDIR" ]; then
  error "Work directory (-d) must be defined and exist. Run with -h for help."
fi
if [ -d "$WORKDIR/output" ]; then
  read -p "Output directory already exists. Overwrite and continue? [y/n] " ANSWER
  if [ "$ANSWER" != "y" ]; then
    error "Exiting."
  fi
fi
if [ ! -z "$RELEASE_STEP" ] && [ "$RELEASE_STEP" = "finalize" ]; then
  echo "THIS STEP IS IRREVERSIBLE! Make sure the vote has passed and you pick the right RC to finalize."
  read -p "You must be a PMC member to run this step. Continue? [y/n] " ANSWER
  if [ "$ANSWER" != "y" ]; then
    error "Exiting."
  fi

  if [ -z "$PYPI_PASSWORD" ]; then
    stty -echo && printf "PyPI password: " && read PYPI_PASSWORD && printf '\n' && stty echo
  fi
fi
cd "$WORKDIR"
rm -rf "$WORKDIR/output"
mkdir "$WORKDIR/output"
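# Prompt for the release details (branch, versions, RC tag, ASF credentials);
# get_release_info is defined in release-util.sh.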
get_release_info
# Place all RM scripts and necessary data in a local directory that must be defined in the command
# line. This directory is mounted into the image.
for f in "$SELF"/*; do
  if [ -f "$f" ]; then
    cp "$f" "$WORKDIR"
  fi
done
# Add the fallback version of Gemfile, Gemfile.lock and .bundle/config to the local directory.
cp "$SELF/../../docs/Gemfile" "$WORKDIR"
cp "$SELF/../../docs/Gemfile.lock" "$WORKDIR"
cp -r "$SELF/../../docs/.bundle" "$WORKDIR"
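# Export the armored GPG signing key into the work directory so it is
# available inside the container; fcreate_secure (from release-util.sh)
# creates the file with owner-only permissions before the secret is written.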
GPG_KEY_FILE="$WORKDIR/gpg.key"
fcreate_secure "$GPG_KEY_FILE"
$GPG --export-secret-key --armor --pinentry-mode loopback --passphrase "$GPG_PASSPHRASE" "$GPG_KEY" > "$GPG_KEY_FILE"
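# Build the spark-rm image if needed. The host UID is passed as a build arg
# so the container user matches the invoking user and files written to the
# mounted work directory keep the right ownership.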
run_silent "Building spark-rm image with tag $IMGTAG..." "docker-build.log" \
  docker build -t "spark-rm:$IMGTAG" --build-arg UID=$UID "$SELF/spark-rm"
# Write the release information to a file with environment variables to be used when running the
# image.
ENVFILE="$WORKDIR/env.list"
fcreate_secure "$ENVFILE"
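# env.list and gpg.key hold credentials; remove them however the script exits.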
function cleanup {
  rm -f "$ENVFILE"
  rm -f "$GPG_KEY_FILE"
}
trap cleanup EXIT
cat > "$ENVFILE" <<EOF
DRY_RUN=$DRY_RUN
SKIP_TAG=$SKIP_TAG
RUNNING_IN_DOCKER=1
GIT_BRANCH=$GIT_BRANCH
NEXT_VERSION=$NEXT_VERSION
RELEASE_VERSION=$RELEASE_VERSION
RELEASE_TAG=$RELEASE_TAG
GIT_REF=$GIT_REF
SPARK_PACKAGE_VERSION=$SPARK_PACKAGE_VERSION
ASF_USERNAME=$ASF_USERNAME
GIT_NAME=$GIT_NAME
GIT_EMAIL=$GIT_EMAIL
GPG_KEY=$GPG_KEY
ASF_PASSWORD=$ASF_PASSWORD
PYPI_PASSWORD=$PYPI_PASSWORD
GPG_PASSPHRASE=$GPG_PASSPHRASE
RELEASE_STEP=$RELEASE_STEP
USER=$USER
EOF
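# If a local JDK was provided with -j, mount it into the container and point
# JAVA_HOME at the mount point.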
JAVA_VOL=
if [ -n "$JAVA" ]; then
  echo "JAVA_HOME=/opt/spark-java" >> "$ENVFILE"
  JAVA_VOL="--volume $JAVA:/opt/spark-java"
fi
echo "Building $RELEASE_TAG; output will be at $WORKDIR/output"
docker run -ti \
  --env-file "$ENVFILE" \
  --volume "$WORKDIR:/opt/spark-rm" \
  $JAVA_VOL \
  "spark-rm:$IMGTAG"