From e2c90d28cd355dfb91fdfebe6056a828d7dcdac9 Mon Sep 17 00:00:00 2001
From: "W. Trevor King"
Date: Fri, 8 Feb 2019 23:07:24 -0800
Subject: [PATCH] scripts/install-release-image: Helper for launching from an
 install payload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A number of users have had issues mixing and matching installers and
update payloads.  This script removes a degree of freedom by
extracting the installer from the update payload itself.

You'll need a fairly new oc for this, since --image-for was only
added in openshift/origin@f1d50464 (Add `--image-for` and `--output`
to `oc adm release info`, 2019-01-10, openshift/origin#21766).

I've taken a fairly conservative approach to pushing host information
into the container.  If you leave off the AWS_* variables, the
installer will prompt you, which is a bit tedious but not the end of
the world.  With all the trappings for an AWS cluster, this could
look like:

    $ SSH_PUBKEY=~/.ssh/id_rsa.pub \
    > AWS_PROFILE=openshift-dev \
    > AWS_CONFIG_FILE=~/.aws/config \
    > AWS_SHARED_CREDENTIALS_FILE=~/.aws/credentials \
    > RELEASE=registry.svc.ci.openshift.org/openshift/origin-release:4.0.0-0.alpha-2019-02-06-200409 \
    > install-release-image create cluster

which is a bit of a mouthful :p.  We could set a default for
AWS_SHARED_CREDENTIALS_FILE and mount it into the container by
default, but I don't want callers to be concerned about leaking
information they may consider highly sensitive.  I'm less concerned
about SSH public keys or AWS_CONFIG_FILE being considered sensitive,
so the default behavior there is to mount them in from the usual
locations.

I'm setting HOME so I can mount in ~/.ssh, ~/.aws, etc. without
mounting those into the asset directory at /output.  We want the
mounted (semi-)secret data to be reaped with the container, with no
chance of persisting in the asset directory.

The mkdir call avoids:

    $ ASSETS=does-not-exist install-release-image.sh
    realpath: ‘does-not-exist’: No such file or directory
    failed to resolve asset path

since folks are likely to expect the installer's semantics (where it
creates the requested asset directory if it doesn't already exist).
We can't wait on the installer for that, though, because we use
realpath to convert ASSETS to an absolute path when setting up the
volume options for Podman.

The SC2059 disable avoids [2]:

    ./scripts/install-release-image.sh:50:9: note: Don't use variables in the printf format string. Use printf "..%s.." "$foo". [SC2059]

Folks calling 'die' should follow that advice; the 'printf' call
inside 'die' is just passing along the format string given by the
'die' caller.

[1]: https://github.com/openshift/machine-config-operator/blob/master/docs/Update-SSHKeys.md
[2]: https://storage.googleapis.com/origin-ci-test/pr-logs/pull/openshift_installer/1221/pull-ci-openshift-installer-shellcheck/3407/build-log.txt
---
 scripts/install-release-image.sh | 93 ++++++++++++++++++++++++++++++++
 1 file changed, 93 insertions(+)
 create mode 100755 scripts/install-release-image.sh

diff --git a/scripts/install-release-image.sh b/scripts/install-release-image.sh
new file mode 100755
index 00000000000..59773b0a9a6
--- /dev/null
+++ b/scripts/install-release-image.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+#
+# Given a release image, extract the referenced installer and use it
+# to launch a cluster based on that same release image.
+#
+# Usage:
+#
+#   install-release-image.sh [ARG...]
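+#
+# Example invocation for an AWS cluster (values are illustrative;
+# leave off any of the AWS_* variables and the installer will prompt
+# you for the missing settings):
+#
+#   SSH_PUBKEY=~/.ssh/id_rsa.pub \
+#   AWS_PROFILE=openshift-dev \
+#   AWS_CONFIG_FILE=~/.aws/config \
+#   AWS_SHARED_CREDENTIALS_FILE=~/.aws/credentials \
+#   RELEASE=registry.svc.ci.openshift.org/openshift/origin-release:4.0.0-0.alpha-2019-02-06-200409 \
+#   install-release-image.sh create cluster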
+#
+# Requires:
+#
+# * oc, to extract the installer image
+# * podman, to run the installer image
+# * realpath, which is not in POSIX as a shell command [1], but is in
+#   GNU coreutils [2].  The backing C function is in POSIX [3].
+# * A pull secret in ~/.docker/config.json for both oc and podman.
+#
+# Optional:
+#
+# * ASSETS
+#   The path is mounted into the installer container and used as the
+#   installer's asset directory.  Defaults to the current working
+#   directory.
+# * RELEASE
+#   The pull-spec for the release image.  Defaults to the development
+#   OKD tip.
+# * SSH_PUBKEY
+#   If set, this is mounted into the installer container at
+#   ~/.ssh/id_key.pub.  Otherwise all ~/.ssh/*.pub are mounted in.
+# * AWS_PROFILE [4]
+#   If set, this is also set in the installer container.
+# * AWS_CONFIG_FILE [4]
+#   This defaults to ~/.aws/config, and, if set to an existing file,
+#   it is mounted into the installer container at ~/.aws/config.
+# * AWS_SHARED_CREDENTIALS_FILE [4]
+#   If set, this is mounted into the installer container at
+#   ~/.aws/credentials.
+#
+# [1]: http://pubs.opengroup.org/onlinepubs/9699919799/idx/utilities.html
+# [2]: http://man7.org/linux/man-pages/man1/realpath.1.html
+# [3]: http://pubs.opengroup.org/onlinepubs/9699919799/functions/realpath.html
+# [4]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
+
+die () {
+        # shellcheck disable=SC2059
+        printf "${@}" >&2
+        exit 1
+}
+
+mkdir -p "${ASSETS:-.}" || die 'failed to create asset directory\n'
+ASSETS="$(realpath -e "${ASSETS:-.}")" || die 'failed to resolve asset path\n'
+RELEASE="${RELEASE:-registry.svc.ci.openshift.org/openshift/origin-release:v4.0}"
+INSTALLER="$(oc adm release info --image-for=installer "${RELEASE}")" || die 'failed to resolve installer image\n'
+set -- --env HOME=/home "${INSTALLER}" "${@}" || die 'failed to insert installer into podman arguments\n'
+if test -n "${SSH_PUBKEY}"
+then
+        SSH_PUBKEY="$(realpath -e "${SSH_PUBKEY}")" || die 'failed to resolve SSH_PUBKEY\n'
+        set -- --volume "${SSH_PUBKEY}:/home/.ssh/id_key.pub:ro,z" "${@}" || die 'failed to insert SSH_PUBKEY into podman arguments\n'
+else
+        for SSH_PUBKEY in ~/.ssh/*.pub
+        do
+                SSH_PUBKEY_FILENAME="$(basename "${SSH_PUBKEY}")" || die 'failed to split filename from %s\n' "${SSH_PUBKEY}"
+                set -- --volume "${SSH_PUBKEY}:/home/.ssh/${SSH_PUBKEY_FILENAME}:ro,z" "${@}" || die 'failed to insert %s into podman arguments\n' "${SSH_PUBKEY}"
+        done
+fi
+
+if test -n "${AWS_PROFILE}"
+then
+        set -- --env AWS_PROFILE="${AWS_PROFILE}" "${@}" || die 'failed to insert AWS_PROFILE into podman arguments\n'
+fi
+
+if test -n "${AWS_CONFIG_FILE}"
+then
+        AWS_CONFIG_FILE="$(realpath -e "${AWS_CONFIG_FILE}")" || die 'failed to resolve AWS_CONFIG_FILE\n'
+else
+        AWS_CONFIG_FILE=~/.aws/config
+fi
+if test -e "${AWS_CONFIG_FILE}"
+then
+        set -- --volume "${AWS_CONFIG_FILE}:/home/.aws/config:ro,z" "${@}" || die 'failed to insert AWS_CONFIG_FILE into podman arguments\n'
+fi
+
+if test -n "${AWS_SHARED_CREDENTIALS_FILE}"
+then
+        AWS_SHARED_CREDENTIALS_FILE="$(realpath -e "${AWS_SHARED_CREDENTIALS_FILE}")" || die 'failed to resolve AWS_SHARED_CREDENTIALS_FILE\n'
+        set -- --volume "${AWS_SHARED_CREDENTIALS_FILE}:/home/.aws/credentials:z" "${@}" || die 'failed to insert AWS_SHARED_CREDENTIALS_FILE into podman arguments\n'
+fi
+
+exec podman run --rm -it \
+        --user "$(id -u):$(id -g)" \
+        --env OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE="${RELEASE}" \
+        --volume "${ASSETS}:/output:z" \
+        "${@}"