diff --git a/util.sh b/util.sh
index 31ce9fc4a..abeb1b2e1 100755
--- a/util.sh
+++ b/util.sh
@@ -39,31 +39,6 @@ kube::util::array_contains() {
   return 1
 }
 
-kube::util::wait_for_url() {
-  local url=$1
-  local prefix=${2:-}
-  local wait=${3:-1}
-  local times=${4:-30}
-  local maxtime=${5:-1}
-
-  command -v curl >/dev/null || {
-    kube::log::usage "curl must be installed"
-    exit 1
-  }
-
-  local i
-  for i in $(seq 1 "${times}"); do
-    local out
-    if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then
-      kube::log::status "On try ${i}, ${prefix}: ${out}"
-      return 0
-    fi
-    sleep "${wait}"
-  done
-  kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
-  return 1
-}
-
 # Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
 # See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
 kube::util::trap_add() {
@@ -93,422 +68,6 @@ kube::util::trap_add() {
   done
 }
 
-# Opposite of kube::util::ensure-temp-dir()
-kube::util::cleanup-temp-dir() {
-  rm -rf "${KUBE_TEMP}"
-}
-
-# Create a temp dir that'll be deleted at the end of this bash session.
-#
-# Vars set:
-#   KUBE_TEMP
-kube::util::ensure-temp-dir() {
-  if [[ -z ${KUBE_TEMP-} ]]; then
-    KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
-    kube::util::trap_add kube::util::cleanup-temp-dir EXIT
-  fi
-}
-
-kube::util::host_os() {
-  local host_os
-  case "$(uname -s)" in
-    Darwin)
-      host_os=darwin
-      ;;
-    Linux)
-      host_os=linux
-      ;;
-    *)
-      kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
-      exit 1
-      ;;
-  esac
-  echo "${host_os}"
-}
-
-kube::util::host_arch() {
-  local host_arch
-  case "$(uname -m)" in
-    x86_64*)
-      host_arch=amd64
-      ;;
-    i?86_64*)
-      host_arch=amd64
-      ;;
-    amd64*)
-      host_arch=amd64
-      ;;
-    aarch64*)
-      host_arch=arm64
-      ;;
-    arm64*)
-      host_arch=arm64
-      ;;
-    arm*)
-      host_arch=arm
-      ;;
-    i?86*)
-      host_arch=x86
-      ;;
-    s390x*)
-      host_arch=s390x
-      ;;
-    ppc64le*)
-      host_arch=ppc64le
-      ;;
-    *)
-      kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
-      exit 1
-      ;;
-  esac
-  echo "${host_arch}"
-}
-
-# This figures out the host platform without relying on golang. We need this as
-# we don't want a golang install to be a prerequisite to building yet we need
-# this info to figure out where the final binaries are placed.
-kube::util::host_platform() {
-  echo "$(kube::util::host_os)/$(kube::util::host_arch)"
-}
-
-kube::util::find-binary-for-platform() {
-  local -r lookfor="$1"
-  local -r platform="$2"
-  local locations=(
-    "${KUBE_ROOT}/_output/bin/${lookfor}"
-    "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
-    "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
-    "${KUBE_ROOT}/platforms/${platform}/${lookfor}"
-  )
-  # Also search for binary in bazel build tree.
-  # The bazel go rules place some binaries in subtrees like
-  # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
-  # the platform name is matched in the path.
-  while IFS=$'\n' read -r location; do
-    locations+=("$location");
-  done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
-    \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true)
-
-  # List most recently-updated location.
-  local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
-  echo -n "${bin}"
-}
-
-kube::util::find-binary() {
-  kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
-}
-
-# Run all known doc generators (today gendocs and genman for kubectl)
-# $1 is the directory to put those generated documents
-kube::util::gen-docs() {
-  local dest="$1"
-
-  # Find binary
-  gendocs=$(kube::util::find-binary "gendocs")
-  genkubedocs=$(kube::util::find-binary "genkubedocs")
-  genman=$(kube::util::find-binary "genman")
-  genyaml=$(kube::util::find-binary "genyaml")
-  genfeddocs=$(kube::util::find-binary "genfeddocs")
-
-  # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at
-  # least from k/k tree), remove it completely.
-  kube::util::sourced_variable "${genfeddocs}"
-
-  mkdir -p "${dest}/docs/user-guide/kubectl/"
-  "${gendocs}" "${dest}/docs/user-guide/kubectl/"
-  mkdir -p "${dest}/docs/admin/"
-  "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
-  "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
-  "${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager"
-  "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
-  "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
-  "${genkubedocs}" "${dest}/docs/admin/" "kubelet"
-  "${genkubedocs}" "${dest}/docs/admin/" "kubeadm"
-
-  mkdir -p "${dest}/docs/man/man1/"
-  "${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
-  "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
-  "${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager"
-  "${genman}" "${dest}/docs/man/man1/" "kube-proxy"
-  "${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
-  "${genman}" "${dest}/docs/man/man1/" "kubelet"
-  "${genman}" "${dest}/docs/man/man1/" "kubectl"
-  "${genman}" "${dest}/docs/man/man1/" "kubeadm"
-
-  mkdir -p "${dest}/docs/yaml/kubectl/"
-  "${genyaml}" "${dest}/docs/yaml/kubectl/"
-
-  # create the list of generated files
-  pushd "${dest}" > /dev/null || return 1
-  touch docs/.generated_docs
-  find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
-  popd > /dev/null || return 1
-}
-
-# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
-# must be set.
-kube::util::remove-gen-docs() {
-  if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
-    # remove all of the old docs; we don't want to check them in.
-    while read -r file; do
-      rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
-    done <"${KUBE_ROOT}/docs/.generated_docs"
-    # The docs/.generated_docs file lists itself, so we don't need to explicitly
-    # delete it.
-  fi
-}
-
-# Takes a group/version and returns the path to its location on disk, sans
-# "pkg". E.g.:
-# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
-# * default behavior for only a group: experimental -> apis/experimental
-# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
-# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
-# * Very special handling for when both group and version are "": / -> api
-kube::util::group-version-to-pkg-path() {
-  local group_version="$1"
-
-  while IFS=$'\n' read -r api; do
-    if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then
-      echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
-      return
-    fi
-  done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort)
-
-  # "v1" is the API GroupVersion
-  if [[ "${group_version}" == "v1" ]]; then
-    echo "vendor/k8s.io/api/core/v1"
-    return
-  fi
-
-  # Special cases first.
-  # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
-  # moving the results to pkg/apis/api.
-  case "${group_version}" in
-    # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
-    __internal)
-      echo "pkg/apis/core"
-      ;;
-    meta/v1)
-      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
-      ;;
-    meta/v1beta1)
-      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
-      ;;
-    *.k8s.io)
-      echo "pkg/apis/${group_version%.*k8s.io}"
-      ;;
-    *.k8s.io/*)
-      echo "pkg/apis/${group_version/.*k8s.io/}"
-      ;;
-    *)
-      echo "pkg/apis/${group_version%__internal}"
-      ;;
-  esac
-}
-
-# Takes a group/version and returns the swagger-spec file name.
-# default behavior: extensions/v1beta1 -> extensions_v1beta1
-# special case for v1: v1 -> v1
-kube::util::gv-to-swagger-name() {
-  local group_version="$1"
-  case "${group_version}" in
-    v1)
-      echo "v1"
-      ;;
-    *)
-      echo "${group_version%/*}_${group_version#*/}"
-      ;;
-  esac
-}
-
-# Returns the name of the upstream remote repository name for the local git
-# repo, e.g. "upstream" or "origin".
-kube::util::git_upstream_remote_name() {
-  git remote -v | grep fetch |\
-    grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
-    head -n 1 | awk '{print $1}'
-}
-
-# Ensures the current directory is a git tree for doing things like restoring or
-# validating godeps
-kube::util::create-fake-git-tree() {
-  local -r target_dir=${1:-$(pwd)}
-
-  pushd "${target_dir}" >/dev/null || return 1
-    git init >/dev/null
-    git config --local user.email "nobody@k8s.io"
-    git config --local user.name "$0"
-    git add . >/dev/null
-    git commit -q -m "Snapshot" >/dev/null
-    if (( ${KUBE_VERBOSE:-5} >= 6 )); then
-      kube::log::status "${target_dir} is now a git tree."
-    fi
-  popd >/dev/null || return 1
-}
-
-# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
-# and are checked out to the referenced rev.
-kube::util::godep_restored() {
-  local -r godeps_json=${1:-Godeps/Godeps.json}
-  local -r gopath=${2:-${GOPATH%:*}}
-
-  kube::util::require-jq
-
-  local root
-  local old_rev=""
-  while read -r path rev; do
-    rev="${rev//[\'\"]}" # remove quotes which are around revs sometimes
-
-    if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then
-      # avoid checking the same git/hg root again
-      continue
-    fi
-
-    root="${path}"
-    while [ "${root}" != "." ] && [ ! -d "${gopath}/src/${root}/.git" ] && [ ! -d "${gopath}/src/${root}/.hg" ]; do
-      root=$(dirname "${root}")
-    done
-    if [ "${root}" == "." ]; then
-      echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2
-      return 1
-    fi
-    local head
-    if [ -d "${gopath}/src/${root}/.git" ]; then
-      head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)"
-    else
-      head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')"
-    fi
-    if [ "${head}" != "${rev}" ]; then
-      echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2
-      return 1
-    fi
-    old_rev="${rev}"
-  done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}")
-  return 0
-}
-
-# Exits script if working directory is dirty. If it's run interactively in the terminal
-# the user can commit changes in a second terminal. This script will wait.
-kube::util::ensure_clean_working_dir() {
-  while ! git diff HEAD --exit-code &>/dev/null; do
-    echo -e "\nUnexpected dirty working directory:\n"
-    if tty -s; then
-        git status -s
-    else
-        git diff -a # be more verbose in log files without tty
-        exit 1
-    fi | sed 's/^/ /'
-    echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
-    read -r
-  done 1>&2
-}
-
-# Ensure that the given godep version is installed and in the path. Almost
-# nobody should use any version but the default.
-#
-# Sets:
-#  KUBE_GODEP: The path to the godep binary
-#
-kube::util::ensure_godep_version() {
-  local godep_target_version=${1:-"v80-k8s-r1"} # this version is known to work
-
-  # If KUBE_GODEP is already set, and it's the right version, then use it.
-  if [[ -n "${KUBE_GODEP:-}" && "$(${KUBE_GODEP:?} version 2>/dev/null)" == *"godep ${godep_target_version}"* ]]; then
-    kube::log::status "Using ${KUBE_GODEP}"
-    return
-  fi
-
-  # Otherwise, install forked godep
-  kube::log::status "Installing godep version ${godep_target_version}"
-  GOBIN="${KUBE_OUTPUT_BINPATH}" go install k8s.io/kubernetes/third_party/forked/godep
-  export KUBE_GODEP="${KUBE_OUTPUT_BINPATH}/godep"
-  kube::log::status "Installed ${KUBE_GODEP}"
-
-  # Verify that the installed godep from fork is what we expect
-  if [[ "$(${KUBE_GODEP:?} version 2>/dev/null)" != *"godep ${godep_target_version}"* ]]; then
-    kube::log::error "Expected godep ${godep_target_version} from ${KUBE_GODEP}, got $(${KUBE_GODEP:?} version)"
-    return 1
-  fi
-}
-
-# Ensure that none of the staging repos is checked out in the GOPATH because this
-# easily confused godep.
-kube::util::ensure_no_staging_repos_in_gopath() {
-  kube::util::ensure_single_dir_gopath
-  local error=0
-  for repo_file in "${KUBE_ROOT}"/staging/src/k8s.io/*; do
-    if [[ ! -d "${repo_file}" ]]; then
-      # not a directory or there were no files
-      continue;
-    fi
-    repo="$(basename "${repo_file}")"
-    if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then
-      echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2
-      error=1
-    fi
-  done
-  if [ "${error}" = "1" ]; then
-    exit 1
-  fi
-}
-
-# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
-kube::util::ensure_single_dir_gopath() {
-  if [[ "${GOPATH}" == *:* ]]; then
-    echo "GOPATH must consist of a single directory." 1>&2
-    exit 1
-  fi
-}
-
-# Find the base commit using:
-# $PULL_BASE_SHA if set (from Prow)
-# current ref from the remote upstream branch
-kube::util::base_ref() {
-  local -r git_branch=$1
-
-  if [[ -n ${PULL_BASE_SHA:-} ]]; then
-    echo "${PULL_BASE_SHA}"
-    return
-  fi
-
-  full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
-
-  # make sure the branch is valid, otherwise the check will pass erroneously.
-  if ! git describe "${full_branch}" >/dev/null; then
-    # abort!
-    exit 1
-  fi
-
-  echo "${full_branch}"
-}
-
-# Checks whether there are any files matching pattern $2 changed between the
-# current branch and upstream branch named by $1.
-# Returns 1 (false) if there are no changes
-#         0 (true) if there are changes detected.
-kube::util::has_changes() {
-  local -r git_branch=$1
-  local -r pattern=$2
-  local -r not_pattern=${3:-totallyimpossiblepattern}
-
-  local base_ref
-  base_ref=$(kube::util::base_ref "${git_branch}")
-  echo "Checking for '${pattern}' changes against '${base_ref}'"
-
-  # notice this uses ... to find the first shared ancestor
-  if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
-    return 0
-  fi
-  # also check for pending changes
-  if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
-    echo "Detected '${pattern}' uncommitted changes."
-    return 0
-  fi
-  echo "No '${pattern}' changes detected."
-  return 1
-}
-
 kube::util::download_file() {
   local -r url=$1
   local -r destination_file=$2
@@ -528,152 +87,6 @@ kube::util::download_file() {
   return 1
 }
 
-# Test whether openssl is installed.
-# Sets:
-#  OPENSSL_BIN: The path to the openssl binary to use
-function kube::util::test_openssl_installed {
-    if ! openssl version >& /dev/null; then
-      echo "Failed to run openssl. Please ensure openssl is installed"
-      exit 1
-    fi
-
-    OPENSSL_BIN=$(command -v openssl)
-}
-
-# creates a client CA, args are sudo, dest-dir, ca-id, purpose
-# purpose is dropped in after "key encipherment", you usually want
-# '"client auth"'
-# '"server auth"'
-# '"client auth","server auth"'
-function kube::util::create_signing_certkey {
-    local sudo=$1
-    local dest_dir=$2
-    local id=$3
-    local purpose=$4
-    # Create client ca
-    ${sudo} /usr/bin/env bash -e < "${dest_dir}/${id}-ca-config.json"
-EOF
-}
-
-# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
-function kube::util::create_client_certkey {
-    local sudo=$1
-    local dest_dir=$2
-    local ca=$3
-    local id=$4
-    local cn=${5:-$4}
-    local groups=""
-    local SEP=""
-    shift 5
-    while [ -n "${1:-}" ]; do
-        groups+="${SEP}{\"O\":\"$1\"}"
-        SEP=","
-        shift 1
-    done
-    ${sudo} /usr/bin/env bash -e < /dev/null
-apiVersion: v1
-kind: Config
-clusters:
-  - cluster:
-      certificate-authority: ${ca_file}
-      server: https://${api_host}:${api_port}/
-    name: local-up-cluster
-users:
-  - user:
-      token: ${token}
-      client-certificate: ${dest_dir}/client-${client_id}.crt
-      client-key: ${dest_dir}/client-${client_id}.key
-    name: local-up-cluster
-contexts:
-  - context:
-      cluster: local-up-cluster
-      user: local-up-cluster
-    name: local-up-cluster
-current-context: local-up-cluster
-EOF
-
-    # flatten the kubeconfig files to make them self contained
-    username=$(whoami)
-    ${sudo} /usr/bin/env bash -e < "/tmp/${client_id}.kubeconfig"
-    mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
-    chown ${username} "${dest_dir}/${client_id}.kubeconfig"
-EOF
-}
-
-# Determines if docker can be run, failures may simply require that the user be added to the docker group.
-function kube::util::ensure_docker_daemon_connectivity {
-  IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}"
-  # Expand ${DOCKER[@]} only if it's not unset. This is to work around
-  # Bash 3 issue with unbound variable.
-  DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"})
-  if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
-    cat <<'EOF' >&2
-Can't connect to 'docker' daemon. please fix and retry.
-
-Possible causes:
-  - Docker Daemon not started
-    - Linux: confirm via your init system
-    - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start `
-    - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
-  - DOCKER_HOST hasn't been set or is set incorrectly
-    - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
-    - macOS w/ docker-machine: run `eval "$(docker-machine env )"`
-    - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
-  - Other things to check:
-    - Linux: User isn't in 'docker' group. Add and relogin.
-      - Something like 'sudo usermod -a -G docker ${USER}'
-      - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
-EOF
-    return 1
-  fi
-}
-
 # Wait for background jobs to finish. Return with
 # an error status if any of the jobs failed.
 kube::util::wait-for-jobs() {
@@ -696,101 +109,6 @@ function kube::util::join {
   echo "$*"
 }
 
-# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
-#
-# Assumed vars:
-#   $1 (cfssl directory) (optional)
-#
-# Sets:
-#   CFSSL_BIN: The path of the installed cfssl binary
-#   CFSSLJSON_BIN: The path of the installed cfssljson binary
-#
-function kube::util::ensure-cfssl {
-  if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
-    CFSSL_BIN=$(command -v cfssl)
-    CFSSLJSON_BIN=$(command -v cfssljson)
-    return 0
-  fi
-
-  host_arch=$(kube::util::host_arch)
-
-  if [[ "${host_arch}" != "amd64" ]]; then
-    echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed."
-    echo "Please install cfssl and cfssljson and verify they are in \$PATH."
-    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
-    exit 1
-  fi
-
-  # Create a temp dir for cfssl if no directory was given
-  local cfssldir=${1:-}
-  if [[ -z "${cfssldir}" ]]; then
-    kube::util::ensure-temp-dir
-    cfssldir="${KUBE_TEMP}/cfssl"
-  fi
-
-  mkdir -p "${cfssldir}"
-  pushd "${cfssldir}" > /dev/null || return 1
-
-  echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..."
-  kernel=$(uname -s)
-  case "${kernel}" in
-    Linux)
-      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
-      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
-      ;;
-    Darwin)
-      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
-      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
-      ;;
-    *)
-      echo "Unknown, unsupported platform: ${kernel}." >&2
-      echo "Supported platforms: Linux, Darwin." >&2
-      exit 2
-  esac
-
-  chmod +x cfssl || true
-  chmod +x cfssljson || true
-
-  CFSSL_BIN="${cfssldir}/cfssl"
-  CFSSLJSON_BIN="${cfssldir}/cfssljson"
-  if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
-    echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
-    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
-    exit 1
-  fi
-  popd > /dev/null || return 1
-}
-
-# kube::util::ensure_dockerized
-# Confirms that the script is being run inside a kube-build image
-#
-function kube::util::ensure_dockerized {
-  if [[ -f /kube-build-image ]]; then
-    return 0
-  else
-    echo "ERROR: This script is designed to be run inside a kube-build container"
-    exit 1
-  fi
-}
-
-# kube::util::ensure-gnu-sed
-# Determines which sed binary is gnu-sed on linux/darwin
-#
-# Sets:
-#  SED: The name of the gnu-sed binary
-#
-function kube::util::ensure-gnu-sed {
-  if LANG=C sed --help 2>&1 | grep -q GNU; then
-    SED="sed"
-  elif command -v gsed &>/dev/null; then
-    SED="gsed"
-  else
-    kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
-    return 1
-  fi
-  kube::util::sourced_variable "${SED}"
-}
-
 # kube::util::check-file-in-alphabetical-order
 # Check that the file is in alphabetical order
 #
@@ -808,15 +126,6 @@ function kube::util::check-file-in-alphabetical-order {
   fi
 }
 
-# kube::util::require-jq
-# Checks whether jq is installed.
-function kube::util::require-jq {
-  if ! command -v jq &>/dev/null; then
-    echo "jq not found. Please install." 1>&2
-    return 1
-  fi
-}
-
 # Some useful colors.
 if [[ -z "${color_start-}" ]]; then
   declare -r color_start="\033["
diff --git a/verify-shellcheck.sh b/verify-shellcheck.sh
index 1b882fa8b..fd28021ac 100755
--- a/verify-shellcheck.sh
+++ b/verify-shellcheck.sh
@@ -18,9 +18,12 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
-source "${KUBE_ROOT}/hack/lib/init.sh"
-source "${KUBE_ROOT}/hack/lib/util.sh"
+# The csi-release-tools directory.
+TOOLS="$(dirname "${BASH_SOURCE[0]}")"
+. "${TOOLS}/util.sh"
+
+# Directory to check. Default is the parent of the tools themselves.
+ROOT="${1:-${TOOLS}/..}"
 
 # required version for this script, if not installed on the host we will
 # use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
@@ -56,15 +59,15 @@ create_container () {
   # we're done.
   # This is incredibly much faster than creating a container for each shellcheck
   # call ...
-  docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${KUBE_ROOT}:${KUBE_ROOT}" -w "${KUBE_ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647
+  docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${ROOT}:${ROOT}" -w "${ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647
 }
 # removes the shellcheck container
 remove_container () {
   docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true
 }
 
-# ensure we're linting the k8s source tree
-cd "${KUBE_ROOT}"
+# ensure we're linting the source tree
+cd "${ROOT}"
 
 # find all shell scripts excluding ./_*, ./.git/*, ./vendor*,
 # and anything git-ignored
@@ -78,16 +81,6 @@ done < <(find . -name "*.sh" \
     -path ./vendor\* \
   \))
 
-# make sure known failures are sorted
-failure_file="${KUBE_ROOT}/hack/.shellcheck_failures"
-kube::util::check-file-in-alphabetical-order "${failure_file}"
-
-# load known failure files
-failing_files=()
-while IFS=$'\n' read -r script;
-  do failing_files+=("$script");
-done < <(cat "${failure_file}")
-
 # detect if the host machine has the required shellcheck version installed
 # if so, we will use that instead.
 HAVE_SHELLCHECK=false
@@ -119,7 +112,6 @@ fi
 
 # lint each script, tracking failures
 errors=()
-not_failing=()
 for f in "${all_shell_scripts[@]}"; do
   set +o errexit
   if ${HAVE_SHELLCHECK}; then
@@ -129,18 +121,14 @@ for f in "${all_shell_scripts[@]}"; do
       shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}")
   fi
   set -o errexit
-  kube::util::array_contains "${f}" "${failing_files[@]}" && in_failing=$? || in_failing=$?
-  if [[ -n "${failedLint}" ]] && [[ "${in_failing}" -ne "0" ]]; then
-    errors+=( "${failedLint}" )
-  fi
-  if [[ -z "${failedLint}" ]] && [[ "${in_failing}" -eq "0" ]]; then
-    not_failing+=( "${f}" )
+  if [[ -n "${failedLint}" ]]; then
+    errors+=( "${failedLint}" )
   fi
 done
 
 # Check to be sure all the packages that should pass lint are.
 if [ ${#errors[@]} -eq 0 ]; then
-  echo 'Congratulations! All shell files are passing lint (excluding those in hack/.shellcheck_failures).'
+  echo 'Congratulations! All shell files are passing lint.'
 else
   {
     echo "Errors from shellcheck:"
@@ -149,38 +137,9 @@ else
     done
    echo
    echo 'Please review the above warnings. You can test via "./hack/verify-shellcheck"'
-    echo 'If the above warnings do not make sense, you can exempt this package from shellcheck'
-    echo 'checking by adding it to hack/.shellcheck_failures (if your reviewer is okay with it).'
-    echo
-  } >&2
-  false
-fi
-
-if [[ ${#not_failing[@]} -gt 0 ]]; then
-  {
-    echo "Some packages in hack/.shellcheck_failures are passing shellcheck. Please remove them."
-    echo
-    for f in "${not_failing[@]}"; do
-      echo " $f"
-    done
-    echo
-  } >&2
-  false
-fi
-
-# Check that all failing_packages actually still exist
-gone=()
-for f in "${failing_files[@]}"; do
-  kube::util::array_contains "$f" "${all_shell_scripts[@]}" || gone+=( "$f" )
-done
-
-if [[ ${#gone[@]} -gt 0 ]]; then
-  {
-    echo "Some files in hack/.shellcheck_failures do not exist anymore. Please remove them."
-    echo
-    for f in "${gone[@]}"; do
-      echo " $f"
-    done
+    echo 'If the above warnings do not make sense, you can exempt them from shellcheck'
+    echo 'checking by adding the "shellcheck disable" directive'
+    echo '(https://github.com/koalaman/shellcheck/wiki/Directive#disable).'
     echo
   } >&2
   false