Bump shellcheck to v0.7.0 #84249

Merged: 5 commits, Nov 4, 2019
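This PR bumps the shellcheck used by hack/verify-shellcheck.sh from v0.6.0 to v0.7.0 and fixes the warnings the new version reports. Most of the diff appends || exit 1 to negated commands in the test scripts: those scripts run under set -o errexit, but a command prefixed with ! is exempt from errexit, so a "this must fail" assertion whose command unexpectedly succeeded would never abort the run. This is the pattern shellcheck v0.7.0 flags as SC2251 ("this ! is not on a condition and skips errexit"). A minimal sketch of the pitfall and the fix, illustrative rather than taken from the PR:

#!/usr/bin/env bash
set -o errexit

! false            # status 1 is negated to 0, and in any case a leading '!'
                   # exempts the command from errexit entirely
echo "still running"

! true || exit 1   # 'true' unexpectedly succeeds, so '! true' yields status 1
                   # and the explicit '|| exit 1' now aborts the script
echo "never reached"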
10 changes: 6 additions & 4 deletions build/common.sh
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+# shellcheck disable=SC2034 # Variables sourced in other scripts.
+
# Common utilities, variables and checks for all build scripts.
set -o errexit
set -o nounset
@@ -97,10 +99,10 @@ kube::build::get_docker_wrapped_binaries() {
### If you change any of these lists, please also update DOCKERIZED_BINARIES
### in build/BUILD. And kube::golang::server_image_targets
local targets=(
-kube-apiserver,"${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
-kube-controller-manager,"${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
-kube-scheduler,"${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
-kube-proxy,"${KUBE_BASE_IMAGE_REGISTRY}/debian-iptables-${arch}:${debian_iptables_version}"
+"kube-apiserver,${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
+"kube-controller-manager,${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
+"kube-scheduler,${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
+"kube-proxy,${KUBE_BASE_IMAGE_REGISTRY}/debian-iptables-${arch}:${debian_iptables_version}"
)

echo "${targets[@]}"
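Two kinds of v0.7.0 findings are fixed in build/common.sh above. The new SC2034 directive silences "variable appears unused" for a file whose variables are only read by scripts that source it, and the targets entries are requoted so the quotes cover each whole name,image element rather than only the expansion inside it; the resulting words are identical, but the fully quoted form is clearer to readers and to the linter. A small sketch of the SC2034 situation, using hypothetical file and variable names:

# vars.sh: definitions only, so every variable looks unused to a per-file linter
# shellcheck disable=SC2034  # Variables sourced in other scripts.
EXAMPLE_REGISTRY="registry.example.com"

# build.sh: the real consumer, where the variable is finally read
# source vars.sh
# echo "pushing images to ${EXAMPLE_REGISTRY}"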
2 changes: 1 addition & 1 deletion cluster/gce/gci/mounter/stage-upload.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
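The mounter upload script above gains a bash shebang. shellcheck infers the dialect to check from the shebang, and #!/bin/sh would hold the script to POSIX sh; the env form also locates bash through PATH instead of hardcoding /bin/bash. A sketch of the kind of construct that is valid bash but a parse error under a strict /bin/sh such as dash (illustrative, not code from this script):

#!/usr/bin/env bash
# arrays and [[ ]] are bash features; dash rejects both outright
flags=(--recursive --checksum)
if [[ ${#flags[@]} -gt 0 ]]; then
  echo "extra flags: ${flags[*]}"
fi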
2 changes: 2 additions & 0 deletions hack/lib/golang.sh
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+# shellcheck disable=SC2034 # Variables sourced in other scripts.
+
# The golang package that we are building.
readonly KUBE_GO_PACKAGE=k8s.io/kubernetes
readonly KUBE_GOPATH="${KUBE_OUTPUT}/go"
2 changes: 2 additions & 0 deletions hack/lib/test.sh
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+# shellcheck disable=SC2034 # Variables sourced in other scripts.
+
# A set of helpers for tests

readonly reset=$(tput sgr0)
6 changes: 3 additions & 3 deletions hack/verify-shellcheck.sh
@@ -24,9 +24,9 @@ source "${KUBE_ROOT}/hack/lib/util.sh"

# required version for this script, if not installed on the host we will
# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
-SHELLCHECK_VERSION="0.6.0"
-# upstream shellcheck latest stable image as of January 10th, 2019
-SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52"
+SHELLCHECK_VERSION="0.7.0"
+# upstream shellcheck latest stable image as of October 23rd, 2019
+SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.7.0@sha256:24bbf52aae6eaa27accc9f61de32d30a1498555e6ef452966d0702ff06f38ecb"

# fixed name for the shellcheck docker container so we can reliably clean it up
SHELLCHECK_CONTAINER="k8s-shellcheck"
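The bump itself is the pair of settings above: the version expected of a locally installed shellcheck, and the fallback docker image, pinned by sha256 digest so that a moving tag cannot change what runs; the comment requires the two to stay in sync. A rough sketch of how the script might consume the pair, assuming a hypothetical all_shell_scripts array and paraphrasing rather than quoting hack/verify-shellcheck.sh:

if command -v shellcheck >/dev/null &&
   [[ "$(shellcheck --version | awk '/^version:/ {print $2}')" == "${SHELLCHECK_VERSION}" ]]; then
  shellcheck "${all_shell_scripts[@]}"    # host binary matches the pinned version
else
  docker run --rm -v "${PWD}:/mnt" -w /mnt \
    "${SHELLCHECK_IMAGE}" shellcheck "${all_shell_scripts[@]}"   # pinned image fallback
fi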
8 changes: 4 additions & 4 deletions test/cmd/apply.sh
@@ -55,9 +55,9 @@ run_kubectl_apply_tests() {
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]]
# Post-Condition: deployment "test-deployment-retainkeys" has updated fields
grep -q Recreate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
-! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
grep -q hostPath <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
-! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
# Clean up
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}"

@@ -119,7 +119,7 @@ __EOF__
# Dry-run create the CR
kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
# Make sure that the CR doesn't exist
-! kubectl "${kube_flags[@]:?}" get resource/myobj
+! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1

# clean-up
kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
@@ -317,7 +317,7 @@ __EOF__
# Dry-run create the CR
kubectl "${kube_flags[@]:?}" apply --server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
# Make sure that the CR doesn't exist
-! kubectl "${kube_flags[@]:?}" get resource/myobj
+! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1

# clean-up
kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
12 changes: 6 additions & 6 deletions test/cmd/apps.sh
@@ -296,7 +296,7 @@ run_deployment_tests() {
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
-! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}"
+! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}" || exit 1
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
@@ -305,9 +305,9 @@ run_deployment_tests() {
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}"
# A paused deployment cannot be rolled back
-! kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
+! kubectl rollout undo deployment nginx "${kube_flags[@]:?}" || exit 1
# A paused deployment cannot be restarted
-! kubectl rollout restart deployment nginx "${kube_flags[@]:?}"
+! kubectl rollout restart deployment nginx "${kube_flags[@]:?}" || exit 1
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}"
# The resumed deployment can now be rolled back
@@ -316,7 +316,7 @@ run_deployment_tests() {
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
-! kubectl rollout status deployment/nginx --revision=3
+! kubectl rollout status deployment/nginx --revision=3 || exit 1
# Restarting the deployment creates a new replicaset
kubectl rollout restart deployment/nginx
sleep 1
@@ -342,7 +342,7 @@ run_deployment_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set non-existing container should fail
-! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}"
+! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}" || exit 1
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
@@ -656,7 +656,7 @@ run_rs_tests() {
kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]:?}"
# autoscale without specifying --max should fail
-! kubectl autoscale rs frontend "${kube_flags[@]:?}"
+! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1
# Clean up
kubectl delete rs frontend "${kube_flags[@]:?}"
fi
30 changes: 15 additions & 15 deletions test/cmd/core.sh
@@ -185,15 +185,15 @@ run_pod_tests() {
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
-! kubectl delete pods "${kube_flags[@]}"
+! kubectl delete pods "${kube_flags[@]}" || exit 1
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

### Delete PODs with --all and a label selector is not permitted
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
-! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
+! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}" || exit 1
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

@@ -255,7 +255,7 @@ run_pod_tests() {
kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'

### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
-! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod
+! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod || exit 1

# Create a pod that consumes secret, configmap, and downward API keys as envs
kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -567,7 +567,7 @@ __EOF__
grep -q 'Edit cancelled' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod 2>&1)"
grep -q 'name: valid-pod' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod)"
grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings pod/valid-pod | file - )"
-! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )"
+! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )" || exit 1
grep -q 'kind: List' <<< "$(EDITOR="cat" kubectl edit ns)"

### Label POD YAML file locally without effecting the live pod.
@@ -584,7 +584,7 @@ __EOF__
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
-! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
+! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}" || exit 1
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'

@@ -631,15 +631,15 @@ __EOF__
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
-! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
+! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )" || exit 1
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
-! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
+! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
@@ -657,7 +657,7 @@ __EOF__
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
-! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]]
+! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]] || exit 1
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
@@ -883,7 +883,7 @@ run_service_tests() {

# Set selector of a local file without talking to the server
kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
-! kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
+kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
# Set command to change the selector.
kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan
# prove role=padawan
@@ -894,7 +894,7 @@ run_service_tests() {
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Show dry-run works on running selector
kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}"
-! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}"
+! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"

### Dump current redis-master service
@@ -1086,7 +1086,7 @@ run_rc_tests() {
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
-! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
+! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}" || exit 1
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

@@ -1250,7 +1250,7 @@ run_rc_tests() {
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
-! kubectl autoscale rc frontend "${kube_flags[@]}"
+! kubectl autoscale rc frontend "${kube_flags[@]}" || exit 1
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"

@@ -1259,7 +1259,7 @@ run_rc_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Set resources of a local file without talking to the server
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
-! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
+! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}" || exit 1
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
@@ -1270,7 +1270,7 @@ run_rc_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set a non-existing container should fail
-! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
+! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m || exit 1
# Set the limit of a specific container in deployment
kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
@@ -1282,7 +1282,7 @@ run_rc_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Show dry-run works on running deployments
kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
-! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
+! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}" || exit 1
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
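One change in test/cmd/core.sh above runs opposite to the rest: the local-file --dry-run selector check loses its leading ! instead of gaining || exit 1. Presumably that command actually succeeds, and the negated assertion had only appeared to pass because errexit ignores ! commands; once the assertion became real, it had to assert success, matching the neighboring "Show dry-run works" checks.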
2 changes: 1 addition & 1 deletion test/cmd/crd.sh
@@ -243,7 +243,7 @@ run_non_native_resource_tests() {
kubectl "${kube_flags[@]}" get foos/test -o json > "${CRD_RESOURCE_FILE}"
# cannot apply strategic patch locally
CRD_PATCH_ERROR_FILE="${KUBE_TEMP}/crd-foos-test-error"
-! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
+! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}" || exit 1
if grep -q "try --type merge" "${CRD_PATCH_ERROR_FILE}"; then
kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat "${CRD_PATCH_ERROR_FILE}")"
else
2 changes: 1 addition & 1 deletion test/cmd/create.sh
@@ -50,7 +50,7 @@ run_kubectl_create_error_tests() {
kube::log::status "Testing kubectl create with error"

# Passing no arguments to create is an error
-! kubectl create
+! kubectl create || exit 1

## kubectl create should not panic on empty string lists in a template
ERROR_FILE="${KUBE_TEMP}/validation-error"