Update shellcheck version (0.7.2 -> 0.8.0) and fix findings #113541

Merged: 1 commit, Nov 12, 2022
build/lib/release.sh: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ function kube::release::package_client_tarballs() {
for platform_long in "${long_platforms[@]}"; do
local platform
local platform_tag
platform=${platform_long##${LOCAL_OUTPUT_BINPATH}/} # Strip LOCAL_OUTPUT_BINPATH
platform=${platform_long##"${LOCAL_OUTPUT_BINPATH}"/} # Strip LOCAL_OUTPUT_BINPATH
platform_tag=${platform/\//-} # Replace a "/" for a "-"
kube::log::status "Starting tarball: client $platform_tag"

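For context on the release.sh change above: shellcheck 0.8.0 introduces SC2295, which warns that an unquoted expansion inside `${..}` is treated as a glob pattern rather than a literal string. A minimal sketch of the difference, using a hypothetical path that happens to contain pattern characters:

```sh
#!/usr/bin/env bash
# Illustration only: LOCAL_OUTPUT_BINPATH is given a made-up value containing
# glob characters to show what SC2295 is guarding against.
LOCAL_OUTPUT_BINPATH='/tmp/out[put]/bin'
platform_long="${LOCAL_OUTPUT_BINPATH}/linux/amd64"

# Unquoted: the expanded prefix is treated as a pattern, "[put]" becomes a
# character class, the prefix no longer matches, and nothing is stripped.
echo "${platform_long##${LOCAL_OUTPUT_BINPATH}/}"    # /tmp/out[put]/bin/linux/amd64

# Quoted (the new form): the prefix is matched literally, as intended.
echo "${platform_long##"${LOCAL_OUTPUT_BINPATH}"/}"  # linux/amd64
```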
hack/lib/test.sh: 24 additions & 11 deletions
@@ -82,8 +82,8 @@ kube::test::object_assert() {

for j in $(seq 1 "${tries}"); do
# shellcheck disable=SC2086
# Disabling because "args" needs to allow for expansion here
res=$(eval kubectl get "${kube_flags[@]}" ${args} "${object}" -o go-template=\""${request}"\")
# Disabling to allow for expansion here
res=$(kubectl get "${kube_flags[@]}" ${args} ${object} -o go-template="${request}")
if [[ "${res}" =~ ^$expected$ ]]; then
echo -n "${green}"
echo "$(kube::test::get_caller 3): Successful get ${object} ${request}: ${res}"
@@ -110,7 +110,9 @@ kube::test::get_object_jsonpath_assert() {
local request=$2
local expected=$3

res=$(eval kubectl get "${kube_flags[@]}" "${object}" -o jsonpath=\""${request}"\")
# shellcheck disable=SC2086
# Disabling to allow for expansion here
res=$(kubectl get "${kube_flags[@]}" ${object} -o jsonpath=${request})

if [[ "${res}" =~ ^$expected$ ]]; then
echo -n "${green}"
@@ -135,7 +137,9 @@ kube::test::describe_object_assert() {
local object=$2
local matches=( "${@:3}" )

result=$(eval kubectl describe "${kube_flags[@]}" "${resource}" "${object}")
# shellcheck disable=SC2086
# Disabling to allow for expansion here
result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})

for match in "${matches[@]}"; do
if grep -q "${match}" <<< "${result}"; then
@@ -166,10 +170,12 @@ kube::test::describe_object_events_assert() {
local object=$2
local showevents=${3:-"true"}

# shellcheck disable=SC2086
# Disabling to allow for expansion here
if [[ -z "${3:-}" ]]; then
result=$(eval kubectl describe "${kube_flags[@]}" "${resource}" "${object}")
result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})
else
result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" "${resource}" "${object}")
result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource} ${object})
fi

if grep -q "No events.\|Events:" <<< "${result}"; then
@@ -203,7 +209,9 @@ kube::test::describe_resource_assert() {
local resource=$1
local matches=( "${@:2}" )

result=$(eval kubectl describe "${kube_flags[@]}" "${resource}")
# shellcheck disable=SC2086
# Disabling to allow for expansion here
result=$(kubectl describe "${kube_flags[@]}" ${resource})

for match in "${matches[@]}"; do
if grep -q "${match}" <<< "${result}"; then
@@ -233,7 +241,9 @@ kube::test::describe_resource_events_assert() {
local resource=$1
local showevents=${2:-"true"}

result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" "${resource}")
# shellcheck disable=SC2086
# Disabling to allow for expansion here
result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource})

if grep -q "No events.\|Events:" <<< "${result}"; then
local has_events="true"
@@ -273,8 +283,9 @@ kube::test::describe_resource_chunk_size_assert() {
local expectLists
IFS="," read -r -a expectLists <<< "${resource},${additionalResources}"

# Default chunk size
defaultResult=$(eval kubectl describe "${resource}" --show-events=true -v=6 "${args}" "${kube_flags[@]}" 2>&1 >/dev/null)
# shellcheck disable=SC2086
# Disabling to allow for expansion here
defaultResult=$(kubectl describe ${resource} --show-events=true -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
for r in "${expectLists[@]}"; do
if grep -q "${r}?.*limit=500" <<< "${defaultResult}"; then
echo "query for ${r} had limit param"
@@ -292,8 +303,10 @@ kube::test::describe_resource_chunk_size_assert() {
fi
done

# shellcheck disable=SC2086
# Disabling to allow for expansion here
# Try a non-default chunk size
customResult=$(eval kubectl describe "${resource}" --show-events=false --chunk-size=10 -v=6 "${args}" "${kube_flags[@]}" 2>&1 >/dev/null)
customResult=$(kubectl describe ${resource} --show-events=false --chunk-size=10 -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
if grep -q "${resource}?limit=10" <<< "${customResult}"; then
echo "query for ${resource} had user-specified limit param"
else
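Incidentally, the `2>&1 >/dev/null` at the end of these describe calls is order-sensitive: it routes stderr (where the -v=6 request log goes) into the command substitution and discards stdout (the describe output itself), which is what the `limit=` greps rely on. A toy illustration of that redirection order:

```sh
#!/usr/bin/env bash
noisy() {                                 # stand-in for "kubectl describe ... -v=6"
  echo "Name: frontend"                   # normal describe output on stdout
  echo "GET /api/v1/pods?limit=500" >&2   # verbose request log on stderr
}

captured=$(noisy 2>&1 >/dev/null)   # stderr -> captured, stdout -> /dev/null
echo "${captured}"                  # GET /api/v1/pods?limit=500

# Reversing the order (">/dev/null 2>&1") would discard both streams and the
# grep for "limit=" would never match.
```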
hack/make-rules/clean.sh: 2 additions & 2 deletions
@@ -30,8 +30,8 @@ CLEAN_PATTERNS=(

for pattern in "${CLEAN_PATTERNS[@]}"; do
while IFS=$'\n' read -r match; do
echo "Removing ${match#${KUBE_ROOT}\/} .."
rm -rf "${match#${KUBE_ROOT}\/}"
echo "Removing ${match#"${KUBE_ROOT}"\/} .."
rm -rf "${match#"${KUBE_ROOT}"\/}"
done < <(find "${KUBE_ROOT}" -iregex "^${KUBE_ROOT}/${pattern}$")
done

hack/verify-shellcheck.sh: 2 additions & 2 deletions
@@ -30,8 +30,8 @@ DOCKER="${DOCKER:-docker}"

# required version for this script, if not installed on the host we will
# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
SHELLCHECK_VERSION="0.7.2"
SHELLCHECK_IMAGE="docker.io/koalaman/shellcheck-alpine:v0.7.2@sha256:ce6fd9cc808a47d1d121ba92c203ecc02e8ed78e0e4c412f7fca54c2e954526d"
SHELLCHECK_VERSION="0.8.0"
SHELLCHECK_IMAGE="docker.io/koalaman/shellcheck-alpine:v0.8.0@sha256:f42fde76d2d14a645a848826e54a4d650150e151d9c81057c898da89a82c8a56"
Review comment (Member): aside: should we be depending on stuff like this? It kinda feels like we shouldn't...

Reply (Member): what do you mean?


# disabled lints
disabled=(
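For whoever bumps this pin next, below is one way to obtain the digest for a new tag. It assumes a local docker daemon (podman or crane should work similarly), so treat it as a sketch rather than the project's official procedure:

```sh
# Pull the tag, then read back the repo digest it resolved to; the
# "<repo>@sha256:..." value is what gets pinned in SHELLCHECK_IMAGE.
docker pull docker.io/koalaman/shellcheck-alpine:v0.8.0
docker image inspect --format '{{index .RepoDigests 0}}' \
  docker.io/koalaman/shellcheck-alpine:v0.8.0
```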
test/cmd/apps.sh: 5 additions & 5 deletions
@@ -306,14 +306,14 @@ run_deployment_tests() {
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Pre-condition: no hpa exists
kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \"nginx-deployment\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
# Dry-run autoscale
kubectl-with-retry autoscale deployment nginx-deployment --dry-run=client "${kube_flags[@]:?}" --min=2 --max=3
kubectl-with-retry autoscale deployment nginx-deployment --dry-run=server "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \"nginx-deployment\" }}found{{end}}{{end}}:" ':'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
@@ -648,21 +648,21 @@ run_rs_tests() {
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]:?}"
# Post-condition: no pods from frontend replica set
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::test::wait_object_assert "pods -l tier=frontend" "{{range.items}}{{${id_field:?}}}:{{end}}" ''

### Create and then delete a replica set with cascading strategy set to orphan, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# wait for all 3 pods to be set up
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::test::wait_object_assert "pods -l tier=frontend" "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]:?}" --cascade=orphan
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::test::get_object_assert "pods -l tier=frontend" "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]:?}"
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
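These caller-side edits (here and in test/cmd/batch.sh below) track the helper changes in hack/lib/test.sh: with eval gone there is one less round of parsing, so the go-templates need only a single layer of escaped quotes, and a selector written as `-l "tier=frontend"` has to drop its embedded quotes because they would now reach kubectl as literal characters. A small sketch of that second point:

```sh
#!/usr/bin/env bash
# Simulates how the assert helpers now split their ${object} argument.
print_words() { printf '<%s> ' "$@"; echo; }

print_words pods -l tier=frontend   # <pods> <-l> <tier=frontend>

object='pods -l "tier=frontend"'    # the old caller value
print_words ${object}               # <pods> <-l> <"tier=frontend"> -- the quotes
                                    # are literal now, so kubectl would see a
                                    # malformed label selector
```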
test/cmd/batch.sh: 5 additions & 5 deletions
@@ -27,18 +27,18 @@ run_job_tests() {

### Create a new namespace
# Pre-condition: the test-jobs namespace does not exist
kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq ${id_field:?} \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq ${id_field:?} \"test-jobs\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace test-jobs
# Post-condition: namespace 'test-jobs' is created.
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs'

# Pre-condition: cronjob does not exist
kube::test::get_object_assert 'cronjob --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'cronjob --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \"pi\" }}found{{end}}{{end}}:" ':'
# Dry-run create CronJob
kubectl create cronjob pi --dry-run=client --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
kubectl create cronjob pi --dry-run=server --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
kube::test::get_object_assert 'cronjob' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'cronjob' "{{range.items}}{{ if eq $id_field \"pi\" }}found{{end}}{{end}}:" ':'
### Create a cronjob in a specific namespace
kubectl create cronjob pi --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
# Post-Condition: assertion object exists
@@ -56,11 +56,11 @@ run_job_tests() {
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}{{end}}" ''

# Pre-condition: job does not exist
kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \"test-jobs\" }}found{{end}}{{end}}:" ':'
### Dry-run create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=client
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=server
kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \"test-jobs\" }}found{{end}}{{end}}:" ':'
### Create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs
# Post-Condition: assertion object exists