Error in kubectl drain --dry-run=server #100206

Merged: 2 commits, Apr 9, 2021
2 changes: 2 additions & 0 deletions staging/src/k8s.io/kubectl/pkg/drain/drain.go
@@ -220,6 +220,8 @@ func filterPods(podList *corev1.PodList, filters []PodFilter) *PodDeleteList {
// Add the pod to PodDeleteList no matter what PodDeleteStatus is,
// those pods whose PodDeleteStatus is false like DaemonSet will
// be caught by list.errors()
pod.Kind = "Pod"
pod.APIVersion = "v1"
Contributor Author:
Is there a better way to add this? The reason it is not present is that these objects were loaded from a PodList, which does not include type meta on its items in the API response.

pods = append(pods, PodDelete{
Pod: pod,
Status: status,
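For context on the comment above: pods decoded from a PodList response come back with an empty TypeMeta, so Kind and APIVersion have to be filled in by hand before a pod can be used where a fully specified object is expected (for example, a server-side dry-run request). Below is a minimal, self-contained sketch of that behaviour, not code from this PR; it uses client-go's fake clientset and a hypothetical fillTypeMeta helper purely for illustration.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Fake clientset pre-loaded with one pod; the point being illustrated is
	// that items returned inside a PodList carry no Kind/APIVersion of their own.
	client := fake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "test-pod-1", Namespace: "default"},
	})
	if err := fillTypeMeta(context.Background(), client); err != nil {
		panic(err)
	}
}

// fillTypeMeta lists pods and sets Kind/APIVersion on each item, mirroring
// what the drain fix does before the pods are used in follow-up requests.
func fillTypeMeta(ctx context.Context, client kubernetes.Interface) error {
	podList, err := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range podList.Items {
		pod := &podList.Items[i]
		// Kind and APIVersion are empty here: TypeMeta describes the PodList
		// itself, not the individual items it contains.
		fmt.Printf("before: kind=%q apiVersion=%q\n", pod.Kind, pod.APIVersion)

		pod.Kind = "Pod"
		pod.APIVersion = "v1"
	}
	return nil
}
```

Running the sketch prints empty kind and apiVersion for the listed pod before the two fields are set explicitly, which is the same gap the two added lines in drain.go close.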
61 changes: 40 additions & 21 deletions test/cmd/node-management.sh
@@ -18,15 +18,7 @@ set -o errexit
set -o nounset
set -o pipefail

run_cluster_management_tests() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Testing cluster-management commands"

kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'

create_test_pods(){
# create test pods we can work with
kubectl create -f - "${kube_flags[@]:?}" << __EOF__
{
@@ -35,10 +27,12 @@ run_cluster_management_tests() {
"metadata": {
"name": "test-pod-1",
"labels": {
"e": "f"
"e": "f",
"type": "test-pod"
}
},
"spec": {
"nodeName": "127.0.0.1",
"containers": [
{
"name": "container-1",
@@ -57,10 +51,12 @@ __EOF__
"metadata": {
"name": "test-pod-2",
"labels": {
"c": "d"
"c": "d",
"type": "test-pod"
}
},
"spec": {
"nodeName": "127.0.0.1",
"containers": [
{
"name": "container-1",
@@ -71,6 +67,24 @@ __EOF__
}
}
__EOF__
}

delete_test_pods() {
# need to use --force because node is unready
kubectl delete pod/test-pod-1 --force --ignore-not-found
kubectl delete pod/test-pod-2 --force --ignore-not-found
}

run_cluster_management_tests() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Testing cluster-management commands"

kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'

create_test_pods

# taint/untaint
# Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
@@ -109,8 +123,8 @@ __EOF__
### kubectl drain update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl drain "127.0.0.1" --dry-run=client
kubectl drain "127.0.0.1" --dry-run=server
kubectl drain "127.0.0.1" --dry-run=client --force
kubectl drain "127.0.0.1" --dry-run=server --force
# Post-condition: node still exists, node is still schedulable
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -121,15 +135,17 @@ __EOF__
# Pre-condition: test-pod-1 and test-pod-2 exist
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
# dry-run command
kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=client
kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=server
kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=client --force
kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=server --force
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
# command
kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
# only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
# delete pod no longer in use
kubectl delete pod/test-pod-2
# command - need --force because pod is unmanaged and --skip-wait-for-delete-timeout because node is unready
response=$(kubectl drain "127.0.0.1" --force --pod-selector 'e in (f)' --skip-wait-for-delete-timeout=1)
kube::test::if_has_string "${response}" "evicting pod .*/test-pod-1"
# only "test-pod-1" should have been matched and deleted - test-pod-2 should not have a deletion timestamp
kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.deletionTimestamp}}" '<no value>'
# Post-condition: recreate test pods -- they have deletionTimestamp set but will not go away because node is unready
delete_test_pods
create_test_pods
Contributor Author:
Recreating because I'll need them again in the tests I'm adding in #100148

# Post-condition: node is schedulable
kubectl uncordon "127.0.0.1"
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -170,6 +186,9 @@ __EOF__
# Post-condition: node "127.0.0.1" is cordoned
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'

# Clean up test pods
delete_test_pods

set +o nounset
set +o errexit
}