Automated cherry pick of #89539: Fixes problem where kubectl apply stops after first error #89607

Merged
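
This cherry-pick changes kubectl apply so that it keeps applying the remaining objects after one object fails: errors are collected per object and returned at the end, either directly (single error) or combined with utilerrors.NewAggregate (multiple errors).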
19 changes: 19 additions & 0 deletions hack/testdata/multi-resource.yaml
@@ -0,0 +1,19 @@
# Tests that initial failures do not block subsequent applies.
# The pod must come before the namespace, so it initially fails. A second
# apply of the pod should succeed, since the namespace exists by then.
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
  namespace: multi-resource-ns
  labels:
    name: test-pod-label
spec:
  containers:
  - name: kubernetes-pause
    image: k8s.gcr.io/pause:2.0
---
apiVersion: v1
kind: Namespace
metadata:
  name: multi-resource-ns
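
With this fixture, the first apply fails to create the pod (its namespace does not exist yet) but still goes on to create the namespace; a second apply then creates the pod. The new test in test/cmd/apply.sh below exercises exactly this sequence.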
1 change: 1 addition & 0 deletions staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD
@@ -22,6 +22,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
276 changes: 147 additions & 129 deletions staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go
@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/printers"
@@ -371,53 +372,78 @@ func (o *ApplyOptions) Run() error {

    // Generates the objects using the resource builder if they have not
    // already been stored by calling "SetObjects()" in the pre-processor.
    errs := []error{}
    infos, err := o.GetObjects()
    if err != nil {
        errs = append(errs, err)
    }
    if len(infos) == 0 && len(errs) == 0 {
        return fmt.Errorf("no objects passed to apply")
    }
    // Iterate through all objects, applying each one.
    for _, info := range infos {
        if err := o.applyOneObject(info); err != nil {
            errs = append(errs, err)
        }
    }
    // If any errors occurred during apply, then return error (or
    // aggregate of errors).
    if len(errs) == 1 {
        return errs[0]
    }
    if len(errs) > 1 {
        return utilerrors.NewAggregate(errs)
    }

    if o.PostProcessorFn != nil {
        klog.V(4).Infof("Running apply post-processor function")
        if err := o.PostProcessorFn(); err != nil {
            return err
        }
    }

    return nil
}

func (o *ApplyOptions) applyOneObject(info *resource.Info) error {
    o.MarkNamespaceVisited(info)

    if err := o.Recorder.Record(info.Object); err != nil {
        klog.V(4).Infof("error recording current command: %v", err)
    }

    if o.ServerSideApply {
        // Send the full object to be applied on the server side.
        data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object)
        if err != nil {
            return cmdutil.AddSourceToErr("serverside-apply", info.Source, err)
        }

        options := metav1.PatchOptions{
            Force:        &o.ForceConflicts,
            FieldManager: o.FieldManager,
        }

        helper := resource.NewHelper(info.Client, info.Mapping)
        if o.DryRunStrategy == cmdutil.DryRunServer {
            if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
                return err
            }
            helper.DryRun(true)
        }
        obj, err := helper.Patch(
            info.Namespace,
            info.Name,
            types.ApplyPatchType,
            data,
            &options,
        )
        if err != nil {
            if isIncompatibleServerError(err) {
                err = fmt.Errorf("Server-side apply not available on the server: (%v)", err)
            }
            if errors.IsConflict(err) {
                err = fmt.Errorf(`%v
Please review the fields above--they currently have other managers. Here
are the ways you can resolve this warning:
* If you intend to manage all of these fields, please re-run the apply
  command with the `+"`--force-conflicts`"+` flag.
* If you do not intend to manage all of the fields, please edit your
  manifest to remove references to the fields that should keep their
  current managers.
* You may co-own fields by updating your manifest to match the existing
  value; in this case, you'll become the manager if the other manager(s)
  stop managing the field (remove it from their configuration).
See http://k8s.io/docs/reference/using-api/api-concepts/#conflicts`, err)
            }
            return err
        }

        info.Refresh(obj, true)

        if err := o.MarkObjectVisited(info); err != nil {
            return err
        }

        if o.shouldPrintObject() {
            return nil
        }

        printer, err := o.ToPrinter("serverside-applied")
        if err != nil {
            return err
        }

        if err = printer.PrintObj(info.Object, o.Out); err != nil {
            return err
        }
        return nil
    }

    // Get the modified configuration of the object. Embed the result
    // as an annotation in the modified configuration, so that it will appear
    // in the patch sent to the server.
    modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme)
    if err != nil {
        return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err)
    }

    if err := info.Get(); err != nil {
        if !errors.IsNotFound(err) {
            return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err)
        }

        // Create the resource if it doesn't exist
        // First, update the annotation used by kubectl apply
        if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil {
            return cmdutil.AddSourceToErr("creating", info.Source, err)
        }

        if o.DryRunStrategy != cmdutil.DryRunClient {
            // Then create the resource and skip the three-way merge
            helper := resource.NewHelper(info.Client, info.Mapping)
            if o.DryRunStrategy == cmdutil.DryRunServer {
                if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
                    return cmdutil.AddSourceToErr("creating", info.Source, err)
                }
                helper.DryRun(true)
            }
            obj, err := helper.Create(info.Namespace, true, info.Object)
            if err != nil {
                return cmdutil.AddSourceToErr("creating", info.Source, err)
            }
            info.Refresh(obj, true)
        }

        if err := o.MarkObjectVisited(info); err != nil {
            return err
        }

        if o.shouldPrintObject() {
            return nil
        }

        printer, err := o.ToPrinter("created")
        if err != nil {
            return err
        }
        if err = printer.PrintObj(info.Object, o.Out); err != nil {
            return err
        }
        return nil
    }

    if err := o.MarkObjectVisited(info); err != nil {
        return err
    }

    if o.DryRunStrategy != cmdutil.DryRunClient {
        metadata, _ := meta.Accessor(info.Object)
        annotationMap := metadata.GetAnnotations()
        if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok {
            fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, o.cmdBaseName)
        }

        patcher, err := newPatcher(o, info)
        if err != nil {
            return err
        }
        patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut)
        if err != nil {
            return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err)
        }

        info.Refresh(patchedObject, true)

        if string(patchBytes) == "{}" && !o.shouldPrintObject() {
            printer, err := o.ToPrinter("unchanged")
            if err != nil {
                return err
            }
            if err = printer.PrintObj(info.Object, o.Out); err != nil {
                return err
            }
            return nil
        }
    }

    if o.shouldPrintObject() {
        return nil
    }

    printer, err := o.ToPrinter("configured")
    if err != nil {
        return err
    }
    if err = printer.PrintObj(info.Object, o.Out); err != nil {
        return err
    }

    return nil
}
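
For context on the aggregation in Run() above: below is a minimal, self-contained sketch (not part of this PR) of how utilerrors.NewAggregate behaves; the error messages are invented for illustration. Note that Run() returns errs[0] directly in the single-error case, which preserves the original error value rather than wrapping it in an Aggregate.

package main

import (
    "errors"
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
    // Hypothetical per-object failures, for illustration only.
    errs := []error{
        errors.New(`namespaces "multi-resource-ns" not found`),
        errors.New(`pods "test-pod" already exists`),
    }

    // NewAggregate flattens a slice of errors into a single error whose
    // message combines the individual messages; an empty slice yields nil.
    agg := utilerrors.NewAggregate(errs)
    fmt.Println(agg)

    fmt.Println(utilerrors.NewAggregate(nil) == nil) // prints true
}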
13 changes: 13 additions & 0 deletions test/cmd/apply.sh
@@ -274,6 +274,19 @@ __EOF__
# cleanup
kubectl delete --kustomize hack/testdata/kustomize

## kubectl apply multiple resources with initial failure.
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# First pass, namespace is created, but pod is not (since namespace does not exist yet).
kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}"
output_message=$(! kubectl get pods test-pod 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'pods "test-pod" not found'
# Second pass, pod is created (now that namespace exists).
kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert 'pod test-pod' "{{${id_field}}}" 'test-pod'
# cleanup
kubectl delete -f hack/testdata/multi-resource.yaml

set +o nounset
set +o errexit
}