
✨ e2e Test: Ensure OwnerRefs are consistently reconciled #7606

Merged: 1 commit, Jan 20, 2023
81 changes: 81 additions & 0 deletions cmd/clusterctl/client/cluster/ownergraph.go
@@ -0,0 +1,81 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster

import (
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// OwnerGraph contains a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship
// between those objects as edges.
type OwnerGraph map[string]OwnerGraphNode

// OwnerGraphNode is a single node linking an ObjectReference to its OwnerReferences.
type OwnerGraphNode struct {
	Object corev1.ObjectReference
	Owners []metav1.OwnerReference
}

func nodeToOwnerRef(n *node, attributes ownerReferenceAttributes) metav1.OwnerReference {
	ref := metav1.OwnerReference{
		Name:       n.identity.Name,
		APIVersion: n.identity.APIVersion,
		Kind:       n.identity.Kind,
		UID:        n.identity.UID,
	}
	if attributes.BlockOwnerDeletion != nil {
		ref.BlockOwnerDeletion = attributes.BlockOwnerDeletion
	}
	if attributes.Controller != nil {
		ref.Controller = attributes.Controller
	}
	return ref
}

// GetOwnerGraph returns a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship between those objects as edges.
// NOTE: this data structure is exposed to allow implementation of E2E tests verifying that CAPI can properly rebuild its
// own owner references; there is no guarantee about the stability of this API.
func GetOwnerGraph(namespace, kubeconfigPath string) (OwnerGraph, error) {
	p := newProxy(Kubeconfig{Path: kubeconfigPath, Context: ""})
	invClient := newInventoryClient(p, nil)

	graph := newObjectGraph(p, invClient)

	// Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types.
	err := graph.getDiscoveryTypes()
	if err != nil {
		return OwnerGraph{}, errors.Wrap(err, "failed to retrieve discovery types")
	}

	// Discover the object graph for the selected types:
	// - Nodes are the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process.
	// - Edges are derived from the OwnerReferences between nodes.
	if err := graph.Discovery(namespace); err != nil {
		return OwnerGraph{}, errors.Wrap(err, "failed to discover the object graph")
	}
	owners := OwnerGraph{}
	for _, v := range graph.uidToNode {
		n := OwnerGraphNode{Object: v.identity, Owners: []metav1.OwnerReference{}}
		for owner, attributes := range v.owners {
			n.Owners = append(n.Owners, nodeToOwnerRef(owner, attributes))
		}
		owners[string(v.identity.UID)] = n
	}
	return owners, nil
}
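GetOwnerGraph is exported specifically so e2e tests can inspect owner references, so a short usage sketch may help. The helper below is illustrative only and not part of this PR: the function name, the "no owners" report, and the assumption that callers import the package via the standard sigs.k8s.io/cluster-api module path are all mine.

```go
package e2e

import (
	"fmt"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
)

// reportOwnerlessObjects is a hypothetical helper: it walks the OwnerGraph returned by
// clusterctl and prints every object that carries no OwnerReference at all, which is one
// simple way a test could start reasoning about the graph.
func reportOwnerlessObjects(namespace, kubeconfigPath string) error {
	graph, err := cluster.GetOwnerGraph(namespace, kubeconfigPath)
	if err != nil {
		return fmt.Errorf("failed to get owner graph: %w", err)
	}
	for _, node := range graph {
		if len(node.Owners) == 0 {
			fmt.Printf("%s %s/%s has no owner references\n",
				node.Object.Kind, node.Object.Namespace, node.Object.Name)
		}
	}
	return nil
}
```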
13 changes: 9 additions & 4 deletions test/e2e/quick_start.go
@@ -44,7 +44,8 @@ type QuickStartSpecInput struct {
	// Flavor, if specified, is the template flavor used to create the cluster for testing.
	// If not specified, and the e2econfig variable IPFamily is IPV6, then "ipv6" is used,
	// otherwise the default flavor is used.
	Flavor *string
	PostMachinesProvisioned func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)
}

// QuickStartSpec implements a spec that mimics the operation described in the Cluster API quick start, that is
@@ -82,7 +83,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
		if input.Flavor != nil {
			flavor = *input.Flavor
		}

		clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: input.BootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
@@ -92,7 +93,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
				InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
				Flavor: flavor,
				Namespace: namespace.Name,
				ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
				ClusterName: clusterName,
				KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion),
				ControlPlaneMachineCount: pointer.Int64(1),
				WorkerMachineCount: pointer.Int64(1),
@@ -101,8 +102,12 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
			WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
			PostMachinesProvisioned: func() {
				if input.PostMachinesProvisioned != nil {
					input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName)
				}
			},
		}, clusterResources)

		By("PASSED!")
	})

47 changes: 47 additions & 0 deletions test/e2e/quick_start_test.go
@@ -22,6 +22,8 @@ package e2e
import (
	. "github.com/onsi/ginkgo/v2"
	"k8s.io/utils/pointer"

	"sigs.k8s.io/cluster-api/test/framework"
)

var _ = Describe("When following the Cluster API quick-start [PR-Blocking]", func() {
@@ -75,3 +77,48 @@ var _ = Describe("When following the Cluster API quick-start with Ignition", fun
		}
	})
})

var _ = Describe("When following the Cluster API quick-start check owner references are correctly reconciled and re-reconciled if deleted", func() {
	QuickStartSpec(ctx, func() QuickStartSpecInput {
		return QuickStartSpecInput{
			E2EConfig: e2eConfig,
			ClusterctlConfigPath: clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder: artifactFolder,
			SkipCleanup: skipCleanup,
			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName,
					framework.CoreTypeOwnerReferenceAssertion,
					framework.ExpOwnerReferenceAssertions,
					framework.DockerInfraOwnerReferenceAssertions,
					framework.KubeadmBootstrapOwnerReferenceAssertions,
					framework.KubeadmControlPlaneOwnerReferenceAssertions,
					framework.KubernetesReferenceAssertions,
				)
			},
		}
	})
})

var _ = Describe("When following the Cluster API quick-start with ClusterClass check owner references are correctly reconciled and re-reconciled if deleted [ClusterClass]", func() {
	QuickStartSpec(ctx, func() QuickStartSpecInput {
		return QuickStartSpecInput{
			E2EConfig: e2eConfig,
			ClusterctlConfigPath: clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder: artifactFolder,
			SkipCleanup: skipCleanup,
			Flavor: pointer.String("topology"),
			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName,
					framework.CoreTypeOwnerReferenceAssertion,
					framework.ExpOwnerReferenceAssertions,
					framework.DockerInfraOwnerReferenceAssertions,
					framework.KubeadmBootstrapOwnerReferenceAssertions,
					framework.KubeadmControlPlaneOwnerReferenceAssertions,
					framework.KubernetesReferenceAssertions,
				)
			},
		}
	})
})
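The framework assertion sets referenced above (framework.CoreTypeOwnerReferenceAssertion and friends) are defined elsewhere and are not part of this diff. Purely as an illustration of the idea, an owner-reference assertion set could be shaped as a map from object Kind to a validation function, roughly as sketched below; the type name, helper, and Kind/owner pairs are assumptions, not the framework's actual definitions.

```go
package frameworksketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// OwnerReferenceAssertions is a hypothetical shape for an assertion set: it maps an object
// Kind to a check over the OwnerReferences found on objects of that Kind.
type OwnerReferenceAssertions map[string]func(refs []metav1.OwnerReference) error

// hasOwnersByKind returns an assertion that an object carries an owner reference
// to each of the given Kinds.
func hasOwnersByKind(kinds ...string) func([]metav1.OwnerReference) error {
	return func(refs []metav1.OwnerReference) error {
		got := map[string]bool{}
		for _, ref := range refs {
			got[ref.Kind] = true
		}
		for _, kind := range kinds {
			if !got[kind] {
				return fmt.Errorf("expected an owner reference to a %s, got %v", kind, refs)
			}
		}
		return nil
	}
}

// exampleAssertions shows how expectations could be expressed; the pairs below are
// examples for illustration, not the project's actual expectations.
var exampleAssertions = OwnerReferenceAssertions{
	"MachineSet":         hasOwnersByKind("MachineDeployment"),
	"MachineHealthCheck": hasOwnersByKind("Cluster"),
}
```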