Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ CONTROLLER_GEN_VER := v0.16.0
CONTROLLER_GEN_BIN := controller-gen
CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))

STATICCHECK_VER := 2024.1
STATICCHECK_VER := 2025.1
STATICCHECK_BIN := staticcheck
STATICCHECK := $(abspath $(TOOLS_BIN_DIR)/$(STATICCHECK_BIN)-$(STATICCHECK_VER))

Expand Down
11 changes: 8 additions & 3 deletions pkg/controllers/clusterresourceplacement/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,10 @@ import (
// if object size is greater than 1MB https://github.com/kubernetes/kubernetes/blob/db1990f48b92d603f469c1c89e2ad36da1b74846/test/integration/master/synthetic_master_test.go#L337
var resourceSnapshotResourceSizeLimit = 800 * (1 << 10) // 800KB

// We use a safety resync period to requeue all the finished requests just in case there is a bug in the system.
// TODO: unify all the controllers with this pattern and make this configurable in place of the controller runtime resync period.
const controllerResyncPeriod = 15 * time.Minute

func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ctrl.Result, error) {
name, ok := key.(string)
if !ok {
Expand Down Expand Up @@ -188,7 +192,8 @@ func (r *Reconciler) handleUpdate(ctx context.Context, crp *fleetv1beta1.Cluster
klog.ErrorS(updateErr, "Failed to update the status", "clusterResourcePlacement", crpKObj)
return ctrl.Result{}, controller.NewUpdateIgnoreConflictError(updateErr)
}
return ctrl.Result{}, err
// no need to retry faster, the user needs to fix the resource selectors
return ctrl.Result{RequeueAfter: controllerResyncPeriod}, nil
}

latestSchedulingPolicySnapshot, err := r.getOrCreateClusterSchedulingPolicySnapshot(ctx, crp, int(revisionLimit))
Expand Down Expand Up @@ -252,11 +257,11 @@ func (r *Reconciler) handleUpdate(ctx context.Context, crp *fleetv1beta1.Cluster
// Here we requeue the request to prevent a bug in the watcher.
klog.V(2).InfoS("Scheduler has not scheduled any cluster yet and requeue the request as a backup",
"clusterResourcePlacement", crpKObj, "scheduledCondition", crp.GetCondition(string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType)), "generation", crp.Generation)
return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
return ctrl.Result{RequeueAfter: controllerResyncPeriod}, nil
}
klog.V(2).InfoS("Placement rollout has not finished yet and requeue the request", "clusterResourcePlacement", crpKObj, "status", crp.Status, "generation", crp.Generation)
// no need to requeue the request as the binding status will be changed, but we add a long resync loop just in case.
return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
return ctrl.Result{RequeueAfter: controllerResyncPeriod}, nil
}

func (r *Reconciler) getOrCreateClusterSchedulingPolicySnapshot(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement, revisionHistoryLimit int) (*fleetv1beta1.ClusterSchedulingPolicySnapshot, error) {
Expand Down
37 changes: 26 additions & 11 deletions pkg/controllers/clusterresourceplacement/resource_selector.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,7 @@ func (r *Reconciler) selectResources(placement *fleetv1alpha1.ClusterResourcePla
}
placement.Status.SelectedResources = make([]fleetv1alpha1.ResourceIdentifier, 0)
manifests := make([]workv1alpha1.Manifest, len(selectedObjects))
for i, obj := range selectedObjects {
unstructuredObj := obj.DeepCopyObject().(*unstructured.Unstructured)
for i, unstructuredObj := range selectedObjects {
gvk := unstructuredObj.GroupVersionKind()
res := fleetv1alpha1.ResourceIdentifier{
Group: gvk.Group,
Expand Down Expand Up @@ -81,8 +80,9 @@ func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []flee
}

// gatherSelectedResource gets all the resources according to the resource selector.
func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv1beta1.ClusterResourceSelector) ([]runtime.Object, error) {
var resources []runtime.Object
func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv1beta1.ClusterResourceSelector) ([]*unstructured.Unstructured, error) {
var resources []*unstructured.Unstructured
var resourceMap = make(map[fleetv1beta1.ResourceIdentifier]bool)
for _, selector := range selectors {
gvk := schema.GroupVersionKind{
Group: selector.Group,
Expand All @@ -104,7 +104,23 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv
if err != nil {
return nil, err
}
resources = append(resources, objs...)
for _, obj := range objs {
uObj := obj.(*unstructured.Unstructured)
ri := fleetv1beta1.ResourceIdentifier{
Group: obj.GetObjectKind().GroupVersionKind().Group,
Version: obj.GetObjectKind().GroupVersionKind().Version,
Kind: obj.GetObjectKind().GroupVersionKind().Kind,
Name: uObj.GetName(),
Namespace: uObj.GetNamespace(),
}
if _, exist := resourceMap[ri]; exist {
err = fmt.Errorf("found duplicate resource %+v", ri)
klog.ErrorS(err, "user selected one resource more than once", "resource", ri, "placement", placement)
return nil, controller.NewUserError(err)
}
resourceMap[ri] = true
resources = append(resources, uObj)
}
}
// sort the resources in strict order so that we will get the stable list of manifest so that
// the generated work object doesn't change between reconcile loops
Expand All @@ -113,16 +129,16 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv
return resources, nil
}

func sortResources(resources []runtime.Object) {
func sortResources(resources []*unstructured.Unstructured) {
sort.Slice(resources, func(i, j int) bool {
obj1 := resources[i].DeepCopyObject().(*unstructured.Unstructured)
obj2 := resources[j].DeepCopyObject().(*unstructured.Unstructured)
obj1 := resources[i]
obj2 := resources[j]
gvk1 := obj1.GetObjectKind().GroupVersionKind().String()
gvk2 := obj2.GetObjectKind().GroupVersionKind().String()
// compare group/version/kind for the rest of the resource types
gvkComp := strings.Compare(gvk1, gvk2)
if gvkComp == 0 {
// same gvk, compare namespace/name
// same gvk, compare namespace/name, no duplication exists
return strings.Compare(fmt.Sprintf("%s/%s", obj1.GetNamespace(), obj1.GetName()),
fmt.Sprintf("%s/%s", obj2.GetNamespace(), obj2.GetName())) > 0
}
Expand Down Expand Up @@ -442,8 +458,7 @@ func (r *Reconciler) selectResourcesForPlacement(placement *fleetv1beta1.Cluster

resources := make([]fleetv1beta1.ResourceContent, len(selectedObjects))
resourcesIDs := make([]fleetv1beta1.ResourceIdentifier, len(selectedObjects))
for i, obj := range selectedObjects {
unstructuredObj := obj.DeepCopyObject().(*unstructured.Unstructured)
for i, unstructuredObj := range selectedObjects {
rc, err := generateResourceContent(unstructuredObj)
if err != nil {
return 0, nil, nil, err
Expand Down
48 changes: 24 additions & 24 deletions pkg/controllers/clusterresourceplacement/resource_selector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -935,52 +935,52 @@ func TestSortResource(t *testing.T) {
}

tests := map[string]struct {
resources []runtime.Object
want []runtime.Object
resources []*unstructured.Unstructured
want []*unstructured.Unstructured
}{
"should handle empty resources list": {
resources: []runtime.Object{},
want: []runtime.Object{},
resources: []*unstructured.Unstructured{},
want: []*unstructured.Unstructured{},
},
"should handle single resource": {
resources: []runtime.Object{deployment},
want: []runtime.Object{deployment},
resources: []*unstructured.Unstructured{deployment},
want: []*unstructured.Unstructured{deployment},
},
"should handle multiple namespaces": {
resources: []runtime.Object{namespace1, namespace2},
want: []runtime.Object{namespace2, namespace1},
resources: []*unstructured.Unstructured{namespace1, namespace2},
want: []*unstructured.Unstructured{namespace2, namespace1},
},
"should gather selected resources with Namespace in front with order": {
resources: []runtime.Object{deployment, namespace1, namespace2},
want: []runtime.Object{namespace2, namespace1, deployment},
resources: []*unstructured.Unstructured{deployment, namespace1, namespace2},
want: []*unstructured.Unstructured{namespace2, namespace1, deployment},
},
"should gather selected resources with CRD in front with order": {
resources: []runtime.Object{clusterRole, crd1, crd2},
want: []runtime.Object{crd2, crd1, clusterRole},
resources: []*unstructured.Unstructured{clusterRole, crd1, crd2},
want: []*unstructured.Unstructured{crd2, crd1, clusterRole},
},
"should gather selected resources with CRD or Namespace in front with order": {
resources: []runtime.Object{deployment, namespace1, namespace2, clusterRole, crd1, crd2},
want: []runtime.Object{namespace2, namespace1, crd2, crd1, clusterRole, deployment},
resources: []*unstructured.Unstructured{deployment, namespace1, namespace2, clusterRole, crd1, crd2},
want: []*unstructured.Unstructured{namespace2, namespace1, crd2, crd1, clusterRole, deployment},
},
"should gather selected resources with CRD or Namespace in front with order, second case": {
resources: []runtime.Object{crd1, crd2, deployment, namespace2, clusterRole},
want: []runtime.Object{namespace2, crd2, crd1, deployment, clusterRole},
resources: []*unstructured.Unstructured{crd1, crd2, deployment, namespace2, clusterRole},
want: []*unstructured.Unstructured{namespace2, crd2, crd1, deployment, clusterRole},
},
"should gather selected resources with PersistentVolumeClaim in front with order": {
resources: []runtime.Object{deployment, pvc, namespace1, role},
want: []runtime.Object{namespace1, pvc, deployment, role},
resources: []*unstructured.Unstructured{deployment, pvc, namespace1, role},
want: []*unstructured.Unstructured{namespace1, pvc, deployment, role},
},
"should gather selected resources with Secret in front with order": {
resources: []runtime.Object{deployment, secret, namespace1, crd1, namespace2, role},
want: []runtime.Object{namespace2, namespace1, crd1, secret, deployment, role},
resources: []*unstructured.Unstructured{deployment, secret, namespace1, crd1, namespace2, role},
want: []*unstructured.Unstructured{namespace2, namespace1, crd1, secret, deployment, role},
},
"should gather selected resources with ConfigMap and Secret in front with order": {
resources: []runtime.Object{deployment, secret, namespace1, role, configMap, secret2},
want: []runtime.Object{namespace1, configMap, secret2, secret, deployment, role},
resources: []*unstructured.Unstructured{deployment, secret, namespace1, role, configMap, secret2},
want: []*unstructured.Unstructured{namespace1, configMap, secret2, secret, deployment, role},
},
"should gather selected all the resources with the right order": {
resources: []runtime.Object{configMap, deployment, role, crd1, pvc, secret2, clusterRole, secret, namespace1, namespace2, crd2},
want: []runtime.Object{namespace2, namespace1, crd2, crd1, configMap, secret2, secret, pvc, deployment, clusterRole, role},
resources: []*unstructured.Unstructured{configMap, deployment, role, crd1, pvc, secret2, clusterRole, secret, namespace1, namespace2, crd2},
want: []*unstructured.Unstructured{namespace2, namespace1, crd2, crd1, configMap, secret2, secret, pvc, deployment, clusterRole, role},
},
}

Expand Down
Loading
Loading