
patch resourcebinding instead of update and update observed generation. #2345

Merged · 2 commits · Aug 10, 2022
53 changes: 27 additions & 26 deletions pkg/scheduler/scheduler.go
@@ -18,7 +18,6 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/retry"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"

@@ -417,10 +416,10 @@ func (s *Scheduler) doScheduleClusterBinding(name string) (err error) {
 	} else {
 		condition = util.NewCondition(workv1alpha2.Scheduled, scheduleFailedReason, err.Error(), metav1.ConditionFalse)
 	}
-	if updateErr := s.updateClusterBindingScheduledConditionIfNeeded(crb, condition); updateErr != nil {
-		klog.Errorf("Failed update condition(%s) for ClusterResourceBinding(%s)", workv1alpha2.Scheduled, crb.Name)
+	if updateErr := s.patchClusterBindingScheduleStatus(crb, condition); updateErr != nil {
+		klog.Errorf("Failed to patch schedule status to ClusterResourceBinding(%s): %v", crb.Name, err)
 		if err == nil {
-			// schedule succeed but update condition failed, return err in order to retry in next loop.
+			// schedule succeed but update status failed, return err in order to retry in next loop.
 			err = updateErr
 		}
 	}
@@ -662,36 +661,38 @@ func (s *Scheduler) patchBindingScheduleStatus(rb *workv1alpha2.ResourceBinding,
 	return nil
 }

-// updateClusterBindingScheduledConditionIfNeeded sets the scheduled condition of ClusterResourceBinding if needed
-func (s *Scheduler) updateClusterBindingScheduledConditionIfNeeded(crb *workv1alpha2.ClusterResourceBinding, newScheduledCondition metav1.Condition) error {
+// patchClusterBindingScheduleStatus patches schedule status of ClusterResourceBinding when necessary
+func (s *Scheduler) patchClusterBindingScheduleStatus(crb *workv1alpha2.ClusterResourceBinding, newScheduledCondition metav1.Condition) error {
 	if crb == nil {
 		return nil
 	}

-	oldScheduledCondition := meta.FindStatusCondition(crb.Status.Conditions, workv1alpha2.Scheduled)
-	if oldScheduledCondition != nil {
-		if util.IsConditionsEqual(newScheduledCondition, *oldScheduledCondition) {
-			klog.V(4).Infof("No need to update scheduled condition")
-			return nil
-		}
+	modifiedObj := crb.DeepCopy()
+	meta.SetStatusCondition(&modifiedObj.Status.Conditions, newScheduledCondition)
+	// Postpone setting observed generation until schedule succeed, assume scheduler will retry and
+	// will succeed eventually
+	if newScheduledCondition.Status == metav1.ConditionTrue {
+		modifiedObj.Status.SchedulerObservedGeneration = modifiedObj.Generation
 	}

-	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
-		meta.SetStatusCondition(&crb.Status.Conditions, newScheduledCondition)
-		_, updateErr := s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().UpdateStatus(context.TODO(), crb, metav1.UpdateOptions{})
-		if updateErr == nil {
-			return nil
-		}
+	// Short path, ignore patch if no change.
+	if reflect.DeepEqual(crb.Status, modifiedObj.Status) {
+		return nil
+	}

-		if updated, err := s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Get(context.TODO(), crb.Name, metav1.GetOptions{}); err == nil {
-			// make a copy so we don't mutate the shared cache
-			crb = updated.DeepCopy()
-		} else {
-			klog.Errorf("failed to get updated cluster resource binding %s: %v", crb.Name, err)
-		}
+	patchBytes, err := helper.GenMergePatch(crb, modifiedObj)
+	if err != nil {
+		return fmt.Errorf("failed to create a merge patch: %v", err)
+	}

-		return updateErr
-	})
+	_, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Patch(context.TODO(), crb.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	if err != nil {
+		klog.Errorf("Failed to patch schedule status to ClusterResourceBinding(%s): %v", crb.Name, err)
+		return err
+	}
+
+	klog.V(4).Infof("Patch schedule status to ClusterResourceBinding(%s) succeed", crb.Name)
+	return nil
 }

 func (s *Scheduler) recordScheduleResultEventForResourceBinding(rb *workv1alpha2.ResourceBinding, schedulerErr error) {
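Before the test file, a quick illustration of the mechanism this PR switches to. Instead of calling UpdateStatus inside retry.RetryOnConflict, the scheduler now builds a modified deep copy, computes a two-way JSON merge patch against the cached object, and sends only that patch to the "status" subresource. The sketch below is a minimal, self-contained approximation: the tiny binding/status types are hypothetical stand-ins, and jsonpatch.CreateMergePatch from github.com/evanphx/json-patch is assumed to behave like karmada's helper.GenMergePatch.

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// resourceBindingStatus is a hypothetical, stripped-down stand-in for
// workv1alpha2.ResourceBindingStatus, kept small for illustration only.
type resourceBindingStatus struct {
	SchedulerObservedGeneration int64    `json:"schedulerObservedGeneration,omitempty"`
	Conditions                  []string `json:"conditions,omitempty"`
}

type resourceBinding struct {
	Status resourceBindingStatus `json:"status"`
}

func main() {
	// The object as it sits in the scheduler's cache.
	original := resourceBinding{}

	// Deep-copy-and-modify, mirroring patchClusterBindingScheduleStatus:
	// set the Scheduled condition and record the generation on success.
	modified := original
	modified.Status.Conditions = []string{"Scheduled=True"}
	modified.Status.SchedulerObservedGeneration = 1

	originalJSON, _ := json.Marshal(original)
	modifiedJSON, _ := json.Marshal(modified)

	// A two-way JSON merge patch carries only the fields that differ, so the
	// API server can apply it to the status subresource without the scheduler
	// sending back a possibly stale full object.
	patchBytes, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patchBytes))
	// Prints something like:
	// {"status":{"conditions":["Scheduled=True"],"schedulerObservedGeneration":1}}
}

Because a merge patch carries no resourceVersion, it cannot hit an optimistic-concurrency conflict, which is why the retry import could be dropped from scheduler.go.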
214 changes: 214 additions & 0 deletions pkg/scheduler/scheduler_test.go
@@ -1,13 +1,19 @@
package scheduler

import (
"context"
"reflect"
"testing"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/fake"

workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
karmadafake "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake"
"github.com/karmada-io/karmada/pkg/util"
)

func TestCreateScheduler(t *testing.T) {
@@ -63,3 +69,211 @@ func TestCreateScheduler(t *testing.T) {
})
}
}

func Test_patchBindingScheduleStatus(t *testing.T) {
oneHourBefore := time.Now().Add(-1 * time.Hour).Round(time.Second)
oneHourAfter := time.Now().Add(1 * time.Hour).Round(time.Second)

successCondition := util.NewCondition(workv1alpha2.Scheduled, scheduleSuccessReason, scheduleSuccessMessage, metav1.ConditionTrue)
failureCondition := util.NewCondition(workv1alpha2.Scheduled, scheduleFailedReason, "schedule error", metav1.ConditionFalse)

successCondition.LastTransitionTime = metav1.Time{Time: oneHourBefore}
failureCondition.LastTransitionTime = metav1.Time{Time: oneHourAfter}

dynamicClient := dynamicfake.NewSimpleDynamicClient(runtime.NewScheme())
karmadaClient := karmadafake.NewSimpleClientset()
kubeClient := fake.NewSimpleClientset()

scheduler, err := NewScheduler(dynamicClient, karmadaClient, kubeClient)
if err != nil {
t.Error(err)
}

tests := []struct {
name string
binding *workv1alpha2.ResourceBinding
newScheduledCondition metav1.Condition
expected *workv1alpha2.ResourceBinding
}{
{
name: "add success condition",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-1", Namespace: "default", Generation: 1},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{},
},
newScheduledCondition: successCondition,
expected: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-1", Namespace: "default", Generation: 1},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{successCondition}, SchedulerObservedGeneration: 1},
},
},
{
name: "add failure condition",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-2", Namespace: "default"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{},
},
newScheduledCondition: failureCondition,
expected: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-2", Namespace: "default"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{failureCondition}},
},
},
{
name: "replace to success condition",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-3", Namespace: "default", Generation: 1},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{failureCondition}, SchedulerObservedGeneration: 2},
},
newScheduledCondition: successCondition,
expected: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-3", Namespace: "default"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{successCondition}, SchedulerObservedGeneration: 1},
},
},
{
name: "replace failure condition",
binding: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-4", Namespace: "default"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{successCondition}},
},
newScheduledCondition: failureCondition,
expected: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-4", Namespace: "default"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{failureCondition}},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
_, err := karmadaClient.WorkV1alpha2().ResourceBindings(test.binding.Namespace).Create(context.TODO(), test.binding, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = scheduler.patchBindingScheduleStatus(test.binding, test.newScheduledCondition)
if err != nil {
t.Error(err)
}
res, err := karmadaClient.WorkV1alpha2().ResourceBindings(test.binding.Namespace).Get(context.TODO(), test.binding.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(res.Status, test.expected.Status) {
t.Errorf("expected status: %v, but got: %v", test.expected.Status, res.Status)
}
})
}
}

func Test_patchClusterBindingScheduleStatus(t *testing.T) {
oneHourBefore := time.Now().Add(-1 * time.Hour).Round(time.Second)
oneHourAfter := time.Now().Add(1 * time.Hour).Round(time.Second)

successCondition := util.NewCondition(workv1alpha2.Scheduled, scheduleSuccessReason, scheduleSuccessMessage, metav1.ConditionTrue)
failureCondition := util.NewCondition(workv1alpha2.Scheduled, scheduleFailedReason, "schedule error", metav1.ConditionFalse)

successCondition.LastTransitionTime = metav1.Time{Time: oneHourBefore}
failureCondition.LastTransitionTime = metav1.Time{Time: oneHourAfter}

dynamicClient := dynamicfake.NewSimpleDynamicClient(runtime.NewScheme())
karmadaClient := karmadafake.NewSimpleClientset()
kubeClient := fake.NewSimpleClientset()

scheduler, err := NewScheduler(dynamicClient, karmadaClient, kubeClient)
if err != nil {
t.Error(err)
}

tests := []struct {
name string
binding *workv1alpha2.ClusterResourceBinding
newScheduledCondition metav1.Condition
expected *workv1alpha2.ClusterResourceBinding
}{
{
name: "add success condition",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-1", Generation: 1},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{},
},
newScheduledCondition: successCondition,
expected: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-1"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{successCondition}, SchedulerObservedGeneration: 1},
},
},
{
name: "add failure condition",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-2"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{},
},
newScheduledCondition: failureCondition,
expected: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-2"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{failureCondition}},
},
},
{
name: "replace to success condition",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-3", Generation: 1},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{failureCondition}, SchedulerObservedGeneration: 2},
},
newScheduledCondition: successCondition,
expected: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-3"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{successCondition}, SchedulerObservedGeneration: 1},
},
},
{
name: "replace failure condition",
binding: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-4"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{successCondition}},
},
newScheduledCondition: failureCondition,
expected: &workv1alpha2.ClusterResourceBinding{
ObjectMeta: metav1.ObjectMeta{Name: "rb-4"},
Spec: workv1alpha2.ResourceBindingSpec{},
Status: workv1alpha2.ResourceBindingStatus{Conditions: []metav1.Condition{failureCondition}},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
_, err := karmadaClient.WorkV1alpha2().ClusterResourceBindings().Create(context.TODO(), test.binding, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = scheduler.patchClusterBindingScheduleStatus(test.binding, test.newScheduledCondition)
if err != nil {
t.Error(err)
}
res, err := karmadaClient.WorkV1alpha2().ClusterResourceBindings().Get(context.TODO(), test.binding.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(res.Status, test.expected.Status) {
t.Errorf("expected status: %v, but got: %v", test.expected.Status, res.Status)
}
})
}
}
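The table cases above also pin down the observed-generation rule from the scheduler change: SchedulerObservedGeneration is copied from metadata.generation only when the new Scheduled condition is True, and a failed schedule leaves it untouched so a later successful retry can still record it. A minimal standalone sketch of just that rule, with hypothetical miniature types that only restate what the patched functions do:

package main

import "fmt"

// miniStatus is a hypothetical miniature of ResourceBindingStatus, reduced to
// the two facts the tests above compare.
type miniStatus struct {
	ScheduledConditionTrue      bool
	SchedulerObservedGeneration int64
}

// applyScheduledCondition restates the rule in the patched functions: always
// record the condition outcome, but copy metadata.generation into the observed
// generation only when scheduling succeeded.
func applyScheduledCondition(status miniStatus, generation int64, succeeded bool) miniStatus {
	status.ScheduledConditionTrue = succeeded
	if succeeded {
		status.SchedulerObservedGeneration = generation
	}
	return status
}

func main() {
	s := miniStatus{}
	s = applyScheduledCondition(s, 1, false)
	fmt.Println(s) // {false 0}: failure does not touch the observed generation
	s = applyScheduledCondition(s, 1, true)
	fmt.Println(s) // {true 1}: success records the current generation
}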