Feat: support sharding
Signed-off-by: Yin Da <yd219913@alibaba-inc.com>
Somefive committed Feb 6, 2023
1 parent eab57c8 commit 2ec72c2
Showing 13 changed files with 604 additions and 3 deletions.
39 changes: 39 additions & 0 deletions controller/sharding/cache.go
@@ -0,0 +1,39 @@
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sharding

import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// BuildCache adds a shard-id label selector for the given typed objects
func BuildCache(scheme *runtime.Scheme, shardingObjects ...client.Object) cache.NewCacheFunc {
opts := cache.Options{
Scheme: scheme,
SelectorsByObject: map[client.Object]cache.ObjectSelector{},
}
if EnableSharding {
ls := labels.SelectorFromSet(map[string]string{LabelKubeVelaScheduledShardID: ShardID})
for _, obj := range shardingObjects {
opts.SelectorsByObject[obj] = cache.ObjectSelector{Label: ls}
}
}
return cache.BuilderWithOptions(opts)
}
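For context (not part of this commit): a minimal sketch of wiring the returned cache.NewCacheFunc into a controller-runtime manager, assuming a controller-runtime version whose ctrl.Options still exposes the NewCache field (the same era as the SelectorsByObject API used above). ConfigMap stands in for whatever sharded types a controller actually watches.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/kubevela/pkg/controller/sharding"
)

func main() {
	// ConfigMap mirrors the suite test below; a real controller would pass its own sharded types.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:   scheme.Scheme,
		NewCache: sharding.BuildCache(scheme.Scheme, &corev1.ConfigMap{}),
	})
	if err != nil {
		panic(err)
	}
	_ = mgr.Start(ctrl.SetupSignalHandler())
}

When sharding is disabled, BuildCache adds no selector, so the same wiring works for a single, unsharded instance.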
29 changes: 29 additions & 0 deletions controller/sharding/flags.go
@@ -0,0 +1,29 @@
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sharding

import (
"github.com/spf13/pflag"
)

// AddFlags adds sharding flags
func AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&EnableSharding, "enable-sharding", EnableSharding, "When sharding is enabled, the controller runs in master mode (shard-id=master) or in slave mode (shard-id is any non-empty string except master). Refer to https://github.com/kubevela/kubevela/blob/master/design/vela-core/sharding.md for details.")
fs.StringVar(&ShardID, "shard-id", ShardID, "The shard id of the current instance.")
fs.StringSliceVar(&SchedulableShards, "schedulable-shards", SchedulableShards, "The shard ids that are available for scheduling. If empty, dynamic discovery will be used.")
fs.DurationVar(&DynamicDiscoverySchedulerResyncPeriod, "sharding-slave-discovery-resync-period", DynamicDiscoverySchedulerResyncPeriod, "The resync period for default dynamic discovery scheduler.")
}
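For context (not from this commit), a sketch of how a controller binary could register these flags; the parsed values populate the package-level EnableSharding, ShardID, SchedulableShards and DynamicDiscoverySchedulerResyncPeriod variables used elsewhere in this package. The binary name and sample arguments are illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"

	"github.com/kubevela/pkg/controller/sharding"
)

func main() {
	fs := pflag.NewFlagSet("vela-core", pflag.ExitOnError)
	sharding.AddFlags(fs)
	// e.g. ./vela-core --enable-sharding --shard-id=shard-0 --schedulable-shards=shard-0,shard-1
	if err := fs.Parse(os.Args[1:]); err != nil {
		panic(err)
	}
	fmt.Printf("sharding=%t shard-id=%q schedulable=%v\n",
		sharding.EnableSharding, sharding.ShardID, sharding.SchedulableShards)
}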
219 changes: 219 additions & 0 deletions controller/sharding/scheduler.go
@@ -0,0 +1,219 @@
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sharding

import (
"context"
"math/rand"
"sort"
"strings"
"sync"
"sync/atomic"
"time"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/util/podutils"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/kubevela/pkg/util/k8s"
"github.com/kubevela/pkg/util/maps"
velaruntime "github.com/kubevela/pkg/util/runtime"
"github.com/kubevela/pkg/util/singleton"
"github.com/kubevela/pkg/util/slices"
)

// Scheduler schedules shard-ids for objects
type Scheduler interface {
Start(context.Context)
Schedule(client.Object) bool
}

var _ Scheduler = (*staticScheduler)(nil)

// NewStaticScheduler creates a scheduler that performs no dynamic updates and only allocates from the predefined shards
func NewStaticScheduler(shards []string) Scheduler {
return &staticScheduler{shards: shards}
}

type staticScheduler struct {
shards []string
}

// Start the static scheduler; it only logs the predefined shards and needs no background loop
func (in *staticScheduler) Start(ctx context.Context) {
klog.Infof("staticScheduler started, shards: [%s]", strings.Join(in.shards, ", "))
}

// Schedule the target object to a random shard
func (in *staticScheduler) Schedule(o client.Object) bool {
if _, scheduled := GetScheduledShardID(o); !scheduled {
if len(in.shards) > 0 {
// nolint
sid := in.shards[rand.Intn(len(in.shards))]
klog.Infof("staticScheduler schedule %s %s/%s to shard[%s]", o.GetObjectKind().GroupVersionKind().Kind, o.GetNamespace(), o.GetName(), sid)
SetScheduledShardID(o, sid)
return true
}
klog.Infof("staticDiscoveryScheduler no schedulable shard found for %s %s/%s", o.GetObjectKind().GroupVersionKind().Kind, o.GetNamespace(), o.GetName())
}
return false
}

var _ Scheduler = (*dynamicDiscoveryScheduler)(nil)

// NewDynamicDiscoveryScheduler creates a scheduler that allows dynamic discovery of available shards
func NewDynamicDiscoveryScheduler(name string, resyncPeriod time.Duration) Scheduler {
return &dynamicDiscoveryScheduler{
name: name,
resyncPeriod: resyncPeriod,
candidates: map[string]map[string]bool{},
}
}

type dynamicDiscoveryScheduler struct {
mu sync.RWMutex

name string
resyncPeriod time.Duration
candidates map[string]map[string]bool
roundRobinIndex atomic.Uint32

store cache.Store
informer cache.Controller
}

func (in *dynamicDiscoveryScheduler) _registerPod(obj interface{}) {
if pod, ok := obj.(*corev1.Pod); ok {
id := pod.GetLabels()[LabelKubeVelaShardID]
healthy := podutils.IsPodReady(pod)
klog.Infof("dynamicDiscoveryScheduler register pod %s/%s (id: %s) with health status: %t", pod.Namespace, pod.Name, id, healthy)
in.mu.Lock()
defer in.mu.Unlock()
if _, exist := in.candidates[id]; !exist {
in.candidates[id] = map[string]bool{}
}
in.candidates[id][pod.Name] = healthy
}
}

func (in *dynamicDiscoveryScheduler) _unregisterPod(obj interface{}) {
if pod, ok := obj.(*corev1.Pod); ok {
id := pod.GetLabels()[LabelKubeVelaShardID]
klog.Infof("dynamicDiscoveryScheduler unregister pod %s/%s", pod.Namespace, pod.Name)
in.mu.Lock()
defer in.mu.Unlock()
if _, exist := in.candidates[id]; exist {
delete(in.candidates[id], pod.Name)
if len(in.candidates[id]) == 0 {
delete(in.candidates, id)
}
}
}
}

// resync the available shards
func (in *dynamicDiscoveryScheduler) resync(stopCh <-chan struct{}) {
ticker := time.NewTicker(in.resyncPeriod)
defer ticker.Stop()
for {
select {
case <-stopCh:
return
case <-ticker.C:
in.mu.Lock()
in.candidates = map[string]map[string]bool{}
in.mu.Unlock()
for _, obj := range in.store.List() {
in._registerPod(obj)
}
available := in.availableShards()
klog.Infof("dynamicDiscoveryScheduler resync finished, available shards: [%s]", strings.Join(available, ", "))
}
}
}

// Start runs the scheduler to watch pods and register them automatically
func (in *dynamicDiscoveryScheduler) Start(ctx context.Context) {
klog.Infof("dynamicDiscoveryScheduler staring, watching pods in %s", k8s.GetRuntimeNamespace())
cli := singleton.StaticClient.Get().CoreV1().RESTClient()
lw := cache.NewFilteredListWatchFromClient(cli, "pods", k8s.GetRuntimeNamespace(), func(options *metav1.ListOptions) {
ls := labels.NewSelector()
ls = ls.Add(*velaruntime.Must(labels.NewRequirement(LabelKubeVelaShardID, selection.Exists, nil)))
ls = ls.Add(*velaruntime.Must(labels.NewRequirement("app.kubernetes.io/name", selection.Equals, []string{in.name})))
options.LabelSelector = ls.String()
})
in.store, in.informer = cache.NewInformer(lw, &corev1.Pod{}, in.resyncPeriod, cache.ResourceEventHandlerFuncs{
AddFunc: in._registerPod,
UpdateFunc: func(oldObj, newObj interface{}) {
if k8s.GetLabel(oldObj.(runtime.Object), LabelKubeVelaShardID) != k8s.GetLabel(newObj.(runtime.Object), LabelKubeVelaShardID) {
in._unregisterPod(oldObj)
}
in._registerPod(newObj)
},
DeleteFunc: in._unregisterPod,
})
stopCh := ctx.Done()
if stopCh == nil {
stopCh = make(chan struct{})
}
if in.resyncPeriod > 0 {
go in.resync(stopCh)
}
klog.Infof("dynamicDiscoveryScheduler started")
in.informer.Run(stopCh)
}

func (in *dynamicDiscoveryScheduler) availableShards() []string {
in.mu.RLock()
defer in.mu.RUnlock()
var available []string
for id, pods := range in.candidates {
if slices.Any(maps.Values(pods), func(x bool) bool { return x }) {
available = append(available, id)
}
}
return available
}

func (in *dynamicDiscoveryScheduler) schedule() (string, bool) {
available := in.availableShards()
if len(available) == 0 {
return "", false
}
sort.Strings(available)
idx := in.roundRobinIndex.Add(1) % uint32(len(available))
return available[idx], true
}

// Schedule assigns an available shard-id to the given object
func (in *dynamicDiscoveryScheduler) Schedule(o client.Object) bool {
if _, scheduled := GetScheduledShardID(o); !scheduled {
if sid, ok := in.schedule(); ok {
klog.Infof("dynamicDiscoveryScheduler schedule %s %s/%s to shard[%s]", o.GetObjectKind().GroupVersionKind().Kind, o.GetNamespace(), o.GetName(), sid)
SetScheduledShardID(o, sid)
return true
}
klog.Infof("dynamicDiscoveryScheduler no schedulable shard found for %s %s/%s", o.GetObjectKind().GroupVersionKind().Kind, o.GetNamespace(), o.GetName())
}
return false
}
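A sketch of how the master instance might use a Scheduler (assumptions, not from this commit): prefer the static scheduler when --schedulable-shards is set, otherwise fall back to dynamic discovery of slave pods. Since Schedule only mutates the in-memory object, the caller is responsible for persisting the added label. The "kubevela" app name is an illustrative app.kubernetes.io/name value.

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/kubevela/pkg/controller/sharding"
)

// newScheduler picks a scheduler: a static shard list if configured, otherwise
// dynamic discovery of slave pods ("kubevela" is an illustrative name, not from this commit).
func newScheduler() sharding.Scheduler {
	if len(sharding.SchedulableShards) > 0 {
		return sharding.NewStaticScheduler(sharding.SchedulableShards)
	}
	return sharding.NewDynamicDiscoveryScheduler("kubevela", sharding.DynamicDiscoverySchedulerResyncPeriod)
}

// assignShard stamps an unscheduled object with a shard id and persists the label.
func assignShard(ctx context.Context, c client.Client, s sharding.Scheduler, obj client.Object) error {
	if s.Schedule(obj) { // only sets the label when no shard id is present yet
		return c.Update(ctx, obj)
	}
	return nil
}

func main() {
	ctx := context.Background()
	s := newScheduler()
	go s.Start(ctx) // the dynamic scheduler blocks on its informer, so run it in the background
	// ...the master's mutating webhook or reconcile loop would then call assignShard
	// for every sharded object it admits.
	_ = assignShard
}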
84 changes: 84 additions & 0 deletions controller/sharding/suite_test.go
@@ -0,0 +1,84 @@
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sharding_test

import (
"context"
"testing"
"time"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/cache"

"github.com/kubevela/pkg/controller/sharding"
"github.com/kubevela/pkg/util/singleton"

"github.com/kubevela/pkg/util/test/bootstrap"
)

func TestSharding(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Run client package test")
}

var _ = bootstrap.InitKubeBuilderForTest()

var _ = Describe("Test sharding", func() {

It("Test scheduler", func() {
fs := pflag.NewFlagSet("-", pflag.ExitOnError)
sharding.AddFlags(fs)
Ω(fs.Parse([]string{"--enable-sharding", "--shard-id=s", "--schedulable-shards=s,t"})).To(Succeed())
Ω(sharding.SchedulableShards).To(Equal([]string{"s", "t"}))

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg, cli := singleton.KubeConfig.Get(), singleton.KubeClient.Get()

By("Test static scheduler")
scheduler := sharding.NewStaticScheduler([]string{"s"})
go scheduler.Start(ctx)
cm1 := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "scheduled", Namespace: metav1.NamespaceDefault}}
Ω(scheduler.Schedule(cm1)).To(BeTrue())
Ω(cli.Create(ctx, cm1)).To(Succeed())
scheduler = sharding.NewStaticScheduler([]string{""})
go scheduler.Start(ctx)
cm2 := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "unscheduled", Namespace: metav1.NamespaceDefault}}
Ω(scheduler.Schedule(cm1)).To(BeFalse())
Ω(cli.Create(ctx, cm2)).To(Succeed())

By("Test cache")
store, err := sharding.BuildCache(scheme.Scheme, &corev1.ConfigMap{})(cfg, cache.Options{})
Ω(err).To(Succeed())
go func() { _ = store.Start(ctx) }()
Eventually(func(g Gomega) {
cms := &corev1.ConfigMapList{}
g.Expect(store.List(ctx, cms)).To(Succeed())
g.Expect(len(cms.Items)).To(Equal(1))
g.Expect(cms.Items[0].Name).To(Equal("scheduled"))
g.Expect(kerrors.IsNotFound(store.Get(ctx, types.NamespacedName{Name: cm2.Name, Namespace: cm2.Namespace}, &corev1.ConfigMap{}))).To(BeTrue())
}).WithTimeout(5 * time.Second).Should(Succeed())
})

})