✨Add option to use Lease lock if coordination group is available #600

Closed
1 change: 1 addition & 0 deletions Gopkg.lock

5 changes: 4 additions & 1 deletion pkg/leaderelection/doc.go
@@ -19,6 +19,9 @@ Package leaderelection contains a constructor for a leader election resource lock
This is used to ensure that multiple copies of a controller manager can be run with
only one active set of controllers, for active-passive HA.

It uses built-in Kubernetes leader election APIs.
It uses built-in Kubernetes leader election APIs. The Lease lock type takes precedence
because edits to Leases are less common and fewer components in the cluster watch "all Leases".
If the Lease API is not available, the ConfigMap resource lock is used.

*/
package leaderelection
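
For context, the sketch below shows how a consumer of controller-runtime would opt into this behaviour through `manager.Options` (the same leader election fields changed in `pkg/manager/manager.go` below). It is illustrative only: the `LeaderElectionID` and namespace values are made up, and error handling is minimal.

```go
package main

import (
	"os"

	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	// Enable leader election on the manager. With this change, the backing
	// resource lock is a Lease when the coordination.k8s.io group is served
	// by the API server, and a ConfigMap otherwise.
	mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{
		LeaderElection:          true,
		LeaderElectionID:        "example-controller-lock", // illustrative lock name
		LeaderElectionNamespace: "kube-system",             // illustrative; when empty, the in-cluster namespace is used
	})
	if err != nil {
		os.Exit(1)
	}

	// Controllers would be registered here before calling mgr.Start(...).
	_ = mgr
}
```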
33 changes: 28 additions & 5 deletions pkg/leaderelection/leader_election.go
Expand Up @@ -21,6 +21,7 @@ import (
"io/ioutil"
"os"

coordinationv1 "k8s.io/api/coordination/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -37,16 +38,17 @@ type Options struct {
LeaderElection bool

// LeaderElectionNamespace determines the namespace in which the leader
// election configmap will be created.
// election resource lock will be created.
LeaderElectionNamespace string

// LeaderElectionID determines the name of the configmap that leader election
// LeaderElectionID determines the name of the resource lock that leader election
// will use for holding the leader lock.
LeaderElectionID string
}

// NewResourceLock creates a new config map resource lock for use in a leader
// election loop
// NewResourceLock creates a new resource lock for use in a leader
// election loop. The Lease lock is chosen when the `coordination.k8s.io` API group
// is available; otherwise, the ConfigMap resource lock is used.
func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) {
if !options.LeaderElection {
return nil, nil
@@ -79,8 +81,14 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) {
return nil, err
}

// Determine lock type
lockType, err := getDefaultLockType(client)
if err != nil {
return nil, err
}

// TODO(JoelSpeed): switch to leaderelection object in 1.12
return resourcelock.New(resourcelock.ConfigMapsResourceLock,
return resourcelock.New(lockType,
options.LeaderElectionNamespace,
options.LeaderElectionID,
client.CoreV1(),
@@ -91,6 +99,21 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) {
})
}

func getDefaultLockType(client *kubernetes.Clientset) (string, error) {
// Check whether the new leader election API group (coordination.k8s.io) is served
supportedGroups, err := client.Discovery().ServerGroups()
if err != nil {
return "", fmt.Errorf("unable to retrieve supported server groups: %v", err)
}
for _, g := range supportedGroups.Groups {
if g.Name == coordinationv1.GroupName {
return resourcelock.LeasesResourceLock, nil
}
}

return resourcelock.ConfigMapsResourceLock, nil
}

func getInClusterNamespace() (string, error) {
// Check whether the namespace file exists.
// If not, we are not running in cluster so can't guess the namespace.
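
The detection above can also be exercised against a bare discovery client. The sketch below is illustrative only: `chooseLockType` is a hypothetical helper mirroring `getDefaultLockType`, not part of this PR, and it assumes the standard client-go discovery and resourcelock packages.

```go
package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// chooseLockType lists the API groups the server advertises and prefers the
// Lease lock when coordination.k8s.io is among them, falling back to ConfigMaps.
func chooseLockType(dc discovery.DiscoveryInterface) (string, error) {
	groups, err := dc.ServerGroups()
	if err != nil {
		return "", fmt.Errorf("unable to retrieve supported server groups: %v", err)
	}
	for _, g := range groups.Groups {
		if g.Name == coordinationv1.GroupName {
			return resourcelock.LeasesResourceLock, nil
		}
	}
	return resourcelock.ConfigMapsResourceLock, nil
}

func main() {
	dc, err := discovery.NewDiscoveryClientForConfig(config.GetConfigOrDie())
	if err != nil {
		panic(err)
	}
	lockType, err := chooseLockType(dc)
	if err != nil {
		panic(err)
	}
	fmt.Println("selected resource lock:", lockType)
}
```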
13 changes: 13 additions & 0 deletions pkg/leaderelection/leader_election_suite_test.go
@@ -0,0 +1,13 @@
package leaderelection

import (
"testing"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

func TestLeaderElection(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Leader Election Suite")
}
79 changes: 79 additions & 0 deletions pkg/leaderelection/leader_election_test.go
@@ -0,0 +1,79 @@
package leaderelection

import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"

tlog "github.com/go-logr/logr/testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
coordinationv1 "k8s.io/api/coordination/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/internal/recorder"
)

var _ = Describe("Leader Election", func() {
It("should use the Lease lock because coordination group is available.", func() {
coordinationGroup := &v1.APIGroupList{
Groups: []v1.APIGroup{
{Name: coordinationv1.GroupName},
},
}

clientConfig := &restclient.Config{
Transport: interceptAPIGroupCall(coordinationGroup),
}

rProvider, err := recorder.NewProvider(clientConfig, scheme.Scheme, tlog.NullLogger{}, record.NewBroadcaster())
Expect(err).ToNot(HaveOccurred())

lock, err := NewResourceLock(clientConfig, rProvider, Options{LeaderElection: true, LeaderElectionNamespace: "test-ns"})
Expect(err).ToNot(HaveOccurred())
Expect(lock).To(BeAssignableToTypeOf(&resourcelock.LeaseLock{}))
})

It("should use the ConfigMap lock because coordination group is unavailable.", func() {
clientConfig := &restclient.Config{
Transport: interceptAPIGroupCall(&v1.APIGroupList{ /* no coordination group */ }),
}

rProvider, err := recorder.NewProvider(clientConfig, scheme.Scheme, tlog.NullLogger{}, record.NewBroadcaster())
Expect(err).ToNot(HaveOccurred())

lock, err := NewResourceLock(clientConfig, rProvider, Options{LeaderElection: true, LeaderElectionNamespace: "test-ns"})
Expect(err).ToNot(HaveOccurred())
Expect(lock).To(BeAssignableToTypeOf(&resourcelock.ConfigMapLock{}))
})
})

func interceptAPIGroupCall(returnApis *v1.APIGroupList) roundTripper {
return roundTripper(func(req *http.Request) (*http.Response, error) {
if req.Method == "GET" && (req.URL.Path == "/apis" || req.URL.Path == "/api") {
return encode(returnApis)
}
return nil, fmt.Errorf("unexpected request: %v %#v\n%#v", req.Method, req.URL, req)
})
}
func encode(bodyStruct interface{}) (*http.Response, error) {
jsonBytes, err := json.Marshal(bodyStruct)
if err != nil {
return nil, err
}
return &http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(jsonBytes)),
}, nil
}

type roundTripper func(*http.Request) (*http.Response, error)

func (f roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
return f(req)
}
4 changes: 2 additions & 2 deletions pkg/manager/manager.go
@@ -111,10 +111,10 @@ type Options struct {
LeaderElection bool

// LeaderElectionNamespace determines the namespace in which the leader
// election configmap will be created.
// election resource lock will be created.
LeaderElectionNamespace string

// LeaderElectionID determines the name of the configmap that leader election
// LeaderElectionID determines the name of the resource lock that leader election
// will use for holding the leader lock.
LeaderElectionID string

4 changes: 0 additions & 4 deletions pkg/webhook/server.go
@@ -53,10 +53,6 @@ type Server struct {
Port int

// CertDir is the directory that contains the server key and certificate.
// If using FSCertWriter in Provisioner, the server itself will provision the certificate and
// store it in this directory.
// If using SecretCertWriter in Provisioner, the server will provision the certificate in a secret,
// the user is responsible to mount the secret to the this location for the server to consume.
CertDir string

// WebhookMux is the multiplexer that handles different webhooks.