Skip to content

Commit

Permalink
Enable operator operation without kubernetes.
Browse files Browse the repository at this point in the history
This is intended for use-cases of cilium with other orchestrators compatible
with CNI or docker's network plugins, so we can have an operator running
against kvstore, but w/o requiring mandatory access to k8s.

This is addressed by adding a new command argument (--enable-k8s=false,
w/ true as default), which enables basic usage of the operator (lease cleanup, ipam address gc, etc.).

Signed-off-by: Pablo Ruiz <pablo.ruiz@gmail.com>
  • Loading branch information
pruiz committed Oct 19, 2022
1 parent 229686a commit 1291049
Show file tree
Hide file tree
Showing 8 changed files with 74 additions and 46 deletions.
1 change: 1 addition & 0 deletions Documentation/cmdref/cilium-operator-alibabacloud.md

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Documentation/cmdref/cilium-operator-aws.md

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Documentation/cmdref/cilium-operator-azure.md

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Documentation/cmdref/cilium-operator-generic.md

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Documentation/cmdref/cilium-operator.md

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions operator/cmd/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -321,6 +321,9 @@ func init() {
flags.StringSlice(operatorOption.IngressLBAnnotationPrefixes, operatorOption.IngressLBAnnotationsDefault, "Annotation prefixes for propagating from Ingress to the Load Balancer service")
option.BindEnv(Vp, operatorOption.IngressLBAnnotationPrefixes)

flags.Bool(operatorOption.EnableK8s, true, `Enable operation of Kubernetes-related services/controllers when using Cilium with Kubernetes`)
option.BindEnv(Vp, operatorOption.EnableK8s)

flags.Duration(option.KVstoreLeaseTTL, defaults.KVstoreLeaseTTL, "Time-to-live for the KVstore lease.")
flags.MarkHidden(option.KVstoreLeaseTTL)
option.BindEnv(Vp, option.KVstoreLeaseTTL)
Expand Down
101 changes: 55 additions & 46 deletions operator/cmd/root.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,6 @@ func Execute() {
}

func registerOperatorHooks(lc hive.Lifecycle, llc *LeaderLifecycle, clientset k8sClient.Clientset, shutdowner hive.Shutdowner) {
k8s.SetClients(clientset, clientset.Slim(), clientset, clientset)

lc.Append(hive.Hook{
OnStart: func(context.Context) error {
Expand Down Expand Up @@ -239,12 +238,16 @@ func runOperator(lc *LeaderLifecycle, clientset k8sClient.Clientset, shutdowner
}
close(allSystemsGo)

go func() {
err = srv.WithStatusCheckFunc(func() error { return checkStatus(clientset) }).StartServer()
if err != nil {
log.WithError(err).Fatalf("Unable to start operator apiserver")
}
}()
if operatorOption.Config.EnableK8s {
k8s.SetClients(clientset, clientset.Slim(), clientset, clientset)

go func() {
err = srv.WithStatusCheckFunc(func() error { return checkStatus(clientset) }).StartServer()
if err != nil {
log.WithError(err).Fatalf("Unable to start operator apiserver")
}
}()
}

if operatorOption.Config.EnableMetrics {
operatorMetrics.Register()
Expand All @@ -254,15 +257,17 @@ func runOperator(lc *LeaderLifecycle, clientset k8sClient.Clientset, shutdowner
pprof.Enable(operatorOption.Config.PProfPort)
}

capabilities := k8sversion.Capabilities()
if !capabilities.MinimalVersionMet {
log.Fatalf("Minimal kubernetes version not met: %s < %s",
k8sversion.Version(), k8sversion.MinimalVersionConstraint)
if k8s.IsEnabled() {
capabilities := k8sversion.Capabilities()
if !capabilities.MinimalVersionMet {
log.Fatalf("Minimal kubernetes version not met: %s < %s",
k8sversion.Version(), k8sversion.MinimalVersionConstraint)
}
}

// Register the CRDs after validating that we are running on a supported
// version of K8s.
if !operatorOption.Config.SkipCRDCreation {
if k8s.IsEnabled() && !operatorOption.Config.SkipCRDCreation {
if err := client.RegisterCRDs(); err != nil {
log.WithError(err).Fatal("Unable to register CRDs")
}
Expand All @@ -273,7 +278,7 @@ func runOperator(lc *LeaderLifecycle, clientset k8sClient.Clientset, shutdowner
// We only support Operator in HA mode for Kubernetes Versions having support for
// LeasesResourceLock.
// See docs on capabilities.LeasesResourceLock for more context.
if !capabilities.LeasesResourceLock {
if !k8sversion.Capabilities().LeasesResourceLock {
log.Info("Support for coordination.k8s.io/v1 not present, fallback to non HA mode")

if err := lc.Start(leaderElectionCtx); err != nil {
Expand Down Expand Up @@ -389,7 +394,7 @@ func (legacy *legacyOnLeader) onStart(_ context.Context) error {
IsLeader.Store(true)

// If CiliumEndpointSlice feature is enabled, create CESController, start CEP watcher and run controller.
if !option.Config.DisableCiliumEndpointCRD && option.Config.EnableCiliumEndpointSlice {
if k8s.IsEnabled() && !option.Config.DisableCiliumEndpointCRD && option.Config.EnableCiliumEndpointSlice {
log.Info("Create and run CES controller, start CEP watcher")
// Initialize the CES controller
cesController := ces.NewCESController(legacy.clientset,
Expand All @@ -409,7 +414,9 @@ func (legacy *legacyOnLeader) onStart(_ context.Context) error {
// etcd from reaching out kube-dns in EKS.
// If this logic is modified, make sure the operator's clusterrole logic for
// pods/delete is also up-to-date.
if option.Config.DisableCiliumEndpointCRD {
if !k8s.IsEnabled() {
log.Infof("KubeDNS unmanaged pods controller disabled due to kubernetes support not enabled")
} else if option.Config.DisableCiliumEndpointCRD {
log.Infof("KubeDNS unmanaged pods controller disabled as %q option is set to 'disabled' in Cilium ConfigMap", option.DisableCiliumEndpointCRDName)
} else if operatorOption.Config.UnmanagedPodWatcherInterval != 0 {
go enableUnmanagedKubeDNSController(legacy.clientset)
Expand Down Expand Up @@ -448,16 +455,14 @@ func (legacy *legacyOnLeader) onStart(_ context.Context) error {
}

if kvstoreEnabled() {
if operatorOption.Config.SyncK8sServices {
operatorWatchers.StartSynchronizingServices(legacy.clientset, true, option.Config)
}

var goopts *kvstore.ExtraOptions
scopedLog := log.WithFields(logrus.Fields{
"kvstore": option.Config.KVStore,
"address": option.Config.KVStoreOpt[fmt.Sprintf("%s.address", option.Config.KVStore)],
})
if operatorOption.Config.SyncK8sServices {

if k8s.IsEnabled() && operatorOption.Config.SyncK8sServices {
operatorWatchers.StartSynchronizingServices(legacy.clientset, true, option.Config)
// If K8s is enabled we can do the service translation automagically by
// looking at services from k8s and retrieve the service IP from that.
// This makes cilium to not depend on kube dns to interact with etcd
Expand Down Expand Up @@ -521,7 +526,7 @@ func (legacy *legacyOnLeader) onStart(_ context.Context) error {
scopedLog.WithError(err).Fatal("Unable to setup kvstore")
}

if operatorOption.Config.SyncK8sNodes {
if k8s.IsEnabled() && operatorOption.Config.SyncK8sNodes {
withKVStore = true
}

Expand All @@ -542,16 +547,18 @@ func (legacy *legacyOnLeader) onStart(_ context.Context) error {
operatorWatchers.HandleNodeTolerationAndTaints(legacy.clientset, stopCh)
}

if err := startSynchronizingCiliumNodes(legacy.ctx, legacy.clientset, nodeManager, withKVStore); err != nil {
log.WithError(err).Fatal("Unable to setup node watcher")
}
if k8s.IsEnabled() {
if err := startSynchronizingCiliumNodes(legacy.ctx, legacy.clientset, nodeManager, withKVStore); err != nil {
log.WithError(err).Fatal("Unable to setup node watcher")
}

if operatorOption.Config.CNPNodeStatusGCInterval != 0 {
RunCNPNodeStatusGC(legacy.clientset, ciliumNodeStore)
}
if operatorOption.Config.CNPNodeStatusGCInterval != 0 {
RunCNPNodeStatusGC(legacy.clientset, ciliumNodeStore)
}

if operatorOption.Config.NodesGCInterval != 0 {
operatorWatchers.RunCiliumNodeGC(legacy.ctx, legacy.clientset, ciliumNodeStore, operatorOption.Config.NodesGCInterval)
if operatorOption.Config.NodesGCInterval != 0 {
operatorWatchers.RunCiliumNodeGC(legacy.ctx, legacy.clientset, ciliumNodeStore, operatorOption.Config.NodesGCInterval)
}
}

if option.Config.IPAM == ipamOption.IPAMClusterPool || option.Config.IPAM == ipamOption.IPAMClusterPoolV2 {
Expand Down Expand Up @@ -595,25 +602,27 @@ func (legacy *legacyOnLeader) onStart(_ context.Context) error {
}
}

if operatorOption.Config.EndpointGCInterval != 0 {
enableCiliumEndpointSyncGC(legacy.clientset, false)
} else {
// Even if the EndpointGC is disabled we still want it to run at least
// once. This is to prevent leftover CEPs from populating ipcache with
// stale entries.
enableCiliumEndpointSyncGC(legacy.clientset, true)
}
if k8s.IsEnabled() {
if operatorOption.Config.EndpointGCInterval != 0 {
enableCiliumEndpointSyncGC(legacy.clientset, false)
} else {
// Even if the EndpointGC is disabled we still want it to run at least
// once. This is to prevent leftover CEPs from populating ipcache with
// stale entries.
enableCiliumEndpointSyncGC(legacy.clientset, true)
}

err = enableCNPWatcher(legacy.clientset)
if err != nil {
log.WithError(err).WithField(logfields.LogSubsys, "CNPWatcher").Fatal(
"Cannot connect to Kubernetes apiserver ")
}
err = enableCNPWatcher(legacy.clientset)
if err != nil {
log.WithError(err).WithField(logfields.LogSubsys, "CNPWatcher").Fatal(
"Cannot connect to Kubernetes apiserver ")
}

err = enableCCNPWatcher(legacy.clientset)
if err != nil {
log.WithError(err).WithField(logfields.LogSubsys, "CCNPWatcher").Fatal(
"Cannot connect to Kubernetes apiserver ")
err = enableCCNPWatcher(legacy.clientset)
if err != nil {
log.WithError(err).WithField(logfields.LogSubsys, "CCNPWatcher").Fatal(
"Cannot connect to Kubernetes apiserver ")
}
}

if operatorOption.Config.EnableIngressController {
Expand Down
11 changes: 11 additions & 0 deletions operator/option/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,11 @@ const (
// IngressDefaultLoadbalancerMode is the default loadbalancer mode for Ingress.
// Applicable values: dedicated, shared
IngressDefaultLoadbalancerMode = "ingress-default-lb-mode"

// EnableK8s enables operation of Kubernetes-related services/controllers.
// Intended for operating cilium with CNI-compatible orchestrators
// other than Kubernetes. (default is true)
EnableK8s = "enable-k8s"
)

// OperatorConfig is the configuration used by the operator.
Expand Down Expand Up @@ -508,6 +513,11 @@ type OperatorConfig struct {
// IngressDefaultLoadbalancerMode is the default loadbalancer mode for Ingress.
// Applicable values: dedicated, shared
IngressDefaultLoadbalancerMode string

// EnableK8s enables/disables operation of Kubernetes-related services/controllers.
// Intended for operating cilium with CNI-compatible orchestrators
// other than Kubernetes. (default is true)
EnableK8s bool
}

// Populate sets all options with the values from viper.
Expand Down Expand Up @@ -548,6 +558,7 @@ func (c *OperatorConfig) Populate(vp *viper.Viper) {
c.IngressLBAnnotationPrefixes = vp.GetStringSlice(IngressLBAnnotationPrefixes)
c.IngressSharedLBServiceName = vp.GetString(IngressSharedLBServiceName)
c.IngressDefaultLoadbalancerMode = vp.GetString(IngressDefaultLoadbalancerMode)
c.EnableK8s = vp.GetBool(EnableK8s)

c.CiliumK8sNamespace = vp.GetString(CiliumK8sNamespace)

Expand Down

0 comments on commit 1291049

Please sign in to comment.