Merge pull request #934 from nilo19/chore/bump-cloud-provider
chore: bump k8s.io/cloud-provider to v0.23.0
k8s-ci-robot committed Dec 10, 2021
2 parents bc6f6a1 + 5cd1b18 · commit e77588d
Showing 22 changed files with 400 additions and 583 deletions.
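
The source-level impact of this bump, visible in the two files excerpted below, is that the cloud-provider controller plumbing now threads a context.Context through controller start-up while keeping the existing stop-channel parameters (startControllers is now invoked with both ctx and ctx.Done()). As a rough, stdlib-only illustration of why the two shutdown mechanisms interoperate, here is a hypothetical helper (not code from this repository) that derives a cancellable context from a legacy stop channel, and treats ctx.Done() as a stop channel in the other direction:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// contextFromStopCh is a hypothetical helper: it returns a context that is
// cancelled when stopCh closes, so ctx-based Run methods can be driven from a
// legacy <-chan struct{} stop channel.
func contextFromStopCh(stopCh <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		select {
		case <-stopCh:
			cancel()
		case <-ctx.Done():
		}
	}()
	return ctx, cancel
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// Going the other way is free: ctx.Done() already satisfies a stop-channel
	// parameter, which is how startControllers is called in the diff below.
	stopCh := ctx.Done()

	derived, cancelDerived := contextFromStopCh(stopCh)
	defer cancelDerived()

	<-derived.Done()
	fmt.Println("derived context cancelled:", derived.Err())
}
```
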
20 changes: 10 additions & 10 deletions cmd/cloud-controller-manager/app/controllermanager.go
@@ -338,30 +338,30 @@ func Run(ctx context.Context, c *cloudcontrollerconfig.CompletedConfig) error {
klog.Errorf("unable to register configz: %v", err)
}

if err := startControllers(c, ctx.Done(), cloud, newControllerInitializers()); err != nil {
if err := startControllers(ctx, c, ctx.Done(), cloud, newControllerInitializers()); err != nil {
klog.Fatalf("error running controllers: %v", err)
}

return nil
}

// startControllers starts the cloud specific controller loops.
func startControllers(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}, cloud cloudprovider.Interface, controllers map[string]initFunc) error {
func startControllers(ctx context.Context, completedConfig *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}, cloud cloudprovider.Interface, controllers map[string]initFunc) error {
// Initialize the cloud provider with a reference to the clientBuilder
cloud.Initialize(c.ClientBuilder, stopCh)
cloud.Initialize(completedConfig.ClientBuilder, stopCh)
// Set the informer on the user cloud object
if informerUserCloud, ok := cloud.(cloudprovider.InformerUser); ok {
informerUserCloud.SetInformers(c.SharedInformers)
informerUserCloud.SetInformers(completedConfig.SharedInformers)
}

for controllerName, initFn := range controllers {
if !genericcontrollermanager.IsControllerEnabled(controllerName, ControllersDisabledByDefault, c.ComponentConfig.Generic.Controllers) {
if !genericcontrollermanager.IsControllerEnabled(controllerName, ControllersDisabledByDefault, completedConfig.ComponentConfig.Generic.Controllers) {
klog.Warningf("%q is disabled", controllerName)
continue
}

klog.V(1).Infof("Starting %q", controllerName)
_, started, err := initFn(c, cloud, stopCh)
_, started, err := initFn(ctx, completedConfig, cloud, stopCh)
if err != nil {
klog.Errorf("Error starting %q: %s", controllerName, err.Error())
return err
@@ -372,17 +372,17 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan st
}
klog.Infof("Started %q", controllerName)

time.Sleep(wait.Jitter(c.ComponentConfig.Generic.ControllerStartInterval.Duration, ControllerStartJitter))
time.Sleep(wait.Jitter(completedConfig.ComponentConfig.Generic.ControllerStartInterval.Duration, ControllerStartJitter))
}

// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
if err := genericcontrollermanager.WaitForAPIServer(c.VersionedClient, 10*time.Second); err != nil {
if err := genericcontrollermanager.WaitForAPIServer(completedConfig.VersionedClient, 10*time.Second); err != nil {
klog.Fatalf("Failed to wait for apiserver being healthy: %v", err)
}

klog.V(2).Infof("startControllers: starting shared informers")
c.SharedInformers.Start(stopCh)
completedConfig.SharedInformers.Start(stopCh)

<-stopCh
klog.V(1).Infof("startControllers: received stopping signal, exiting")
@@ -393,7 +393,7 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan st
// initFunc is used to launch a particular controller. It may run additional "should I activate checks".
// Any error returned will cause the controller process to `Fatal`
// The bool indicates whether the controller was enabled.
type initFunc func(ctx *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stop <-chan struct{}) (debuggingHandler http.Handler, enabled bool, err error)
type initFunc func(ctx context.Context, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stop <-chan struct{}) (debuggingHandler http.Handler, enabled bool, err error)

// KnownControllers returns the names of the controllers that are known by default.
func KnownControllers() []string {
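
In the hunks above, initFunc gains a leading ctx context.Context parameter and the config argument is renamed from c/ctx to completedConfig. Below is a self-contained, hypothetical sketch of that shape; Config, Cloud, azureCloud, and startDemoController are stand-ins invented for illustration, not the real cloudcontrollerconfig.CompletedConfig or cloudprovider.Interface types:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// Config and Cloud are stand-ins for *cloudcontrollerconfig.CompletedConfig and
// cloudprovider.Interface; they exist only to keep this sketch self-contained.
type Config struct{ ClusterName string }

type Cloud interface{ ProviderName() string }

type azureCloud struct{}

func (azureCloud) ProviderName() string { return "azure" }

// initFunc mirrors the new signature above: ctx first, then the completed config.
type initFunc func(ctx context.Context, completedConfig *Config, cloud Cloud, stop <-chan struct{}) (http.Handler, bool, error)

// startDemoController is a hypothetical controller starter following that shape.
func startDemoController(ctx context.Context, completedConfig *Config, cloud Cloud, stop <-chan struct{}) (http.Handler, bool, error) {
	go func() {
		// The controller can now observe shutdown via ctx as well as via stop.
		<-ctx.Done()
		fmt.Printf("%s controller for cluster %q stopping\n", cloud.ProviderName(), completedConfig.ClusterName)
	}()
	return nil, true, nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	controllers := map[string]initFunc{"demo": startDemoController}
	for name, initFn := range controllers {
		if _, started, err := initFn(ctx, &Config{ClusterName: "kubernetes"}, azureCloud{}, ctx.Done()); err != nil || !started {
			fmt.Printf("controller %q did not start: %v\n", name, err)
		}
	}
	cancel()
	time.Sleep(10 * time.Millisecond) // give the goroutine a moment to log before exit
}
```
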
75 changes: 38 additions & 37 deletions cmd/cloud-controller-manager/app/core.go
@@ -21,6 +21,7 @@ limitations under the License.
package app

import (
"context"
"errors"
"fmt"
"net"
@@ -52,14 +53,14 @@ const (
IPv6DualStack featuregate.Feature = "IPv6DualStack"
)

func startCloudNodeController(ctx *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
func startCloudNodeController(ctx context.Context, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
// Start the CloudNodeController
nodeController, err := nodecontroller.NewCloudNodeController(
ctx.SharedInformers.Core().V1().Nodes(),
completedConfig.SharedInformers.Core().V1().Nodes(),
// cloud node controller uses existing cluster role from node-controller
ctx.ClientBuilder.ClientOrDie("node-controller"),
completedConfig.ClientBuilder.ClientOrDie("node-controller"),
cloud,
ctx.ComponentConfig.NodeStatusUpdateFrequency.Duration,
completedConfig.ComponentConfig.NodeStatusUpdateFrequency.Duration,
)
if err != nil {
klog.Warningf("failed to start cloud node controller: %s", err)
@@ -71,33 +72,33 @@ func startCloudNodeController(ctx *cloudcontrollerconfig.CompletedConfig, cloud
return nil, true, nil
}

func startCloudNodeLifecycleController(ctx *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
func startCloudNodeLifecycleController(ctx context.Context, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
// Start the cloudNodeLifecycleController
cloudNodeLifecycleController, err := nodelifecyclecontroller.NewCloudNodeLifecycleController(
ctx.SharedInformers.Core().V1().Nodes(),
completedConfig.SharedInformers.Core().V1().Nodes(),
// cloud node lifecycle controller uses existing cluster role from node-controller
ctx.ClientBuilder.ClientOrDie("node-controller"),
completedConfig.ClientBuilder.ClientOrDie("node-controller"),
cloud,
ctx.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
completedConfig.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
)
if err != nil {
klog.Warningf("failed to start cloud node lifecycle controller: %s", err)
return nil, false, nil
}

go cloudNodeLifecycleController.Run(stopCh)
go cloudNodeLifecycleController.Run(ctx)

return nil, true, nil
}

func startServiceController(ctx *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
func startServiceController(ctx context.Context, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
// Start the service controller
serviceController, err := servicecontroller.New(
cloud,
ctx.ClientBuilder.ClientOrDie("service-controller"),
ctx.SharedInformers.Core().V1().Services(),
ctx.SharedInformers.Core().V1().Nodes(),
ctx.ComponentConfig.KubeCloudShared.ClusterName,
completedConfig.ClientBuilder.ClientOrDie("service-controller"),
completedConfig.SharedInformers.Core().V1().Services(),
completedConfig.SharedInformers.Core().V1().Nodes(),
completedConfig.ComponentConfig.KubeCloudShared.ClusterName,
utilfeature.DefaultFeatureGate,
)
if err != nil {
@@ -106,14 +107,14 @@ func startServiceController(ctx *cloudcontrollerconfig.CompletedConfig, cloud cl
return nil, false, nil
}

go serviceController.Run(stopCh, int(ctx.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
go serviceController.Run(ctx, int(completedConfig.ComponentConfig.ServiceController.ConcurrentServiceSyncs))

return nil, true, nil
}

func startRouteController(ctx *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
if !ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
klog.Infof("Will not configure cloud provider routes, --configure-cloud-routes: %v.", ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
func startRouteController(ctx context.Context, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
if !completedConfig.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
klog.Infof("Will not configure cloud provider routes, --configure-cloud-routes: %v.", completedConfig.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
return nil, false, nil
}

@@ -125,7 +126,7 @@ func startRouteController(ctx *cloudcontrollerconfig.CompletedConfig, cloud clou
}

// failure: bad cidrs in config
clusterCIDRs, dualStack, err := processCIDRs(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
clusterCIDRs, dualStack, err := processCIDRs(completedConfig.ComponentConfig.KubeCloudShared.ClusterCIDR)
if err != nil {
return nil, false, err
}
@@ -147,27 +148,27 @@ func startRouteController(ctx *cloudcontrollerconfig.CompletedConfig, cloud clou

routeController := routecontroller.New(
routes,
ctx.ClientBuilder.ClientOrDie("route-controller"),
ctx.SharedInformers.Core().V1().Nodes(),
ctx.ComponentConfig.KubeCloudShared.ClusterName,
completedConfig.ClientBuilder.ClientOrDie("route-controller"),
completedConfig.SharedInformers.Core().V1().Nodes(),
completedConfig.ComponentConfig.KubeCloudShared.ClusterName,
clusterCIDRs,
)
go routeController.Run(stopCh, ctx.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
go routeController.Run(ctx, completedConfig.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)

return nil, true, nil
}

func startNodeIpamController(ctx *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
func startNodeIpamController(ctx context.Context, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (http.Handler, bool, error) {
var serviceCIDR *net.IPNet
var secondaryServiceCIDR *net.IPNet

// should we start nodeIPAM
if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
if !completedConfig.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
return nil, false, nil
}

// failure: bad cidrs in config
clusterCIDRs, dualStack, err := processCIDRs(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
clusterCIDRs, dualStack, err := processCIDRs(completedConfig.ComponentConfig.KubeCloudShared.ClusterCIDR)
if err != nil {
return nil, false, err
}
@@ -188,17 +189,17 @@ func startNodeIpamController(ctx *cloudcontrollerconfig.CompletedConfig, cloud c
}

// service cidr processing
if len(strings.TrimSpace(ctx.NodeIPAMControllerConfig.ServiceCIDR)) != 0 {
_, serviceCIDR, err = net.ParseCIDR(ctx.NodeIPAMControllerConfig.ServiceCIDR)
if len(strings.TrimSpace(completedConfig.NodeIPAMControllerConfig.ServiceCIDR)) != 0 {
_, serviceCIDR, err = net.ParseCIDR(completedConfig.NodeIPAMControllerConfig.ServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.NodeIPAMControllerConfig.ServiceCIDR, err)
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", completedConfig.NodeIPAMControllerConfig.ServiceCIDR, err)
}
}

if len(strings.TrimSpace(ctx.NodeIPAMControllerConfig.SecondaryServiceCIDR)) != 0 {
_, secondaryServiceCIDR, err = net.ParseCIDR(ctx.NodeIPAMControllerConfig.SecondaryServiceCIDR)
if len(strings.TrimSpace(completedConfig.NodeIPAMControllerConfig.SecondaryServiceCIDR)) != 0 {
_, secondaryServiceCIDR, err = net.ParseCIDR(completedConfig.NodeIPAMControllerConfig.SecondaryServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.NodeIPAMControllerConfig.SecondaryServiceCIDR, err)
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", completedConfig.NodeIPAMControllerConfig.SecondaryServiceCIDR, err)
}
}

@@ -223,11 +224,11 @@ func startNodeIpamController(ctx *cloudcontrollerconfig.CompletedConfig, cloud c
if utilfeature.DefaultFeatureGate.Enabled(IPv6DualStack) {
// only --node-cidr-mask-size-ipv4 and --node-cidr-mask-size-ipv6 supported with dual stack clusters.
// --node-cidr-mask-size flag is incompatible with dual stack clusters.
nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6, err = setNodeCIDRMaskSizesDualStack(ctx.NodeIPAMControllerConfig)
nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6, err = setNodeCIDRMaskSizesDualStack(completedConfig.NodeIPAMControllerConfig)
} else {
// only --node-cidr-mask-size supported with single stack clusters.
// --node-cidr-mask-size-ipv4 and --node-cidr-mask-size-ipv6 flags are incompatible with dual stack clusters.
nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6, err = setNodeCIDRMaskSizes(ctx.NodeIPAMControllerConfig)
nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6, err = setNodeCIDRMaskSizes(completedConfig.NodeIPAMControllerConfig)
}

if err != nil {
@@ -238,14 +239,14 @@ func startNodeIpamController(ctx *cloudcontrollerconfig.CompletedConfig, cloud c
nodeCIDRMaskSizes := getNodeCIDRMaskSizes(clusterCIDRs, nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6)

nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
ctx.SharedInformers.Core().V1().Nodes(),
completedConfig.SharedInformers.Core().V1().Nodes(),
cloud,
ctx.ClientBuilder.ClientOrDie("node-controller"),
completedConfig.ClientBuilder.ClientOrDie("node-controller"),
clusterCIDRs,
serviceCIDR,
secondaryServiceCIDR,
nodeCIDRMaskSizes,
ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType),
ipam.CIDRAllocatorType(completedConfig.ComponentConfig.KubeCloudShared.CIDRAllocatorType),
)
if err != nil {
return nil, true, err
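
In core.go the controller Run calls switch from a stop channel to the context (for example cloudNodeLifecycleController.Run(ctx), serviceController.Run(ctx, ...), and routeController.Run(ctx, ...)). A minimal, hypothetical sketch of that ctx-based Run pattern, using only the standard library rather than this repository's controllers:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// demoController is a stand-in, not one of the repository's controllers.
type demoController struct{ period time.Duration }

// Run reconciles on a fixed period until ctx is cancelled, mirroring calls
// such as routeController.Run(ctx, reconciliationPeriod) in the diff above.
func (c *demoController) Run(ctx context.Context) {
	ticker := time.NewTicker(c.period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			fmt.Println("controller stopping:", ctx.Err())
			return
		case <-ticker.C:
			fmt.Println("reconcile tick")
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	(&demoController{period: 100 * time.Millisecond}).Run(ctx)
}
```

The design point is that cancellation now propagates through a single ctx rather than a separately managed channel, which is what lets Run(ctx) replace Run(stopCh) without changing the shutdown semantics.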
