use contextual logging(nodeipam and nodelifecycle part) #112670

Merged
merged 1 commit on Mar 7, 2023
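The diff below covers the nodeipam portion of the change: controller start functions gain a context.Context parameter, stop channels are replaced by ctx.Done(), loggers are obtained with klog.FromContext (or passed explicitly as a klog.Logger where no context is available), and printf-style klog calls become structured key/value logging. The nodelifecycle changes mentioned in the title are not part of this excerpt.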
14 changes: 7 additions & 7 deletions cmd/cloud-controller-manager/nodeipamcontroller.go
@@ -62,11 +62,11 @@ func (nodeIpamController *nodeIPAMController) StartNodeIpamControllerWrapper(ini
nodeIpamController.nodeIPAMControllerOptions.ApplyTo(&nodeIpamController.nodeIPAMControllerConfiguration)

return func(ctx context.Context, controllerContext genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
- return startNodeIpamController(initContext, completedConfig, nodeIpamController.nodeIPAMControllerConfiguration, controllerContext, cloud)
+ return startNodeIpamController(ctx, initContext, completedConfig, nodeIpamController.nodeIPAMControllerConfiguration, controllerContext, cloud)
}
}

- func startNodeIpamController(initContext app.ControllerInitContext, ccmConfig *cloudcontrollerconfig.CompletedConfig, nodeIPAMConfig nodeipamconfig.NodeIPAMControllerConfiguration, ctx genericcontrollermanager.ControllerContext, cloud cloudprovider.Interface) (controller.Interface, bool, error) {
+ func startNodeIpamController(ctx context.Context, initContext app.ControllerInitContext, ccmConfig *cloudcontrollerconfig.CompletedConfig, nodeIPAMConfig nodeipamconfig.NodeIPAMControllerConfiguration, controllerCtx genericcontrollermanager.ControllerContext, cloud cloudprovider.Interface) (controller.Interface, bool, error) {
var serviceCIDR *net.IPNet
var secondaryServiceCIDR *net.IPNet

@@ -130,14 +130,14 @@ func startNodeIpamController(initContext app.ControllerInitContext, ccmConfig *c

var clusterCIDRInformer v1alpha1.ClusterCIDRInformer
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) {
- clusterCIDRInformer = ctx.InformerFactory.Networking().V1alpha1().ClusterCIDRs()
+ clusterCIDRInformer = controllerCtx.InformerFactory.Networking().V1alpha1().ClusterCIDRs()
}

nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
- ctx.InformerFactory.Core().V1().Nodes(),
+ ctx,
+ controllerCtx.InformerFactory.Core().V1().Nodes(),
clusterCIDRInformer,
cloud,
- ctx.ClientBuilder.ClientOrDie(initContext.ClientName),
+ controllerCtx.ClientBuilder.ClientOrDie(initContext.ClientName),
clusterCIDRs,
serviceCIDR,
secondaryServiceCIDR,
@@ -147,7 +147,7 @@ func startNodeIpamController(initContext app.ControllerInitContext, ccmConfig *c
if err != nil {
return nil, true, err
}
- go nodeIpamController.Run(ctx.Stop)
+ go nodeIpamController.Run(ctx)
return nil, true, nil
}

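The cloud-controller-manager hunks above are the core of the migration: the controller's Run takes a context.Context that carries both cancellation and the logger, instead of a bare stop channel. A minimal sketch of that pattern, using a hypothetical myController type rather than the real wrapper API:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

type myController struct{}

// Run blocks until the context is cancelled, which replaces the old
// Run(stopCh <-chan struct{}) shape shown in the deleted lines.
func (c *myController) Run(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting controller")
	defer logger.Info("Shutting down controller")
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()                   // cancelling the context is the new "close(stopCh)"
	(&myController{}).Run(ctx) // returns immediately because ctx is already done
}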
4 changes: 3 additions & 1 deletion cmd/kube-controller-manager/app/core.go
@@ -154,7 +154,9 @@ func startNodeIpamController(ctx context.Context, controllerContext ControllerCo
clusterCIDRInformer = controllerContext.InformerFactory.Networking().V1alpha1().ClusterCIDRs()
}

+ ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "NodeIpamController"))
nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
+ ctx,
controllerContext.InformerFactory.Core().V1().Nodes(),
clusterCIDRInformer,
controllerContext.Cloud,
@@ -168,7 +170,7 @@ func startNodeIpamController(ctx context.Context, controllerContext ControllerCo
if err != nil {
return nil, true, err
}
- go nodeIpamController.RunWithMetrics(ctx.Done(), controllerContext.ControllerManagerMetrics)
+ go nodeIpamController.RunWithMetrics(ctx, controllerContext.ControllerManagerMetrics)
return nil, true, nil
}

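The added klog.NewContext / klog.LoggerWithName line is what makes the logging contextual: any downstream call that retrieves the logger via klog.FromContext(ctx) automatically carries the controller's name. A rough, self-contained illustration (the function names here are made up for the example, not this PR's code):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func startExampleController(ctx context.Context) {
	// Attach a named logger to the context once, near the controller's entry point.
	ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "ExampleController"))
	syncOnce(ctx)
}

func syncOnce(ctx context.Context) {
	// This log line is tagged with "ExampleController" without the callee knowing the name.
	klog.FromContext(ctx).Info("syncing", "attempt", 1)
}

func main() {
	startExampleController(context.Background())
}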
6 changes: 3 additions & 3 deletions pkg/controller/nodeipam/ipam/adapter.go
@@ -61,15 +61,15 @@ func newAdapter(k8s clientset.Interface, cloud *gce.Cloud) *adapter {
return ret
}

- func (a *adapter) Run(stopCh <-chan struct{}) {
+ func (a *adapter) Run(ctx context.Context) {
defer utilruntime.HandleCrash()

// Start event processing pipeline.
a.broadcaster.StartStructuredLogging(0)
a.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: a.k8s.CoreV1().Events("")})
defer a.broadcaster.Shutdown()

- <-stopCh
+ <-ctx.Done()
}

func (a *adapter) Alias(ctx context.Context, node *v1.Node) (*net.IPNet, error) {
@@ -88,7 +88,7 @@ func (a *adapter) Alias(ctx context.Context, node *v1.Node) (*net.IPNet, error)
case 1:
break
default:
klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", node.Name, cidrs)
klog.FromContext(ctx).Info("Node has more than one alias assigned, defaulting to the first", "node", klog.KObj(node), "CIDRs", cidrs)
}

_, cidrRange, err := netutils.ParseCIDRSloppy(cidrs[0])
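The adapter.go hunk also shows the switch from printf-style klog.Warningf to structured key/value logging: the message stays constant, the variable parts become key/value pairs, and klog.KObj formats the node by name. A small sketch of the same call shape, assuming a corev1 Node is already in hand:

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func reportExtraAliases(ctx context.Context, node *v1.Node, cidrs []string) {
	// Structured form: a fixed message plus key/value pairs instead of format verbs.
	klog.FromContext(ctx).Info("Node has more than one alias assigned, defaulting to the first",
		"node", klog.KObj(node), "CIDRs", cidrs)
}

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	reportExtraAliases(context.Background(), node, []string{"10.0.0.0/24", "10.0.1.0/24"})
}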
21 changes: 11 additions & 10 deletions pkg/controller/nodeipam/ipam/cidr_allocator.go
@@ -91,11 +91,11 @@ type CIDRAllocator interface {
// AllocateOrOccupyCIDR looks at the given node, assigns it a valid
// CIDR if it doesn't currently have one or mark the CIDR as used if
// the node already have one.
- AllocateOrOccupyCIDR(node *v1.Node) error
+ AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node) error
// ReleaseCIDR releases the CIDR of the removed node.
- ReleaseCIDR(node *v1.Node) error
+ ReleaseCIDR(logger klog.Logger, node *v1.Node) error
// Run starts all the working logic of the allocator.
- Run(stopCh <-chan struct{})
+ Run(ctx context.Context)
}

// CIDRAllocatorParams is parameters that's required for creating new
@@ -119,29 +119,30 @@ type nodeReservedCIDRs struct {
}

// New creates a new CIDR range allocator.
- func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, clusterCIDRInformer networkinginformers.ClusterCIDRInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
- nodeList, err := listNodes(kubeClient)
+ func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, clusterCIDRInformer networkinginformers.ClusterCIDRInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
+ logger := klog.FromContext(ctx)
+ nodeList, err := listNodes(logger, kubeClient)
if err != nil {
return nil, err
}

switch allocatorType {
case RangeAllocatorType:
- return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList)
+ return NewCIDRRangeAllocator(logger, kubeClient, nodeInformer, allocatorParams, nodeList)
case MultiCIDRRangeAllocatorType:
if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) {
return nil, fmt.Errorf("invalid CIDR allocator type: %v, feature gate %v must be enabled", allocatorType, features.MultiCIDRRangeAllocator)
}
- return NewMultiCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRInformer, allocatorParams, nodeList, nil)
+ return NewMultiCIDRRangeAllocator(ctx, kubeClient, nodeInformer, clusterCIDRInformer, allocatorParams, nodeList, nil)

case CloudAllocatorType:
- return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
+ return NewCloudCIDRAllocator(logger, kubeClient, cloud, nodeInformer)
default:
return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
}
}

- func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
+ func listNodes(logger klog.Logger, kubeClient clientset.Interface) (*v1.NodeList, error) {
var nodeList *v1.NodeList
// We must poll because apiserver might not be up. This error causes
// controller manager to restart.
@@ -152,7 +153,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
LabelSelector: labels.Everything().String(),
})
if err != nil {
klog.Errorf("Failed to list all nodes: %v", err)
logger.Error(err, "Failed to list all nodes")
return false, nil
}
return true, nil
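Where a method has no context parameter, the CIDRAllocator interface above takes a klog.Logger as an explicit first argument instead, and listNodes uses logger.Error(err, ...) in place of klog.Errorf. A simplified sketch of that style with a made-up allocator type (not the real CIDRAllocator implementation):

package main

import (
	"k8s.io/klog/v2"
)

type fakeAllocator struct{}

// The logger arrives as an argument because this call path has no ctx to unwrap.
func (a *fakeAllocator) AllocateOrOccupyCIDR(logger klog.Logger, nodeName string) error {
	logger.V(4).Info("Allocating CIDR", "node", nodeName)
	return nil
}

func main() {
	logger := klog.Background() // fallback logger when no context is available
	if err := (&fakeAllocator{}).AllocateOrOccupyCIDR(logger, "node-1"); err != nil {
		logger.Error(err, "Failed to allocate CIDR", "node", "node-1")
	}
}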
52 changes: 26 additions & 26 deletions pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go
@@ -23,7 +23,6 @@ import (
"testing"

"k8s.io/component-base/metrics/testutil"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)

@@ -558,34 +557,35 @@ func TestGetBitforCIDR(t *testing.T) {
}

for _, tc := range cases {
- _, clusterCIDR, err := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
- if err != nil {
- t.Fatalf("unexpected error: %v for %v", err, tc.description)
- }

- cs, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
- if err != nil {
- t.Fatalf("Error allocating CIDRSet for %v", tc.description)
- }
- _, subnetCIDR, err := netutils.ParseCIDRSloppy(tc.subNetCIDRStr)
- if err != nil {
- t.Fatalf("unexpected error: %v for %v", err, tc.description)
- }
+ t.Run(tc.description, func(t *testing.T) {
+ _, clusterCIDR, err := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }

- got, err := cs.getIndexForCIDR(subnetCIDR)
- if err == nil && tc.expectErr {
- klog.Errorf("expected error but got null for %v", tc.description)
- continue
- }
+ cs, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
+ if err != nil {
+ t.Fatalf("Error allocating CIDRSet")
+ }
+ _, subnetCIDR, err := netutils.ParseCIDRSloppy(tc.subNetCIDRStr)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ got, err := cs.getIndexForCIDR(subnetCIDR)
+ if err == nil && tc.expectErr {
+ t.Errorf("expected error but got null")
+ return
+ }

- if err != nil && !tc.expectErr {
- klog.Errorf("unexpected error: %v for %v", err, tc.description)
- continue
- }
+ if err != nil && !tc.expectErr {
+ t.Errorf("unexpected error: %v", err)
+ return
+ }

- if got != tc.expectedBit {
- klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
- }
+ if got != tc.expectedBit {
+ t.Errorf("expected %v, but got %v", tc.expectedBit, got)
+ }
+ })
}
}

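The test rewrite removes klog from the test entirely: each case becomes a t.Run subtest, failures go through t.Errorf/t.Fatalf, and continue becomes return because the closure exits the subtest rather than the loop. The same shape in isolation, with hypothetical table entries instead of the real CIDR cases:

package example

import "testing"

func TestDouble(t *testing.T) {
	cases := []struct {
		description string
		in, want    int
	}{
		{description: "double of two", in: 2, want: 4},
		{description: "double of zero", in: 0, want: 0},
	}
	for _, tc := range cases {
		t.Run(tc.description, func(t *testing.T) {
			got := tc.in * 2
			if got != tc.want {
				// t.Errorf fails only this subtest; return exits the closure.
				t.Errorf("expected %v, but got %v", tc.want, got)
				return
			}
		})
	}
}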