From e43cc2b50e51e10f389e7708fa16dcf0d05db5fa Mon Sep 17 00:00:00 2001 From: Yifeng Xiao Date: Mon, 11 Mar 2024 17:07:53 +0800 Subject: [PATCH 1/2] Support specifying Pod IPPool type. --- pkg/cloudprovider/vsphereparavirtual/cloud.go | 23 ++++++++++++++++++- .../controllers/routablepod/core.go | 8 ++++--- .../ippoolmanager/interfaces.go | 5 ++-- .../ippoolmanager/v1alpha2/ippoolmanager.go | 5 +++- 4 files changed, 34 insertions(+), 7 deletions(-) diff --git a/pkg/cloudprovider/vsphereparavirtual/cloud.go b/pkg/cloudprovider/vsphereparavirtual/cloud.go index cf8650fde..7ccd21b62 100644 --- a/pkg/cloudprovider/vsphereparavirtual/cloud.go +++ b/pkg/cloudprovider/vsphereparavirtual/cloud.go @@ -46,6 +46,12 @@ const ( // CloudControllerManagerNS is the namespace for vsphere paravirtual cluster cloud provider CloudControllerManagerNS = "vmware-system-cloud-provider" + + // PublicIPPoolType allows Pod IP address routable outside of Tier 0 router. + PublicIPPoolType = "Public" + + // PrivateIPPoolType allows Pod IP address routable within VPC router. + PrivateIPPoolType = "Private" ) var ( @@ -60,6 +66,9 @@ var ( // vpcModeEnabled if set to true, ippool and node controller will process v1alpha1 StaticRoute and v1alpha2 IPPool, otherwise v1alpha1 RouteSet and v1alpha1 IPPool vpcModeEnabled bool + + // podIPPoolType specify if Pod IP addresses is public or private. + podIPPoolType string ) func init() { @@ -86,6 +95,7 @@ func init() { flag.BoolVar(&vmservice.IsLegacy, "is-legacy-paravirtual", false, "If true, machine label selector will start with capw.vmware.com. By default, it's false, machine label selector will start with capv.vmware.com.") flag.BoolVar(&vpcModeEnabled, "enable-vpc-mode", false, "If true, routable pod controller will start with VPC mode. It is useful only when route controller is enabled in vsphereparavirtual mode") + flag.StringVar(&podIPPoolType, "pod-ip-pool-type", "", "Specify if Pod IP address is Public or Private routable in VPC network. 
Valid values are Public and Private") } // Creates new Controller node interface and returns @@ -101,6 +111,17 @@ func newVSphereParavirtual(cfg *cpcfg.Config) (*VSphereParavirtual, error) { func (cp *VSphereParavirtual) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { klog.V(0).Info("Initing vSphere Paravirtual Cloud Provider") + if vpcModeEnabled { + if podIPPoolType != PublicIPPoolType && podIPPoolType != PrivateIPPoolType { + klog.Fatalf("Pod IP Pool Type can be either Public or Private in VPC network, %s is not supported", podIPPoolType) + } + } else { + // NSX-T T1 or VDS network + if podIPPoolType != "" { + klog.Fatal("Pod IP Pool Type can be set only when the network is VPC") + } + } + ownerRef, err := readOwnerRef(VsphereParavirtualCloudProviderConfigPath) if err != nil { klog.Fatalf("Failed to read ownerRef:%s", err) @@ -148,7 +169,7 @@ func (cp *VSphereParavirtual) Initialize(clientBuilder cloudprovider.ControllerC if RouteEnabled { klog.V(0).Info("Starting routable pod controllers") - if err := routablepod.StartControllers(kcfg, client, cp.informMgr, ClusterName, clusterNS, ownerRef, vpcModeEnabled); err != nil { + if err := routablepod.StartControllers(kcfg, client, cp.informMgr, ClusterName, clusterNS, ownerRef, vpcModeEnabled, podIPPoolType); err != nil { klog.Errorf("Failed to start Routable pod controllers: %v", err) } } diff --git a/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/core.go b/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/core.go index bd1ffdc6b..8e7ec72a7 100644 --- a/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/core.go +++ b/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/core.go @@ -20,16 +20,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/ippool" 
"k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/node" "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager" k8s "k8s.io/cloud-provider-vsphere/pkg/common/kubernetes" - "k8s.io/klog/v2" ) // StartControllers starts ippool_controller and node_controller func StartControllers(scCfg *rest.Config, client kubernetes.Interface, - informerManager *k8s.InformerManager, clusterName, clusterNS string, ownerRef *metav1.OwnerReference, vpcModeEnabled bool) error { + informerManager *k8s.InformerManager, clusterName, clusterNS string, ownerRef *metav1.OwnerReference, + vpcModeEnabled bool, podIPPoolType string) error { if clusterName == "" { return fmt.Errorf("cluster name can't be empty") @@ -40,7 +42,7 @@ func StartControllers(scCfg *rest.Config, client kubernetes.Interface, klog.V(2).Info("Routable pod controllers start with VPC mode enabled: ", vpcModeEnabled) - ippManager, err := ippoolmanager.GetIPPoolManager(vpcModeEnabled, scCfg, clusterNS) + ippManager, err := ippoolmanager.GetIPPoolManager(vpcModeEnabled, scCfg, clusterNS, podIPPoolType) if err != nil { return fmt.Errorf("fail to get ippool manager or start ippool controller: %w", err) } diff --git a/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/interfaces.go b/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/interfaces.go index a1086c381..a436f493c 100644 --- a/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/interfaces.go +++ b/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/interfaces.go @@ -5,6 +5,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" + "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/helper" ippmv1alpha1 "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha1" ippmv1alpha2 "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2" @@ -27,9 +28,9 @@ type 
IPPoolManager interface {
 }
 
 // GetIPPoolManager gets an IPPoolManager
-func GetIPPoolManager(vpcModeEnabled bool, scCfg *rest.Config, clusterNS string) (IPPoolManager, error) {
+func GetIPPoolManager(vpcModeEnabled bool, scCfg *rest.Config, clusterNS string, podIPPoolType string) (IPPoolManager, error) {
 	if vpcModeEnabled {
-		return ippmv1alpha2.NewIPPoolManager(scCfg, clusterNS)
+		return ippmv1alpha2.NewIPPoolManager(scCfg, clusterNS, podIPPoolType)
 	}
 
 	return ippmv1alpha1.NewIPPoolManager(scCfg, clusterNS)
diff --git a/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager.go b/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager.go
index 06616cf14..61c286100 100644
--- a/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager.go
+++ b/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager.go
@@ -22,10 +22,11 @@ import (
 type IPPoolManager struct {
 	clients         vpcnetworkingclients.Interface
 	informerFactory vpcnetworkinginformers.SharedInformerFactory
+	podIPPoolType   string
 }
 
 // NewIPPoolManager initializes a IPPoolManager
-func NewIPPoolManager(config *rest.Config, clusterNS string) (*IPPoolManager, error) {
+func NewIPPoolManager(config *rest.Config, clusterNS string, podIPPoolType string) (*IPPoolManager, error) {
 	ippoolclients, err := vpcnetworkingclients.NewForConfig(config)
 	if err != nil {
 		return nil, fmt.Errorf("error building ippool ippoolclientset: %w", err)
@@ -37,6 +38,7 @@ func NewIPPoolManager(config *rest.Config, clusterNS string) (*IPPoolManager, er
 
 	return &IPPoolManager{
 		clients:         ippoolclients,
 		informerFactory: ippoolInformerFactory,
+		podIPPoolType:   podIPPoolType,
 	}, nil
 }
 
@@ -83,6 +85,7 @@ func (p *IPPoolManager) CreateIPPool(clusterNS, clusterName string, ownerRef *me
 		},
-		// Omit to provide IPPool type. nsx operator will update the type afterwards.
+		// Type comes from the --pod-ip-pool-type flag; when it is empty, nsx operator will update the type afterwards.
Spec: vpcnetworkingapis.IPPoolSpec{ + Type: p.podIPPoolType, Subnets: []vpcnetworkingapis.SubnetRequest{}, }, } From 1fc3dc5a36b7e225111d8dbb236fb926b29eaaeb Mon Sep 17 00:00:00 2001 From: Chen Lin Date: Thu, 21 Mar 2024 17:07:24 +0800 Subject: [PATCH 2/2] Add podIPPoolType configuration unit test --- pkg/cloudprovider/vsphereparavirtual/cloud.go | 6 +- .../v1alpha2/ippoolmanager_test.go | 66 +++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/pkg/cloudprovider/vsphereparavirtual/cloud.go b/pkg/cloudprovider/vsphereparavirtual/cloud.go index 7ccd21b62..2eaf94e70 100644 --- a/pkg/cloudprovider/vsphereparavirtual/cloud.go +++ b/pkg/cloudprovider/vsphereparavirtual/cloud.go @@ -67,7 +67,7 @@ var ( // vpcModeEnabled if set to true, ippool and node controller will process v1alpha1 StaticRoute and v1alpha2 IPPool, otherwise v1alpha1 RouteSet and v1alpha1 IPPool vpcModeEnabled bool - // podIPPoolType specify if Pod IP addresses is public or private. + // podIPPoolType specifies if Pod IP addresses are public or private. 
podIPPoolType string
 )
 
@@ -115,6 +115,10 @@ func (cp *VSphereParavirtual) Initialize(clientBuilder cloudprovider.ControllerC
+		if podIPPoolType == "" {
+			podIPPoolType = PrivateIPPoolType
+		}
+
 		if podIPPoolType != PublicIPPoolType && podIPPoolType != PrivateIPPoolType {
 			klog.Fatalf("Pod IP Pool Type can be either Public or Private in VPC network, %s is not supported", podIPPoolType)
 		}
 	} else {
 		// NSX-T T1 or VDS network
 		if podIPPoolType != "" {
diff --git a/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager_test.go b/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager_test.go
index f0805a6ab..aec1dd892 100644
--- a/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager_test.go
+++ b/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2/ippoolmanager_test.go
@@ -10,6 +10,7 @@ import (
 	fakevpcnetworkingclients "github.com/vmware-tanzu/nsx-operator/pkg/client/clientset/versioned/fake"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/rest"
 	t1networkingapis "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/apis/nsxnetworking/v1alpha1"
 	"k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/helper"
 )
@@ -398,3 +399,68 @@ func TestDiffIPPoolSubnets(t *testing.T) {
 		})
 	}
 }
+
+func TestNewIPPoolManager(t *testing.T) {
+	testcases := []struct {
+		name                  string
+		podIPPoolType         string
+		expectedpodIPPoolType string
+	}{
+		{
+			name:                  "Public podIPPoolType should be correctly populated into ipPoolManager",
+			podIPPoolType:         "Public",
+			expectedpodIPPoolType: "Public",
+		},
+		{
+			name:                  "Private podIPPoolType should be correctly populated into ipPoolManager",
+			podIPPoolType:         "Private",
+			expectedpodIPPoolType: "Private",
+		},
+	}
+
+	for _, testCase := range testcases {
+		t.Run(testCase.name, func(t *testing.T) {
+			config := &rest.Config{
+				UserAgent: testClustername,
+			}
+			ipm, err := NewIPPoolManager(config, 
testClusterNameSpace, testCase.podIPPoolType) + assert.Equal(t, nil, err) + assert.Equal(t, testCase.expectedpodIPPoolType, ipm.podIPPoolType) + }) + } +} + +func TestCreateIPPool(t *testing.T) { + testcases := []struct { + name string + podIPPoolType string + expectedpodIPPoolType string + }{ + { + name: "Public podIPPoolType should be correctly populated into ipPool", + podIPPoolType: "Public", + expectedpodIPPoolType: "Public", + }, + { + name: "Private podIPPoolType should be correctly populated into ipPool", + podIPPoolType: "Private", + expectedpodIPPoolType: "Private", + }, + { + name: "empty podIPPoolType should be correctly populated into ipPool", + podIPPoolType: "", + expectedpodIPPoolType: "", + }, + } + + for _, testCase := range testcases { + t.Run(testCase.name, func(t *testing.T) { + ipm, _ := initIPPoolTest() + ipm.podIPPoolType = testCase.podIPPoolType + ipp, err := ipm.CreateIPPool(testClusterNameSpace, testClustername, &metav1.OwnerReference{}) + assert.Equal(t, nil, err) + ippool, _ := ipp.(*vpcnetworkingapis.IPPool) + assert.Equal(t, testCase.expectedpodIPPoolType, ippool.Spec.Type) + }) + } +}