Skip to content

Commit

Permalink
Merge pull request #957 from chenlin07/topic/chenlin/configure-ip-pool-type
Browse files Browse the repository at this point in the history

Support specifying pod IPPool type in VPC network
  • Loading branch information
k8s-ci-robot committed Mar 22, 2024
2 parents 1e0e960 + 1fc3dc5 commit 0f58145
Show file tree
Hide file tree
Showing 5 changed files with 104 additions and 7 deletions.
27 changes: 26 additions & 1 deletion pkg/cloudprovider/vsphereparavirtual/cloud.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,12 @@ const (

// CloudControllerManagerNS is the namespace for vsphere paravirtual cluster cloud provider
CloudControllerManagerNS = "vmware-system-cloud-provider"

// PublicIPPoolType makes Pod IP addresses routable outside of the Tier 0 router.
PublicIPPoolType = "Public"

// PrivateIPPoolType makes Pod IP addresses routable only within the VPC router.
PrivateIPPoolType = "Private"
)

var (
Expand All @@ -60,6 +66,9 @@ var (

// vpcModeEnabled if set to true, ippool and node controller will process v1alpha1 StaticRoute and v1alpha2 IPPool, otherwise v1alpha1 RouteSet and v1alpha1 IPPool
vpcModeEnabled bool

// podIPPoolType specifies if Pod IP addresses are public or private.
podIPPoolType string
)

func init() {
Expand All @@ -86,6 +95,7 @@ func init() {

flag.BoolVar(&vmservice.IsLegacy, "is-legacy-paravirtual", false, "If true, machine label selector will start with capw.vmware.com. By default, it's false, machine label selector will start with capv.vmware.com.")
flag.BoolVar(&vpcModeEnabled, "enable-vpc-mode", false, "If true, routable pod controller will start with VPC mode. It is useful only when route controller is enabled in vsphereparavirtual mode")
flag.StringVar(&podIPPoolType, "pod-ip-pool-type", "", "Specify if Pod IP address is Public or Private routable in VPC network. Valid values are Public and Private")
}

// Creates new Controller node interface and returns
Expand All @@ -101,6 +111,21 @@ func newVSphereParavirtual(cfg *cpcfg.Config) (*VSphereParavirtual, error) {
func (cp *VSphereParavirtual) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
klog.V(0).Info("Initing vSphere Paravirtual Cloud Provider")

if vpcModeEnabled {
if podIPPoolType != PublicIPPoolType && podIPPoolType != PrivateIPPoolType {
klog.Fatalf("Pod IP Pool Type can be either Public or Private in VPC network, %s is not supported", podIPPoolType)
}

if podIPPoolType == "" {
podIPPoolType = PrivateIPPoolType
}
} else {
// NSX-T T1 or VDS network
if podIPPoolType != "" {
klog.Fatal("Pod IP Pool Type can be set only when the network is VPC")
}
}

ownerRef, err := readOwnerRef(VsphereParavirtualCloudProviderConfigPath)
if err != nil {
klog.Fatalf("Failed to read ownerRef:%s", err)
Expand Down Expand Up @@ -148,7 +173,7 @@ func (cp *VSphereParavirtual) Initialize(clientBuilder cloudprovider.ControllerC
if RouteEnabled {
klog.V(0).Info("Starting routable pod controllers")

if err := routablepod.StartControllers(kcfg, client, cp.informMgr, ClusterName, clusterNS, ownerRef, vpcModeEnabled); err != nil {
if err := routablepod.StartControllers(kcfg, client, cp.informMgr, ClusterName, clusterNS, ownerRef, vpcModeEnabled, podIPPoolType); err != nil {
klog.Errorf("Failed to start Routable pod controllers: %v", err)
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,16 +20,18 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"

"k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/ippool"
"k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/controllers/routablepod/node"
"k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager"
k8s "k8s.io/cloud-provider-vsphere/pkg/common/kubernetes"
"k8s.io/klog/v2"
)

// StartControllers starts ippool_controller and node_controller
func StartControllers(scCfg *rest.Config, client kubernetes.Interface,
informerManager *k8s.InformerManager, clusterName, clusterNS string, ownerRef *metav1.OwnerReference, vpcModeEnabled bool) error {
informerManager *k8s.InformerManager, clusterName, clusterNS string, ownerRef *metav1.OwnerReference,
vpcModeEnabled bool, podIPPoolType string) error {

if clusterName == "" {
return fmt.Errorf("cluster name can't be empty")
Expand All @@ -40,7 +42,7 @@ func StartControllers(scCfg *rest.Config, client kubernetes.Interface,

klog.V(2).Info("Routable pod controllers start with VPC mode enabled: ", vpcModeEnabled)

ippManager, err := ippoolmanager.GetIPPoolManager(vpcModeEnabled, scCfg, clusterNS)
ippManager, err := ippoolmanager.GetIPPoolManager(vpcModeEnabled, scCfg, clusterNS, podIPPoolType)
if err != nil {
return fmt.Errorf("fail to get ippool manager or start ippool controller: %w", err)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"

"k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/helper"
ippmv1alpha1 "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha1"
ippmv1alpha2 "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/v1alpha2"
Expand All @@ -27,9 +28,9 @@ type IPPoolManager interface {
}

// GetIPPoolManager gets an IPPoolManager
func GetIPPoolManager(vpcModeEnabled bool, scCfg *rest.Config, clusterNS string) (IPPoolManager, error) {
func GetIPPoolManager(vpcModeEnabled bool, scCfg *rest.Config, clusterNS string, podIPPoolType string) (IPPoolManager, error) {
if vpcModeEnabled {
return ippmv1alpha2.NewIPPoolManager(scCfg, clusterNS)
return ippmv1alpha2.NewIPPoolManager(scCfg, clusterNS, podIPPoolType)
}

return ippmv1alpha1.NewIPPoolManager(scCfg, clusterNS)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,11 @@ import (
type IPPoolManager struct {
clients vpcnetworkingclients.Interface
informerFactory vpcnetworkinginformers.SharedInformerFactory
podIPPoolType string
}

// NewIPPoolManager initializes a IPPoolManager
func NewIPPoolManager(config *rest.Config, clusterNS string) (*IPPoolManager, error) {
func NewIPPoolManager(config *rest.Config, clusterNS string, podIPPoolType string) (*IPPoolManager, error) {
ippoolclients, err := vpcnetworkingclients.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error building ippool ippoolclientset: %w", err)
Expand All @@ -37,6 +38,7 @@ func NewIPPoolManager(config *rest.Config, clusterNS string) (*IPPoolManager, er
return &IPPoolManager{
clients: ippoolclients,
informerFactory: ippoolInformerFactory,
podIPPoolType: podIPPoolType,
}, nil
}

Expand Down Expand Up @@ -83,6 +85,7 @@ func (p *IPPoolManager) CreateIPPool(clusterNS, clusterName string, ownerRef *me
},
// Set the IPPool type from podIPPoolType; when it is empty, the nsx operator will update the type afterwards.
Spec: vpcnetworkingapis.IPPoolSpec{
Type: p.podIPPoolType,
Subnets: []vpcnetworkingapis.SubnetRequest{},
},
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
fakevpcnetworkingclients "github.com/vmware-tanzu/nsx-operator/pkg/client/clientset/versioned/fake"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
t1networkingapis "k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/apis/nsxnetworking/v1alpha1"
"k8s.io/cloud-provider-vsphere/pkg/cloudprovider/vsphereparavirtual/ippoolmanager/helper"
)
Expand Down Expand Up @@ -398,3 +399,68 @@ func TestDiffIPPoolSubnets(t *testing.T) {
})
}
}

func TestNewIPPoolManager(t *testing.T) {
testcases := []struct {
name string
podIPPoolType string
expectedpodIPPoolType string
}{
{
name: "Public podIPPoolType should be correctly populated into ipPoolManager",
podIPPoolType: "Public",
expectedpodIPPoolType: "Public",
},
{
name: "Private podIPPoolType should be correctly populated into ipPoolManager",
podIPPoolType: "Private",
expectedpodIPPoolType: "Private",
},
}

for _, testCase := range testcases {
t.Run(testCase.name, func(t *testing.T) {
config := &rest.Config{
UserAgent: testClustername,
}
ipm, err := NewIPPoolManager(config, testClusterNameSpace, testCase.podIPPoolType)
assert.Equal(t, nil, err)
assert.Equal(t, testCase.expectedpodIPPoolType, ipm.podIPPoolType)
})
}
}

func TestCreateIPPool(t *testing.T) {
testcases := []struct {
name string
podIPPoolType string
expectedpodIPPoolType string
}{
{
name: "Public podIPPoolType should be correctly populated into ipPool",
podIPPoolType: "Public",
expectedpodIPPoolType: "Public",
},
{
name: "Private podIPPoolType should be correctly populated into ipPool",
podIPPoolType: "Private",
expectedpodIPPoolType: "Private",
},
{
name: "empty podIPPoolType should be correctly populated into ipPool",
podIPPoolType: "",
expectedpodIPPoolType: "",
},
}

for _, testCase := range testcases {
t.Run(testCase.name, func(t *testing.T) {
ipm, _ := initIPPoolTest()
ipm.podIPPoolType = testCase.podIPPoolType
ipp, err := ipm.CreateIPPool(testClusterNameSpace, testClustername, &metav1.OwnerReference{})
assert.Equal(t, nil, err)
ippool, _ := ipp.(*vpcnetworkingapis.IPPool)
assert.Equal(t, testCase.expectedpodIPPoolType, ippool.Spec.Type)
})
}
}

0 comments on commit 0f58145

Please sign in to comment.