Skip to content

Commit

Permalink
CORS-2895: aws/capi: decouple the zone config
Browse files Browse the repository at this point in the history
Move the cluster subnet configuration for CAPI into a dedicated file,
decoupling the BYO VPC and managed VPC code paths into dedicated
functions so that tests can be written for each scenario.

The subnet structure for a managed VPC is created by CAPA from a
SubnetSpec without a valid ID, so CAPA will understand that the subnet
does not exist and will create it in the zones specified or discovered
in the install config.
  • Loading branch information
mtulio committed Mar 21, 2024
1 parent 7d88f86 commit edcc010
Show file tree
Hide file tree
Showing 2 changed files with 286 additions and 52 deletions.
69 changes: 17 additions & 52 deletions pkg/asset/manifests/aws/cluster.go
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
package aws

import (
"context"
"fmt"
"time"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
Expand All @@ -19,33 +17,24 @@ import (
)

// GenerateClusterAssets generates the manifests for the cluster-api.
func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID *installconfig.ClusterID) (*capiutils.GenerateClusterAssetsOutput, error) {
func GenerateClusterAssets(ic *installconfig.InstallConfig, clusterID *installconfig.ClusterID) (*capiutils.GenerateClusterAssetsOutput, error) {
manifests := []*asset.RuntimeFile{}
mainCIDR := capiutils.CIDRFromInstallConfig(installConfig)

zones, err := installConfig.AWS.AvailabilityZones(context.TODO())
if err != nil {
return nil, errors.Wrap(err, "failed to get availability zones")
}

tags, err := aws.CapaTagsFromUserTags(clusterID.InfraID, installConfig.Config.AWS.UserTags)
tags, err := aws.CapaTagsFromUserTags(clusterID.InfraID, ic.Config.AWS.UserTags)
if err != nil {
return nil, fmt.Errorf("failed to get user tags: %w", err)
}

mainCIDR := capiutils.CIDRFromInstallConfig(ic)

awsCluster := &capa.AWSCluster{
ObjectMeta: metav1.ObjectMeta{
Name: clusterID.InfraID,
Namespace: capiutils.Namespace,
},
Spec: capa.AWSClusterSpec{
Region: installConfig.Config.AWS.Region,
Region: ic.Config.AWS.Region,
NetworkSpec: capa.NetworkSpec{
VPC: capa.VPCSpec{
CidrBlock: mainCIDR.String(),
AvailabilityZoneUsageLimit: ptr.To(len(zones)),
AvailabilityZoneSelection: &capa.AZSelectionSchemeOrdered,
},
CNI: &capa.CNISpec{
CNIIngressRules: capa.CNIIngressRules{
{
Expand Down Expand Up @@ -182,7 +171,7 @@ func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID
},
}

if installConfig.Config.Publish == types.ExternalPublishingStrategy {
if ic.Config.Publish == types.ExternalPublishingStrategy {
// FIXME: CAPA bug. Remove when fixed upstream
// The primary and secondary load balancers in CAPA share the same
// security group. However, specifying an ingress rule only in the
Expand Down Expand Up @@ -217,41 +206,17 @@ func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID
}
}

// If the install config has subnets, use them.
if len(installConfig.AWS.Subnets) > 0 {
privateSubnets, err := installConfig.AWS.PrivateSubnets(context.TODO())
if err != nil {
return nil, errors.Wrap(err, "failed to get private subnets")
}
for _, subnet := range privateSubnets {
awsCluster.Spec.NetworkSpec.Subnets = append(awsCluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
ID: subnet.ID,
CidrBlock: subnet.CIDR,
AvailabilityZone: subnet.Zone.Name,
IsPublic: subnet.Public,
})
}
publicSubnets, err := installConfig.AWS.PublicSubnets(context.TODO())
if err != nil {
return nil, errors.Wrap(err, "failed to get public subnets")
}

for _, subnet := range publicSubnets {
awsCluster.Spec.NetworkSpec.Subnets = append(awsCluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
ID: subnet.ID,
CidrBlock: subnet.CIDR,
AvailabilityZone: subnet.Zone.Name,
IsPublic: subnet.Public,
})
}

vpc, err := installConfig.AWS.VPC(context.TODO())
if err != nil {
return nil, errors.Wrap(err, "failed to get VPC")
}
awsCluster.Spec.NetworkSpec.VPC = capa.VPCSpec{
ID: vpc,
}
// Set the VPC and zones (managed) or subnets (BYO VPC) based in the
// install-config.yaml.
err = setZones(&zoneConfigInput{
InstallConfig: ic,
Config: ic.Config,
Meta: ic.AWS,
ClusterID: clusterID,
Cluster: awsCluster,
})
if err != nil {
return nil, fmt.Errorf("failed to set cluster zones or subnets: %w", err)
}

manifests = append(manifests, &asset.RuntimeFile{
Expand Down
269 changes: 269 additions & 0 deletions pkg/asset/manifests/aws/zones.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,269 @@
package aws

import (
"context"
"fmt"
"net"

capa "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"

"github.com/openshift/installer/pkg/asset/installconfig"
"github.com/openshift/installer/pkg/asset/installconfig/aws"
"github.com/openshift/installer/pkg/asset/manifests/capiutils"
utilscidr "github.com/openshift/installer/pkg/asset/manifests/capiutils/cidr"
"github.com/openshift/installer/pkg/types"
)

// zonesCAPI holds the availability-zone pools used to build the CAPI
// NetworkSpec from the install-config machine pools.
type zonesCAPI struct {
	// AvailabilityZones is the union of zones used by any machine pool.
	AvailabilityZones []string
	// controlPlaneZones holds the zones assigned to the control-plane pool.
	controlPlaneZones []string
	// computeZones holds the zones assigned to the compute pool(s).
	computeZones []string
	// allZones records every zone name seen so far, for deduplication.
	allZones map[string]struct{}
}

// All returns the name of every zone recorded in the zone set.
// NOTE: order is not deterministic because it ranges over a map.
func (zo *zonesCAPI) All() (zones []string) {
	// Idiomatic single-variable range; the value is never used.
	for name := range zo.allZones {
		zones = append(zones, name)
	}
	return zones
}

// set appends the given zone names to the pool, skipping names already
// present in that pool, and records every name in the global zone set
// (allZones). The pool is passed by pointer: the original version took the
// slice by value, so the result of append was discarded and no pool ever
// grew. It also deduplicated against the global allZones map, which made
// the second set() call per pool (for AvailabilityZones) skip every name.
func (zo *zonesCAPI) set(pool *[]string, zones []string) {
	for _, name := range zones {
		zo.allZones[name] = struct{}{}
		if !containsZone(*pool, name) {
			*pool = append(*pool, name)
		}
	}
}

// containsZone reports whether name is present in zones.
func containsZone(zones []string, name string) bool {
	for _, zone := range zones {
		if zone == name {
			return true
		}
	}
	return false
}

// SetAvailabilityZones records the zones explicitly configured for the
// given machine-pool role, merging them into the union of all
// availability zones. Unknown pool names are ignored.
func (zo *zonesCAPI) SetAvailabilityZones(pool string, zones []string) {
	switch pool {
	case types.MachinePoolControlPlaneRoleName:
		zo.set(&zo.controlPlaneZones, zones)
		zo.set(&zo.AvailabilityZones, zones)

	case types.MachinePoolComputeRoleName:
		zo.set(&zo.computeZones, zones)
		zo.set(&zo.AvailabilityZones, zones)
	}
}

// SetDefaultConfigZones applies default zones to a pool that has none
// configured: preferring the defaultMachinePlatform zones (defConfig),
// then falling back to the zones discovered in the region (defRegion).
func (zo *zonesCAPI) SetDefaultConfigZones(pool string, defConfig []string, defRegion []string) {
	zones := []string{}
	switch pool {
	case types.MachinePoolControlPlaneRoleName:
		if len(zo.controlPlaneZones) == 0 && len(defConfig) > 0 {
			zones = defConfig
		} else if len(zo.controlPlaneZones) == 0 {
			zones = defRegion
		}
		zo.set(&zo.controlPlaneZones, zones)

	case types.MachinePoolComputeRoleName:
		if len(zo.computeZones) == 0 && len(defConfig) > 0 {
			zones = defConfig
		} else if len(zo.computeZones) == 0 {
			zones = defRegion
		}
		zo.set(&zo.computeZones, zones)
	}
	zo.set(&zo.AvailabilityZones, zones)
}

// zoneConfigInput carries everything setZones needs to populate the CAPI
// cluster NetworkSpec from the install config.
type zoneConfigInput struct {
	// InstallConfig is the full install-config asset.
	InstallConfig *installconfig.InstallConfig
	// Config is the parsed install-config.yaml (InstallConfig.Config).
	Config *types.InstallConfig
	// Meta is the AWS session metadata used to query subnets, zones and VPC.
	Meta *aws.Metadata
	// Cluster is the CAPA AWSCluster manifest being populated.
	Cluster *capa.AWSCluster
	// ClusterID provides the InfraID used to name managed subnets.
	ClusterID *installconfig.ClusterID
	// ZonesInRegion is filled by SetZoneMetadata with the zones offered
	// in the region.
	ZonesInRegion []string
}

// SetZoneMetadata discovers the availability zones offered in the region
// from the AWS metadata and stores them in ZonesInRegion.
// The original nil-field messages wrapped a nil err with %w, which renders
// as "%!w(<nil>)"; they are plain errors now.
func (zin *zoneConfigInput) SetZoneMetadata() (err error) {
	if zin.InstallConfig == nil {
		return fmt.Errorf("unable to retrieve the installConfig")
	}
	if zin.InstallConfig.AWS == nil {
		return fmt.Errorf("unable to retrieve the AWS metadata")
	}
	zin.ZonesInRegion, err = zin.InstallConfig.AWS.AvailabilityZones(context.TODO())
	if err != nil {
		return fmt.Errorf("failed to get availability zones: %w", err)
	}
	// QUESTION(mtulio): Should we filter ZonesInRegion, removing zones
	// which do not match any criteria (instance offering)?
	return nil
}

// setZones populates the CAPI NetworkSpec structures from the
// install-config.yaml, for either a BYO VPC or a managed VPC deployment.
func setZones(in *zoneConfigInput) error {
	// Subnet IDs in the install config indicate a BYO VPC deployment.
	if len(in.Config.AWS.Subnets) > 0 {
		return setZonesBYOVPC(in)
	}
	// Managed VPC: discover the zones offered in the region first.
	if err := in.SetZoneMetadata(); err != nil {
		return fmt.Errorf("failed to get availability zones from metadata: %w", err)
	}
	return setZonesManagedVPC(in)
}

// setZonesBYOVPC creates the CAPI NetworkSpec.Subnets from the existing
// private, public, and edge subnets referenced in the install-config.yaml
// (BYO VPC deployment), and sets NetworkSpec.VPC to the existing VPC ID.
// (The comment previously named setZonesManagedVPC by mistake.)
func setZonesBYOVPC(in *zoneConfigInput) error {
	privateSubnets, err := in.Meta.PrivateSubnets(context.TODO())
	if err != nil {
		return fmt.Errorf("failed to get private subnets: %w", err)
	}
	for _, subnet := range privateSubnets {
		in.Cluster.Spec.NetworkSpec.Subnets = append(in.Cluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
			ID:               subnet.ID,
			CidrBlock:        subnet.CIDR,
			AvailabilityZone: subnet.Zone.Name,
			IsPublic:         subnet.Public,
		})
	}

	publicSubnets, err := in.Meta.PublicSubnets(context.TODO())
	if err != nil {
		return fmt.Errorf("failed to get public subnets: %w", err)
	}
	for _, subnet := range publicSubnets {
		in.Cluster.Spec.NetworkSpec.Subnets = append(in.Cluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
			ID:               subnet.ID,
			CidrBlock:        subnet.CIDR,
			AvailabilityZone: subnet.Zone.Name,
			IsPublic:         subnet.Public,
		})
	}

	// Edge subnets (Local/Wavelength zones) are also carried over as-is.
	edgeSubnets, err := in.Meta.EdgeSubnets(context.TODO())
	if err != nil {
		return fmt.Errorf("failed to get edge subnets: %w", err)
	}
	for _, subnet := range edgeSubnets {
		in.Cluster.Spec.NetworkSpec.Subnets = append(in.Cluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
			ID:               subnet.ID,
			CidrBlock:        subnet.CIDR,
			AvailabilityZone: subnet.Zone.Name,
			IsPublic:         subnet.Public,
		})
	}

	// Providing a VPC ID tells CAPA to reuse the existing VPC rather than
	// create one.
	vpc, err := in.Meta.VPC(context.TODO())
	if err != nil {
		return fmt.Errorf("failed to get VPC: %w", err)
	}
	in.Cluster.Spec.NetworkSpec.VPC = capa.VPCSpec{
		ID: vpc,
	}

	return nil
}

// setZonesManagedVPC creates the CAPI NetworkSpec.VPC and NetworkSpec.Subnets
// from the zones in the install-config.yaml for the managed VPC deployment.
// Subnets are appended without a valid AWS ID, so CAPA will create them in
// the specified zones.
func setZonesManagedVPC(in *zoneConfigInput) error {
	out, err := extractZonesFromInstallConfig(in)
	if err != nil {
		return fmt.Errorf("failed to get availability zones: %w", err)
	}

	mainCIDR := capiutils.CIDRFromInstallConfig(in.InstallConfig)
	in.Cluster.Spec.NetworkSpec.VPC = capa.VPCSpec{
		CidrBlock: mainCIDR.String(),
	}

	// Base subnets considering only private zones, leaving one free block to
	// allow future subnet expansions in Day-2.
	numSubnets := len(out.AvailabilityZones) + 1

	// Public subnets consume one extra range from the base blocks.
	isPublishingExternal := in.Config.Publish == types.ExternalPublishingStrategy
	publicCidrIndex := len(out.AvailabilityZones)
	if isPublishingExternal {
		numSubnets++
	}

	privateCIDRs, err := utilscidr.SplitIntoSubnetsIPv4(mainCIDR.String(), numSubnets)
	if err != nil {
		return fmt.Errorf("unable to retrieve CIDR blocks for all private subnets: %w", err)
	}
	var publicCIDRs []*net.IPNet
	if isPublishingExternal {
		// The block after the private ranges is subdivided into one public
		// subnet per zone.
		publicCIDRs, err = utilscidr.SplitIntoSubnetsIPv4(privateCIDRs[publicCidrIndex].String(), publicCidrIndex)
		if err != nil {
			return fmt.Errorf("unable to retrieve CIDR blocks for all public subnets: %w", err)
		}
	}

	// Q: Can we use the standard terraform name (without 'subnet') and tell
	// CAPA to query it for Control Planes?
	subnetNamePrefix := fmt.Sprintf("%s-subnet", in.ClusterID.InfraID)

	// Create subnets from the zone pool with type availability-zone.
	// The original bounds checks used `len(cidrs) < idx`, which allowed
	// idx == len(cidrs) through and would panic on the index below; they
	// also wrapped a nil err with %w. Both are fixed here.
	for idx, zone := range out.AvailabilityZones {
		if idx >= len(privateCIDRs) {
			return fmt.Errorf("unable to define CIDR blocks for all private subnets")
		}
		in.Cluster.Spec.NetworkSpec.Subnets = append(in.Cluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
			AvailabilityZone: zone,
			CidrBlock:        privateCIDRs[idx].String(),
			ID:               fmt.Sprintf("%s-private-%s", subnetNamePrefix, zone),
			IsPublic:         false,
		})
		if !isPublishingExternal {
			continue
		}
		if idx >= len(publicCIDRs) {
			return fmt.Errorf("unable to define CIDR blocks for all public subnets")
		}
		in.Cluster.Spec.NetworkSpec.Subnets = append(in.Cluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
			AvailabilityZone: zone,
			CidrBlock:        publicCIDRs[idx].String(),
			ID:               fmt.Sprintf("%s-public-%s", subnetNamePrefix, zone),
			IsPublic:         true,
		})
	}
	return nil
}

// extractZonesFromInstallConfig extracts all zones defined in the install-config,
// otherwise falling back to the zones discovered from the AWS metadata
// (in.ZonesInRegion) when none is defined.
func extractZonesFromInstallConfig(in *zoneConfigInput) (*zonesCAPI, error) {
	if in.Config == nil {
		return nil, fmt.Errorf("unable to retrieve Config")
	}
	out := zonesCAPI{allZones: make(map[string]struct{})}

	cfg := in.Config
	// Zones from platform.aws.defaultMachinePlatform act as the fallback
	// for pools that do not set zones explicitly.
	defaultZones := []string{}
	if cfg.AWS != nil && cfg.AWS.DefaultMachinePlatform != nil && len(cfg.AWS.DefaultMachinePlatform.Zones) > 0 {
		defaultZones = cfg.AWS.DefaultMachinePlatform.Zones
	}

	if cfg.ControlPlane != nil && cfg.ControlPlane.Platform.AWS != nil {
		out.SetAvailabilityZones(string(types.MachinePoolControlPlaneRoleName), cfg.ControlPlane.Platform.AWS.Zones)
	}
	out.SetDefaultConfigZones(string(types.MachinePoolControlPlaneRoleName), defaultZones, in.ZonesInRegion)

	for _, pool := range cfg.Compute {
		// NOTE(review): compute pools without an AWS platform stanza are
		// skipped entirely and never receive default zones — confirm intended.
		if pool.Platform.AWS == nil {
			continue
		}
		if len(pool.Platform.AWS.Zones) > 0 {
			out.SetAvailabilityZones(pool.Name, pool.Platform.AWS.Zones)
		}
		// Only explicitly defined zones are valid for edge pools; do not
		// apply default/region zones to them.
		if pool.Name == types.MachinePoolEdgeRoleName {
			continue
		}
		out.SetDefaultConfigZones(string(types.MachinePoolComputeRoleName), defaultZones, in.ZonesInRegion)
	}
	return &out, nil
}

0 comments on commit edcc010

Please sign in to comment.