Skip to content

Commit

Permalink
Cleanup ReconcileVPC() and set id early in reconciliation.
Browse files Browse the repository at this point in the history
  • Loading branch information
Sedef authored and randomvariable committed Jul 16, 2021
1 parent d38fda2 commit fea802e
Show file tree
Hide file tree
Showing 5 changed files with 146 additions and 99 deletions.
13 changes: 13 additions & 0 deletions api/v1alpha4/awscluster_webhook.go
Expand Up @@ -109,6 +109,19 @@ func (r *AWSCluster) ValidateUpdate(old runtime.Object) error {
)
}

// Modifying VPC id is not allowed because it will cause a new VPC creation if set to nil.
if !reflect.DeepEqual(oldC.Spec.NetworkSpec, NetworkSpec{}) &&
!reflect.DeepEqual(oldC.Spec.NetworkSpec.VPC, VPCSpec{}) &&
oldC.Spec.NetworkSpec.VPC.ID != "" {
if reflect.DeepEqual(r.Spec.NetworkSpec, NetworkSpec{}) ||
reflect.DeepEqual(r.Spec.NetworkSpec.VPC, VPCSpec{}) ||
oldC.Spec.NetworkSpec.VPC.ID != r.Spec.NetworkSpec.VPC.ID {
allErrs = append(allErrs,
field.Invalid(field.NewPath("spec", "networkSpec", "vpc", "id"),
r.Spec.IdentityRef, "field cannot be modified once set"))
}
}

// If a identityRef is already set, do not allow removal of it.
if oldC.Spec.IdentityRef != nil && r.Spec.IdentityRef == nil {
allErrs = append(allErrs,
Expand Down
32 changes: 32 additions & 0 deletions api/v1alpha4/awscluster_webhook_test.go
Expand Up @@ -200,6 +200,38 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
},
wantErr: false,
},
{
name: "VPC id is immutable cannot be emptied once set",
oldCluster: &AWSCluster{
Spec: AWSClusterSpec{
NetworkSpec: NetworkSpec{
VPC: VPCSpec{ID: "managed-or-unmanaged-vpc"},
},
},
},
newCluster: &AWSCluster{
Spec: AWSClusterSpec{},
},
wantErr: true,
},
{
name: "VPC id is immutable, cannot be set to a different value once set",
oldCluster: &AWSCluster{
Spec: AWSClusterSpec{
NetworkSpec: NetworkSpec{
VPC: VPCSpec{ID: "managed-or-unmanaged-vpc"},
},
},
},
newCluster: &AWSCluster{
Spec: AWSClusterSpec{
NetworkSpec: NetworkSpec{
VPC: VPCSpec{ID: "a-new-vpc"},
},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
Expand Down
21 changes: 14 additions & 7 deletions pkg/cloud/services/network/network.go
Expand Up @@ -72,15 +72,22 @@ func (s *Service) ReconcileNetwork() (err error) {
func (s *Service) DeleteNetwork() (err error) {
s.scope.V(2).Info("Deleting network")

// Search for a previously created and tagged VPC
vpc, err := s.describeVPC()
if err != nil {
if awserrors.IsNotFound(err) {
// If the VPC does not exist, nothing to do
return nil
vpc := &infrav1.VPCSpec{}
// Get VPC used for the cluster
if s.scope.VPC().ID != "" {
var err error
vpc, err = s.describeVPCByID()
if err != nil {
if awserrors.IsNotFound(err) {
// If the VPC does not exist, nothing to do
return nil
}
return err
}
return err
} else {
s.scope.Error(err, "non-fatal: VPC ID is missing, ")
}

vpc.DeepCopyInto(s.scope.VPC())

// Secondary CIDR
Expand Down
111 changes: 47 additions & 64 deletions pkg/cloud/services/network/vpc.go
Expand Up @@ -19,17 +19,16 @@ package network
import (
"fmt"

kerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
Expand All @@ -43,55 +42,55 @@ const (
func (s *Service) reconcileVPC() error {
s.scope.V(2).Info("Reconciling VPC")

vpc, err := s.describeVPC()
if awserrors.IsNotFound(err) { // nolint:nestif
// Create a new managed vpc.
if !conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) {
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1.ConditionSeverityInfo, "")
// If the ID is not nil, VPC is either managed or unmanaged but should exist in the AWS.
if s.scope.VPC().ID != "" {
vpc, err := s.describeVPCByID()
if err != nil {
return errors.Wrap(err, ".spec.vpc.id is set but VPC resource is missing in AWS; failed to describe VPC resources. (might be in creation process)")
}

s.scope.VPC().CidrBlock = vpc.CidrBlock
s.scope.VPC().Tags = vpc.Tags

// If VPC is unmanaged, return early.
if vpc.IsUnmanaged(s.scope.Name()) {
s.scope.V(2).Info("Working on unmanaged VPC", "vpc-id", vpc.ID)
if err := s.scope.PatchObject(); err != nil {
return errors.Wrap(err, "failed to patch conditions")
return errors.Wrap(err, "failed to patch unmanaged VPC fields")
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulSetVPCAttributes", "Set managed VPC attributes for %q", vpc.ID)
return nil
}
vpc, err = s.createVPC()
if err != nil {
return errors.Wrap(err, "failed to create new vpc")
}
} else if err != nil {
return errors.Wrap(err, "failed to describe VPCs")
}

// This function creates a new infrav1.VPCSpec, populates it with data from AWS, and then deep copies into the
// AWSCluster's VPC spec (see the DeepCopyInto lines below). This is potentially problematic, as it completely
// overwrites the data for the VPC spec as retrieved from the apiserver. This is a temporary band-aid to restore
// recently-added fields that describe user intent and do not come from AWS resource descriptions.
//
// FIXME(ncdc): rather than copying these values from the scope to vpc, find a better way to merge AWS information
// with data in the scope retrieved from the apiserver. Could use something like mergo.
//
// NOTE: it may look like we are losing InternetGatewayID because it's not populated by describeVPC/createVPC or
// restored here, but that's ok. It is restored by reconcileInternetGateways, which is invoked after this.
vpc.AvailabilityZoneSelection = s.scope.VPC().AvailabilityZoneSelection
vpc.AvailabilityZoneUsageLimit = s.scope.VPC().AvailabilityZoneUsageLimit
// If the VPC is managed, make sure its attributes are configured.
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
if err := s.ensureManagedVPCAttributes(vpc); err != nil {
return false, err
}
return true, nil
}, awserrors.VPCNotFound); err != nil {
return errors.Wrapf(err, "failed to to set vpc attributes for %q", vpc.ID)
}

if vpc.IsUnmanaged(s.scope.Name()) {
vpc.DeepCopyInto(s.scope.VPC())
s.scope.V(2).Info("Working on unmanaged VPC", "vpc-id", vpc.ID)
return nil
}

// Make sure attributes are configured
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
buildParams := s.getVPCTagParams(vpc.ID)
tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client))
if err := tagsBuilder.Ensure(vpc.Tags); err != nil {
return false, err
// .spec.vpc.id is empty, so create a new managed VPC.
if !conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) {
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1.ConditionSeverityInfo, "")
if err := s.scope.PatchObject(); err != nil {
return errors.Wrap(err, "failed to patch conditions")
}
return true, nil
}, awserrors.VPCNotFound); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedTagVPC", "Failed to tag managed VPC %q: %v", vpc.ID, err)
return errors.Wrapf(err, "failed to tag vpc %q", vpc.ID)
}
vpc, err := s.createVPC()
if err != nil {
return errors.Wrap(err, "failed to create new vpc")
}

s.scope.VPC().CidrBlock = vpc.CidrBlock
s.scope.VPC().Tags = vpc.Tags
s.scope.VPC().ID = vpc.ID

// Make sure attributes are configured
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
if err := s.ensureManagedVPCAttributes(vpc); err != nil {
Expand All @@ -102,8 +101,6 @@ func (s *Service) reconcileVPC() error {
return errors.Wrapf(err, "failed to to set vpc attributes for %q", vpc.ID)
}

vpc.DeepCopyInto(s.scope.VPC())
s.scope.V(2).Info("Working on managed VPC", "vpc-id", vpc.ID)
return nil
}

Expand Down Expand Up @@ -165,10 +162,6 @@ func (s *Service) ensureManagedVPCAttributes(vpc *infrav1.VPCSpec) error {
}

func (s *Service) createVPC() (*infrav1.VPCSpec, error) {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
return nil, errors.Errorf("cannot create a managed vpc in unmanaged mode")
}

if s.scope.VPC().CidrBlock == "" {
s.scope.VPC().CidrBlock = defaultVPCCidr
}
Expand All @@ -189,15 +182,6 @@ func (s *Service) createVPC() (*infrav1.VPCSpec, error) {
record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateVPC", "Created new managed VPC %q", *out.Vpc.VpcId)
s.scope.V(2).Info("Created new VPC with cidr", "vpc-id", *out.Vpc.VpcId, "cidr-block", *out.Vpc.CidrBlock)

// TODO: we should attempt to record the VPC ID as soon as possible by setting s.scope.VPC().ID
// however, the logic used for determining managed vs unmanaged VPCs relies on the tags and will
// need to be updated to accommodate for the recording of the VPC ID prior to the tagging.

wReq := &ec2.DescribeVpcsInput{VpcIds: []*string{out.Vpc.VpcId}}
if err := s.EC2Client.WaitUntilVpcAvailable(wReq); err != nil {
return nil, errors.Wrapf(err, "failed to wait for vpc %q", *out.Vpc.VpcId)
}

return &infrav1.VPCSpec{
ID: *out.Vpc.VpcId,
CidrBlock: *out.Vpc.CidrBlock,
Expand Down Expand Up @@ -232,19 +216,18 @@ func (s *Service) deleteVPC() error {
return nil
}

func (s *Service) describeVPC() (*infrav1.VPCSpec, error) {
func (s *Service) describeVPCByID() (*infrav1.VPCSpec, error) {
if s.scope.VPC().ID == "" {
return nil, errors.New("VPC ID is not set, failed to describe VPCs by ID")
}

input := &ec2.DescribeVpcsInput{
Filters: []*ec2.Filter{
filter.EC2.VPCStates(ec2.VpcStatePending, ec2.VpcStateAvailable),
},
}

if s.scope.VPC().ID == "" {
// Try to find a previously created and tagged VPC
input.Filters = append(input.Filters, filter.EC2.Cluster(s.scope.Name()))
} else {
input.VpcIds = []*string{aws.String(s.scope.VPC().ID)}
}
input.VpcIds = []*string{aws.String(s.scope.VPC().ID)}

out, err := s.EC2Client.DescribeVpcs(input)
if err != nil {
Expand Down
68 changes: 40 additions & 28 deletions pkg/cloud/services/network/vpc_test.go
Expand Up @@ -21,9 +21,13 @@ import (
"reflect"
"testing"

. "github.com/onsi/gomega"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/diff"
Expand Down Expand Up @@ -68,13 +72,14 @@ func TestReconcileVPC(t *testing.T) {
selection := infrav1.AZSelectionSchemeOrdered

testCases := []struct {
name string
input *infrav1.VPCSpec
expected *infrav1.VPCSpec
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
name string
input *infrav1.VPCSpec
expected *infrav1.VPCSpec
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
expectError bool
}{
{
name: "managed vpc exists",
name: "if unmanaged vpc exists, updates tags with aws VPC resource tags",
input: &infrav1.VPCSpec{ID: "vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
expected: &infrav1.VPCSpec{
ID: "vpc-exists",
Expand All @@ -87,6 +92,7 @@ func TestReconcileVPC(t *testing.T) {
AvailabilityZoneUsageLimit: &usageLimit,
AvailabilityZoneSelection: &selection,
},
expectError: false,
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
VpcIds: []*string{
Expand Down Expand Up @@ -128,8 +134,9 @@ func TestReconcileVPC(t *testing.T) {
},
},
{
name: "managed vpc does not exist",
input: &infrav1.VPCSpec{AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
name: "if managed vpc does not exist, creates a new VPC",
input: &infrav1.VPCSpec{AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
expectError: false,
expected: &infrav1.VPCSpec{
ID: "vpc-new",
CidrBlock: "10.1.0.0/16",
Expand All @@ -142,20 +149,6 @@ func TestReconcileVPC(t *testing.T) {
AvailabilityZoneSelection: &selection,
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
},
{
Name: aws.String("tag-key"),
Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
},
},
})).
Return(&ec2.DescribeVpcsOutput{}, nil)

m.CreateVpc(gomock.AssignableToTypeOf(&ec2.CreateVpcInput{})).
Return(&ec2.CreateVpcOutput{
Vpc: &ec2.Vpc{
Expand Down Expand Up @@ -184,11 +177,25 @@ func TestReconcileVPC(t *testing.T) {

m.ModifyVpcAttribute(gomock.AssignableToTypeOf(&ec2.ModifyVpcAttributeInput{})).
Return(&ec2.ModifyVpcAttributeOutput{}, nil).Times(2)

m.WaitUntilVpcAvailable(gomock.Eq(&ec2.DescribeVpcsInput{
VpcIds: []*string{aws.String("vpc-new")},
},
},
{
name: "managed vpc id exists, but vpc resource is missing",
input: &infrav1.VPCSpec{ID: "vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
expectError: true,
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
VpcIds: []*string{
aws.String("vpc-exists"),
},
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
},
},
})).
Return(nil)
Return(nil, awserr.New("404", "http not found err", errors.New("err")))
},
},
}
Expand Down Expand Up @@ -226,9 +233,14 @@ func TestReconcileVPC(t *testing.T) {

s := NewService(clusterScope)
s.EC2Client = ec2Mock

if err := s.reconcileVPC(); err != nil {
t.Fatalf("got an unexpected error: %v", err)
g := NewWithT(t)

err = s.reconcileVPC()
if tc.expectError {
g.Expect(err).ToNot(BeNil())
return
} else {
g.Expect(err).To(BeNil())
}

if !reflect.DeepEqual(tc.expected, &clusterScope.AWSCluster.Spec.NetworkSpec.VPC) {
Expand Down

0 comments on commit fea802e

Please sign in to comment.