From a82f548ff8d37987e1e7af058e2728943b66b3f4 Mon Sep 17 00:00:00 2001 From: Rodrigo Menezes Date: Mon, 3 Sep 2018 13:16:57 -0700 Subject: [PATCH 1/3] Allow using existing/shared Security Groups Verbosely log when a user overwrites LB or IG security groups Change SecurityGroup to SecurityGroupOverride Allow using existing/shared Security Groups Update tests --- cmd/kops/integration_test.go | 65 +- docs/security_groups.md | 55 ++ pkg/apis/kops/cluster.go | 17 +- pkg/apis/kops/instancegroup.go | 2 + pkg/apis/kops/v1alpha1/cluster.go | 17 +- pkg/apis/kops/v1alpha1/instancegroup.go | 2 + .../kops/v1alpha1/zz_generated.conversion.go | 4 + .../kops/v1alpha1/zz_generated.deepcopy.go | 18 + pkg/apis/kops/v1alpha2/cluster.go | 17 +- pkg/apis/kops/v1alpha2/instancegroup.go | 2 + .../kops/v1alpha2/zz_generated.conversion.go | 4 + .../kops/v1alpha2/zz_generated.deepcopy.go | 18 + pkg/apis/kops/zz_generated.deepcopy.go | 18 + pkg/model/BUILD.bazel | 1 + pkg/model/awsmodel/api_loadbalancer.go | 70 +- pkg/model/awsmodel/autoscalinggroup.go | 15 +- pkg/model/external_access.go | 119 ++-- pkg/model/firewall.go | 376 +++++++---- pkg/model/firewall_test.go | 71 ++ .../update_cluster/existing_sg/id_rsa.pub | 1 + .../existing_sg/in-v1alpha2.yaml | 134 ++++ .../update_cluster/existing_sg/kubernetes.tf | 614 ++++++++++++++++++ upup/pkg/fi/cloudup/awstasks/securitygroup.go | 2 +- 23 files changed, 1402 insertions(+), 240 deletions(-) create mode 100644 docs/security_groups.md create mode 100644 pkg/model/firewall_test.go create mode 100755 tests/integration/update_cluster/existing_sg/id_rsa.pub create mode 100644 tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml create mode 100644 tests/integration/update_cluster/existing_sg/kubernetes.tf diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index 26176b7e803a4..545c00e4582f7 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -52,15 +52,15 @@ const updateClusterTestBase = "../../tests/integration/update_cluster/" // TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a func TestMinimal(t *testing.T) { - runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true) - runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true) - runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true) + runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true, nil) + runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true, nil) + runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true, nil) } // TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3 func TestHA(t *testing.T) { - runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true) - runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true) + runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true, nil) + runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true, nil) } // TestHighAvailabilityGCE runs the test on a simple HA GCE configuration, similar to kops create cluster ha-gce.example.com @@ -71,7 +71,7 @@ func TestHighAvailabilityGCE(t *testing.T) { // TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases func TestComplex(t *testing.T) { - runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true) 
+ runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true, nil) } // TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a @@ -85,6 +85,12 @@ func TestExistingIAMCloudformation(t *testing.T) { runTestCloudformation(t, "minimal.example.com", "existing_iam_cloudformation", "v1alpha2", false, lifecycleOverrides) } +// TestExistingSG runs the test with existing Security Group, similar to kops create cluster minimal.example.com --zones us-west-1a +func TestExistingSG(t *testing.T) { + lifecycleOverrides := []string{"SecurityGroup=ExistsAndWarnIfChanges", "SecurityGroupRule=ExistsAndWarnIfChanges"} + runTestAWS(t, "existingsg.example.com", "existing_sg", "v1alpha2", false, 3, true, lifecycleOverrides) +} + // TestAdditionalUserData runs the test on passing additional user-data to an instance at bootstrap. func TestAdditionalUserData(t *testing.T) { runTestCloudformation(t, "additionaluserdata.example.com", "additional_user-data", "v1alpha2", false, nil) @@ -92,71 +98,72 @@ func TestAdditionalUserData(t *testing.T) { // TestBastionAdditionalUserData runs the test on passing additional user-data to a bastion instance group func TestBastionAdditionalUserData(t *testing.T) { - runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true) + runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true, nil) } // TestMinimal_141 runs the test on a configuration from 1.4.1 release func TestMinimal_141(t *testing.T) { - runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true) + runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true, nil) } // TestPrivateWeave runs the test on a configuration with private topology, weave networking func TestPrivateWeave(t *testing.T) { - runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true) - runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true) + runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true, nil) + runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true, nil) } // TestPrivateFlannel runs the test on a configuration with private topology, flannel networking func TestPrivateFlannel(t *testing.T) { - runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1, true) - runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true) + runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1, true, nil) + runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true, nil) } // TestPrivateCalico runs the test on a configuration with private topology, calico networking func TestPrivateCalico(t *testing.T) { - runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true) - runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true) + runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true, nil) + runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true, nil) } // TestPrivateCanal runs the test on a configuration with private topology, canal networking func TestPrivateCanal(t *testing.T) { - runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true) - runTestAWS(t, 
"privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true) + runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true, nil) + runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true, nil) } // TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking func TestPrivateKopeio(t *testing.T) { - runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true) + runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true, nil) } // TestPrivateSharedSubnet runs the test on a configuration with private topology & shared subnets func TestPrivateSharedSubnet(t *testing.T) { - runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true) + runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true, nil) } // TestPrivateDns1 runs the test on a configuration with private topology, private dns func TestPrivateDns1(t *testing.T) { - runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true) + runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true, nil) } // TestPrivateDns2 runs the test on a configuration with private topology, private dns, extant vpc func TestPrivateDns2(t *testing.T) { - runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true) + runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true, nil) } // TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC) func TestSharedSubnet(t *testing.T) { - runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true) + runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true, nil) } // TestSharedVPC runs the test on a configuration with a shared VPC func TestSharedVPC(t *testing.T) { - runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true) + runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true, nil) } // TestExistingIAM runs the test on a configuration with existing IAM instance profiles func TestExistingIAM(t *testing.T) { - runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false) + lifecycleOverrides := []string{"IAMRole=ExistsAndWarnIfChanges", "IAMRolePolicy=ExistsAndWarnIfChanges", "IAMInstanceProfileRole=ExistsAndWarnIfChanges"} + runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false, lifecycleOverrides) } // TestAdditionalCIDR runs the test on a configuration with a shared VPC @@ -170,7 +177,7 @@ func TestPhaseNetwork(t *testing.T) { } func TestExternalLoadBalancer(t *testing.T) { - runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true) + runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true, nil) runTestCloudformation(t, "externallb.example.com", "externallb", "v1alpha2", false, nil) } @@ -354,7 +361,7 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri } } -func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool) { +func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool, lifecycleOverrides []string) { h := testutils.NewIntegrationTestHarness(t) defer h.Close() @@ -372,13 +379,6 @@ func runTestAWS(t 
*testing.T, clusterName string, srcDir string, version string,
 		expectedFilenames = append(expectedFilenames, s)
 	}
 
-	lifecycleOverrides := []string{}
-	if !expectPolicies {
-		lifecycleOverrides = append(lifecycleOverrides, "IAMRole=Ignore")
-		lifecycleOverrides = append(lifecycleOverrides, "IAMRolePolicy=Ignore")
-		lifecycleOverrides = append(lifecycleOverrides, "IAMInstanceProfileRole=Ignore")
-	}
-
 	if expectPolicies {
 		expectedFilenames = append(expectedFilenames, []string{
 			"aws_iam_role_masters." + clusterName + "_policy",
@@ -396,6 +396,7 @@ func runTestAWS(t *testing.T, clusterName string, srcDir string, version string,
 			}...)
 		}
 	}
+
 	// Special case that tests a bastion with user-data
 	if srcDir == "bastionadditional_user-data" {
 		expectedFilenames = append(expectedFilenames, "aws_launch_configuration_bastion."+clusterName+"_user_data")
diff --git a/docs/security_groups.md b/docs/security_groups.md
new file mode 100644
index 0000000000000..4083fba5672e6
--- /dev/null
+++ b/docs/security_groups.md
@@ -0,0 +1,55 @@
+# Security Groups
+
+## Use existing AWS Security Groups
+**Note: Use this at your own risk; when existing SGs are used, Kops will NOT ensure they are properly configured.**
+
+Rather than having Kops create and manage the Security Groups, it is possible to use existing ones. This is useful in organizations where security policies prevent tools from creating their own Security Groups.
+Kops will still output any differences between the Security Groups it would have managed and your own.
+This is convenient for determining the rule changes that need to be made when upgrading Kops.
+**Using managed Security Groups will not output these differences; it is up to the user to track expected changes to the rules.**
+
+*NOTE: Currently Kops only supports using existing Security Groups for every instance group and Load Balancer in the Cluster, not a mix of existing and managed Security Groups.
+This is due to the lifecycle overrides being used to prevent creation of the Security Group-related resources.*
+
+To do this, first specify the Security Groups for the ELB (if you are using a load balancer) and the Instance Groups.
+Example:
+```yaml
+apiVersion: kops/v1alpha2
+kind: Cluster
+metadata:
+  creationTimestamp: "2016-12-10T22:42:27Z"
+  name: mycluster.example.com
+spec:
+  api:
+    loadBalancer:
+      SecurityGroupOverride: sg-abcd1234
+
+.
+.
+.
+
+apiVersion: kops/v1alpha2
+kind: InstanceGroup
+metadata:
+  creationTimestamp: "2017-01-01T00:00:00Z"
+  labels:
+    kops.k8s.io/cluster: mycluster.example.com
+  name: master-us-test-1a
+spec:
+  SecurityGroupOverride: sg-1234dcba
+
+```
+
+Now run a cluster update to create the new launch configuration, using [lifecycle overrides](./cli/kops_update_cluster.md#options) to prevent Security Group resources from being created:
+
+```
+kops update cluster ${CLUSTER_NAME} --yes --lifecycle-overrides SecurityGroup=ExistsAndWarnIfChanges,SecurityGroupRule=ExistsAndWarnIfChanges
+```
+
+*Every time `kops update cluster` is run, it must include the above `--lifecycle-overrides`.*
+
+Then perform a rolling update in order to replace EC2 instances in the ASG with the new launch configuration:
+
+```
+kops rolling-update cluster ${CLUSTER_NAME} --yes
+```
diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go
index 48e47a1d7768c..a74a89fda8fc7 100644
--- a/pkg/apis/kops/cluster.go
+++ b/pkg/apis/kops/cluster.go
@@ -315,11 +315,18 @@ const (
 
 // LoadBalancerAccessSpec provides configuration details related to API LoadBalancer and its access
 type LoadBalancerAccessSpec struct {
-	Type LoadBalancerType `json:"type,omitempty"`
-	IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"`
-	AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
-	UseForInternalApi bool `json:"useForInternalApi,omitempty"`
-	SSLCertificate string `json:"sslCertificate,omitempty"`
+	// Type of load balancer to create; may be Public or Internal.
+	Type LoadBalancerType `json:"type,omitempty"`
+	// IdleTimeoutSeconds sets the timeout of the API load balancer.
+	IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"`
+	// SecurityGroupOverride overrides the default Kops-created SG for the load balancer.
+	SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
+	// AdditionalSecurityGroups attaches additional security groups (e.g. sg-123456).
+	AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
+	// UseForInternalApi indicates whether the LB should be used by the kubelet.
+	UseForInternalApi bool `json:"useForInternalApi,omitempty"`
+	// SSLCertificate allows you to specify the ACM cert to be used for the LB.
+	SSLCertificate string `json:"sslCertificate,omitempty"`
 }
 
 // KubeDNSConfig defines the kube dns configuration
diff --git a/pkg/apis/kops/instancegroup.go b/pkg/apis/kops/instancegroup.go
index 2b55e9da0d4f5..bdbfc47d8422b 100644
--- a/pkg/apis/kops/instancegroup.go
+++ b/pkg/apis/kops/instancegroup.go
@@ -123,6 +123,8 @@ type InstanceGroupSpec struct {
 	DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"`
 	// IAMProfileSpec defines the identity of the cloud group iam profile (AWS only).
 	IAM *IAMProfileSpec `json:"iam,omitempty"`
+	// SecurityGroupOverride overrides the default security group created by Kops for this IG (AWS only).
+	SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
 }
 
 // UserData defines a user-data section
diff --git a/pkg/apis/kops/v1alpha1/cluster.go b/pkg/apis/kops/v1alpha1/cluster.go
index da42baee79164..fc46a490d8218 100644
--- a/pkg/apis/kops/v1alpha1/cluster.go
+++ b/pkg/apis/kops/v1alpha1/cluster.go
@@ -314,11 +314,18 @@ const (
 
 // LoadBalancerAccessSpec provides configuration details related to API LoadBalancer and its access
 type LoadBalancerAccessSpec struct {
-	Type LoadBalancerType `json:"type,omitempty"`
-	IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"`
-	AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
-	UseForInternalApi bool `json:"useForInternalApi,omitempty"`
-	SSLCertificate string `json:"sslCertificate,omitempty"`
+	// Type of load balancer to create; may be Public or Internal.
+	Type LoadBalancerType `json:"type,omitempty"`
+	// IdleTimeoutSeconds sets the timeout of the API load balancer.
+	IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"`
+	// SecurityGroupOverride overrides the default Kops-created SG for the load balancer.
+	SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
+	// AdditionalSecurityGroups attaches additional security groups (e.g. sg-123456).
+	AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
+	// UseForInternalApi indicates whether the LB should be used by the kubelet.
+	UseForInternalApi bool `json:"useForInternalApi,omitempty"`
+	// SSLCertificate allows you to specify the ACM cert to be used for the LB.
+	SSLCertificate string `json:"sslCertificate,omitempty"`
 }
 
 // KubeDNSConfig defines the kube dns configuration
diff --git a/pkg/apis/kops/v1alpha1/instancegroup.go b/pkg/apis/kops/v1alpha1/instancegroup.go
index ffd4960903105..d776bb766ba2e 100644
--- a/pkg/apis/kops/v1alpha1/instancegroup.go
+++ b/pkg/apis/kops/v1alpha1/instancegroup.go
@@ -102,6 +102,8 @@ type InstanceGroupSpec struct {
 	DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"`
 	// IAMProfileSpec defines the identity of the cloud group iam profile (AWS only).
 	IAM *IAMProfileSpec `json:"iam,omitempty"`
+	// SecurityGroupOverride overrides the default security group created by Kops for this IG (AWS only).
+ SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"` } // IAMProfileSpec is the AWS IAM Profile to attach to instances in this instance diff --git a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go index ca87ea5e5a44c..366ce05d261e9 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.conversion.go @@ -2028,6 +2028,7 @@ func autoConvert_v1alpha1_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan } else { out.IAM = nil } + out.SecurityGroupOverride = in.SecurityGroupOverride return nil } @@ -2114,6 +2115,7 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha1_InstanceGroupSpec(in *kops.I } else { out.IAM = nil } + out.SecurityGroupOverride = in.SecurityGroupOverride return nil } @@ -2740,6 +2742,7 @@ func Convert_kops_LoadBalancer_To_v1alpha1_LoadBalancer(in *kops.LoadBalancer, o func autoConvert_v1alpha1_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(in *LoadBalancerAccessSpec, out *kops.LoadBalancerAccessSpec, s conversion.Scope) error { out.Type = kops.LoadBalancerType(in.Type) out.IdleTimeoutSeconds = in.IdleTimeoutSeconds + out.SecurityGroupOverride = in.SecurityGroupOverride out.AdditionalSecurityGroups = in.AdditionalSecurityGroups out.UseForInternalApi = in.UseForInternalApi out.SSLCertificate = in.SSLCertificate @@ -2754,6 +2757,7 @@ func Convert_v1alpha1_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(in * func autoConvert_kops_LoadBalancerAccessSpec_To_v1alpha1_LoadBalancerAccessSpec(in *kops.LoadBalancerAccessSpec, out *LoadBalancerAccessSpec, s conversion.Scope) error { out.Type = LoadBalancerType(in.Type) out.IdleTimeoutSeconds = in.IdleTimeoutSeconds + out.SecurityGroupOverride = in.SecurityGroupOverride out.AdditionalSecurityGroups = in.AdditionalSecurityGroups out.UseForInternalApi = in.UseForInternalApi out.SSLCertificate = in.SSLCertificate diff --git a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go index ba15249a75d09..779dda4e28b62 100644 --- a/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha1/zz_generated.deepcopy.go @@ -1799,6 +1799,15 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { (*in).DeepCopyInto(*out) } } + if in.SecurityGroupOverride != nil { + in, out := &in.SecurityGroupOverride, &out.SecurityGroupOverride + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } @@ -2876,6 +2885,15 @@ func (in *LoadBalancerAccessSpec) DeepCopyInto(out *LoadBalancerAccessSpec) { **out = **in } } + if in.SecurityGroupOverride != nil { + in, out := &in.SecurityGroupOverride, &out.SecurityGroupOverride + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } if in.AdditionalSecurityGroups != nil { in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups *out = make([]string, len(*in)) diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go index dd6ece0f916c3..b9bd34fc6042b 100644 --- a/pkg/apis/kops/v1alpha2/cluster.go +++ b/pkg/apis/kops/v1alpha2/cluster.go @@ -315,11 +315,18 @@ const ( // LoadBalancerAccessSpec provides configuration details related to API LoadBalancer and its access type LoadBalancerAccessSpec struct { - Type LoadBalancerType `json:"type,omitempty"` - IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"` - AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"` - UseForInternalApi 
bool `json:"useForInternalApi,omitempty"`
-	SSLCertificate string `json:"sslCertificate,omitempty"`
+	// Type of load balancer to create; may be Public or Internal.
+	Type LoadBalancerType `json:"type,omitempty"`
+	// IdleTimeoutSeconds sets the timeout of the API load balancer.
+	IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"`
+	// SecurityGroupOverride overrides the default Kops-created SG for the load balancer.
+	SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
+	// AdditionalSecurityGroups attaches additional security groups (e.g. sg-123456).
+	AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
+	// UseForInternalApi indicates whether the LB should be used by the kubelet.
+	UseForInternalApi bool `json:"useForInternalApi,omitempty"`
+	// SSLCertificate allows you to specify the ACM cert to be used for the LB.
+	SSLCertificate string `json:"sslCertificate,omitempty"`
 }
 
 // KubeDNSConfig defines the kube dns configuration
diff --git a/pkg/apis/kops/v1alpha2/instancegroup.go b/pkg/apis/kops/v1alpha2/instancegroup.go
index a586fa0a6d541..0f103cb8e835c 100644
--- a/pkg/apis/kops/v1alpha2/instancegroup.go
+++ b/pkg/apis/kops/v1alpha2/instancegroup.go
@@ -111,6 +111,8 @@ type InstanceGroupSpec struct {
 	DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"`
 	// IAMProfileSpec defines the identity of the cloud group iam profile (AWS only).
 	IAM *IAMProfileSpec `json:"iam,omitempty"`
+	// SecurityGroupOverride overrides the default security group created by Kops for this IG (AWS only).
+	SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
 }
 
 // UserData defines a user-data section
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
index 35fd664b97778..a96fedaad9989 100644
--- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
+++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go
@@ -2140,6 +2140,7 @@ func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
 	} else {
 		out.IAM = nil
 	}
+	out.SecurityGroupOverride = in.SecurityGroupOverride
 	return nil
 }
 
@@ -2231,6 +2232,7 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.I
 	} else {
 		out.IAM = nil
 	}
+	out.SecurityGroupOverride = in.SecurityGroupOverride
 	return nil
 }
 
@@ -3004,6 +3006,7 @@ func Convert_kops_LoadBalancer_To_v1alpha2_LoadBalancer(in *kops.LoadBalancer, o
 func autoConvert_v1alpha2_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(in *LoadBalancerAccessSpec, out *kops.LoadBalancerAccessSpec, s conversion.Scope) error {
 	out.Type = kops.LoadBalancerType(in.Type)
 	out.IdleTimeoutSeconds = in.IdleTimeoutSeconds
+	out.SecurityGroupOverride = in.SecurityGroupOverride
 	out.AdditionalSecurityGroups = in.AdditionalSecurityGroups
 	out.UseForInternalApi = in.UseForInternalApi
 	out.SSLCertificate = in.SSLCertificate
@@ -3018,6 +3021,7 @@ func Convert_v1alpha2_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(in *
 func autoConvert_kops_LoadBalancerAccessSpec_To_v1alpha2_LoadBalancerAccessSpec(in *kops.LoadBalancerAccessSpec, out *LoadBalancerAccessSpec, s conversion.Scope) error {
 	out.Type = LoadBalancerType(in.Type)
 	out.IdleTimeoutSeconds = in.IdleTimeoutSeconds
+	out.SecurityGroupOverride = in.SecurityGroupOverride
 	out.AdditionalSecurityGroups = in.AdditionalSecurityGroups
 	out.UseForInternalApi = in.UseForInternalApi
 	out.SSLCertificate = in.SSLCertificate
diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go 
b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index 19250f4eb4ade..1c562375d8c92 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -1771,6 +1771,15 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { (*in).DeepCopyInto(*out) } } + if in.SecurityGroupOverride != nil { + in, out := &in.SecurityGroupOverride, &out.SecurityGroupOverride + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } @@ -2957,6 +2966,15 @@ func (in *LoadBalancerAccessSpec) DeepCopyInto(out *LoadBalancerAccessSpec) { **out = **in } } + if in.SecurityGroupOverride != nil { + in, out := &in.SecurityGroupOverride, &out.SecurityGroupOverride + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } if in.AdditionalSecurityGroups != nil { in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups *out = make([]string, len(*in)) diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index b1c6610ba9958..c1b4e85d6d4fc 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -1943,6 +1943,15 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { (*in).DeepCopyInto(*out) } } + if in.SecurityGroupOverride != nil { + in, out := &in.SecurityGroupOverride, &out.SecurityGroupOverride + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } @@ -3161,6 +3170,15 @@ func (in *LoadBalancerAccessSpec) DeepCopyInto(out *LoadBalancerAccessSpec) { **out = **in } } + if in.SecurityGroupOverride != nil { + in, out := &in.SecurityGroupOverride, &out.SecurityGroupOverride + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } if in.AdditionalSecurityGroups != nil { in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups *out = make([]string, len(*in)) diff --git a/pkg/model/BUILD.bazel b/pkg/model/BUILD.bazel index 691c3fd0c2cb7..310f472789dde 100644 --- a/pkg/model/BUILD.bazel +++ b/pkg/model/BUILD.bazel @@ -59,6 +59,7 @@ go_test( srcs = [ "bootstrapscript_test.go", "context_test.go", + "firewall_test.go", ], data = glob(["tests/**"]), #keep embed = [":go_default_library"], diff --git a/pkg/model/awsmodel/api_loadbalancer.go b/pkg/model/awsmodel/api_loadbalancer.go index 440120d704b3a..436a786db9746 100644 --- a/pkg/model/awsmodel/api_loadbalancer.go +++ b/pkg/model/awsmodel/api_loadbalancer.go @@ -110,13 +110,26 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { listeners["443"] = &awstasks.LoadBalancerListener{InstancePort: 443, SSLCertificateID: lbSpec.SSLCertificate} } + var sgLink *awstasks.SecurityGroup + if lbSpec.SecurityGroupOverride != nil { + glog.V(1).Infof("WARNING: You are overwriting the Load Balancers, Security Group. When this is done you are responsible for ensure the correct rules!") + + sgLink = &awstasks.SecurityGroup{ + Name: lbSpec.SecurityGroupOverride, + ID: lbSpec.SecurityGroupOverride, + Shared: fi.Bool(true), + } + } else { + sgLink = b.LinkToELBSecurityGroup("api") + } + elb = &awstasks.LoadBalancer{ Name: s("api." 
+ b.ClusterName()), Lifecycle: b.Lifecycle, LoadBalancerName: s(loadBalancerName), SecurityGroups: []*awstasks.SecurityGroup{ - b.LinkToELBSecurityGroup("api"), + sgLink, }, Subnets: elbSubnets, Listeners: listeners, @@ -148,8 +161,9 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { } // Create security group for API ELB + var lbSG *awstasks.SecurityGroup { - t := &awstasks.SecurityGroup{ + lbSG = &awstasks.SecurityGroup{ Name: s(b.ELBSecurityGroupName("api")), Lifecycle: b.SecurityLifecycle, @@ -157,8 +171,15 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { Description: s("Security group for api ELB"), RemoveExtraRules: []string{"port=443"}, } - t.Tags = b.CloudTags(*t.Name, false) - c.AddTask(t) + lbSG.Tags = b.CloudTags(*lbSG.Name, false) + + if lbSpec.SecurityGroupOverride != nil { + lbSG.Name = fi.String(*lbSpec.SecurityGroupOverride) + lbSG.ID = fi.String(*lbSpec.SecurityGroupOverride) + lbSG.Shared = fi.Bool(true) + } + + c.AddTask(lbSG) } // Allow traffic from ELB to egress freely @@ -167,7 +188,7 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { Name: s("api-elb-egress"), Lifecycle: b.SecurityLifecycle, - SecurityGroup: b.LinkToELBSecurityGroup("api"), + SecurityGroup: lbSG, Egress: fi.Bool(true), CIDR: s("0.0.0.0/0"), } @@ -181,7 +202,7 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { Name: s("https-api-elb-" + cidr), Lifecycle: b.SecurityLifecycle, - SecurityGroup: b.LinkToELBSecurityGroup("api"), + SecurityGroup: lbSG, CIDR: s(cidr), FromPort: i64(443), ToPort: i64(443), @@ -208,19 +229,28 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { } } + masterGroups, err := b.GetSecurityGroups(kops.InstanceGroupRoleMaster) + if err != nil { + return err + } + // Allow HTTPS to the master instances from the ELB { - t := &awstasks.SecurityGroupRule{ - Name: s("https-elb-to-master"), - Lifecycle: b.SecurityLifecycle, + for masterGroupName, masterGroup := range masterGroups { + suffix := GetGroupSuffix(masterGroupName, masterGroups) + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("https-elb-to-master%s", suffix)), + Lifecycle: b.SecurityLifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - SourceGroup: b.LinkToELBSecurityGroup("api"), - FromPort: i64(443), - ToPort: i64(443), - Protocol: s("tcp"), + SecurityGroup: masterGroup, + SourceGroup: lbSG, + // SourceGroup: sgLink, + FromPort: i64(443), + ToPort: i64(443), + Protocol: s("tcp"), + } + c.AddTask(t) } - c.AddTask(t) } if dns.IsGossipHostname(b.Cluster.Name) || b.UsePrivateDNS() { @@ -314,3 +344,13 @@ func (b *APILoadBalancerBuilder) chooseBestSubnetForELB(zone string, subnets []* return scoredSubnets[0].subnet } + +// GetGroupSuffix returns the name of the security groups suffix. 
+func GetGroupSuffix(name string, groups map[string]*awstasks.SecurityGroup) string { + if len(groups) != 1 { + glog.V(8).Infof("adding group suffix: %q", name) + return "-" + name + } + + return "" +} diff --git a/pkg/model/awsmodel/autoscalinggroup.go b/pkg/model/awsmodel/autoscalinggroup.go index 4e07bbaf6e980..90ea27d2b2b18 100644 --- a/pkg/model/awsmodel/autoscalinggroup.go +++ b/pkg/model/awsmodel/autoscalinggroup.go @@ -76,12 +76,25 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { return fmt.Errorf("unable to find iam profile link for instance group %q: %v", ig.ObjectMeta.Name, err) } + var sgLink *awstasks.SecurityGroup + if ig.Spec.SecurityGroupOverride != nil { + glog.V(1).Infof("WARNING: You are overwriting the Instance Groups, Security Group. When this is done you are responsible for ensure the correct rules!") + + sgLink = &awstasks.SecurityGroup{ + Name: ig.Spec.SecurityGroupOverride, + ID: ig.Spec.SecurityGroupOverride, + Shared: fi.Bool(true), + } + } else { + sgLink = b.LinkToSecurityGroup(ig.Spec.Role) + } + t := &awstasks.LaunchConfiguration{ Name: s(name), Lifecycle: b.Lifecycle, SecurityGroups: []*awstasks.SecurityGroup{ - b.LinkToSecurityGroup(ig.Spec.Role), + sgLink, }, IAMInstanceProfile: link, ImageID: s(ig.Spec.Image), diff --git a/pkg/model/external_access.go b/pkg/model/external_access.go index e413656456151..ded0f3ec5ad8e 100644 --- a/pkg/model/external_access.go +++ b/pkg/model/external_access.go @@ -17,6 +17,8 @@ limitations under the License. package model import ( + "fmt" + "github.com/golang/glog" "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/upup/pkg/fi" @@ -41,6 +43,15 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { glog.Warningf("SSHAccess is empty") } + masterGroups, err := b.GetSecurityGroups(kops.InstanceGroupRoleMaster) + if err != nil { + return err + } + nodeGroups, err := b.GetSecurityGroups(kops.InstanceGroupRoleNode) + if err != nil { + return err + } + // SSH is open to AdminCIDR set if b.UsesSSHBastion() { // If we are using a bastion, we only access through the bastion @@ -49,25 +60,34 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { glog.V(2).Infof("bastion is in use; won't configure SSH access to master / node instances") } else { for _, sshAccess := range b.Cluster.Spec.SSHAccess { - c.AddTask(&awstasks.SecurityGroupRule{ - Name: s("ssh-external-to-master-" + sshAccess), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - Protocol: s("tcp"), - FromPort: i64(22), - ToPort: i64(22), - CIDR: s(sshAccess), - }) - c.AddTask(&awstasks.SecurityGroupRule{ - Name: s("ssh-external-to-node-" + sshAccess), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - Protocol: s("tcp"), - FromPort: i64(22), - ToPort: i64(22), - CIDR: s(sshAccess), - }) + for masterGroupName, masterGroup := range masterGroups { + suffix := GetGroupSuffix(masterGroupName, masterGroups) + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("ssh-external-to-master-%s%s", sshAccess, suffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: masterGroup, + Protocol: s("tcp"), + FromPort: i64(22), + ToPort: i64(22), + CIDR: s(sshAccess), + } + c.AddTask(t) + } + + for nodeGroupName, nodeGroup := range nodeGroups { + suffix := GetGroupSuffix(nodeGroupName, nodeGroups) + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("ssh-external-to-node-%s%s", sshAccess, suffix)), + Lifecycle: b.Lifecycle, + 
SecurityGroup: nodeGroup, + Protocol: s("tcp"), + FromPort: i64(22), + ToPort: i64(22), + CIDR: s(sshAccess), + } + c.AddTask(t) + } } } @@ -77,24 +97,30 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { return err } - c.AddTask(&awstasks.SecurityGroupRule{ - Name: s("nodeport-tcp-external-to-node-" + nodePortAccess), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - Protocol: s("tcp"), - FromPort: i64(int64(nodePortRange.Base)), - ToPort: i64(int64(nodePortRange.Base + nodePortRange.Size - 1)), - CIDR: s(nodePortAccess), - }) - c.AddTask(&awstasks.SecurityGroupRule{ - Name: s("nodeport-udp-external-to-node-" + nodePortAccess), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - Protocol: s("udp"), - FromPort: i64(int64(nodePortRange.Base)), - ToPort: i64(int64(nodePortRange.Base + nodePortRange.Size - 1)), - CIDR: s(nodePortAccess), - }) + for nodeGroupName, nodeGroup := range nodeGroups { + suffix := GetGroupSuffix(nodeGroupName, nodeGroups) + t1 := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("nodeport-tcp-external-to-node-%s%s", nodePortAccess, suffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: nodeGroup, + Protocol: s("tcp"), + FromPort: i64(int64(nodePortRange.Base)), + ToPort: i64(int64(nodePortRange.Base + nodePortRange.Size - 1)), + CIDR: s(nodePortAccess), + } + c.AddTask(t1) + + t2 := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("nodeport-udp-external-to-node-%s%s", nodePortAccess, suffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: nodeGroup, + Protocol: s("udp"), + FromPort: i64(int64(nodePortRange.Base)), + ToPort: i64(int64(nodePortRange.Base + nodePortRange.Size - 1)), + CIDR: s(nodePortAccess), + } + c.AddTask(t2) + } } if !b.UseLoadBalancerForAPI() { @@ -104,16 +130,19 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { // HTTPS to the master is allowed (for API access) for _, apiAccess := range b.Cluster.Spec.KubernetesAPIAccess { - t := &awstasks.SecurityGroupRule{ - Name: s("https-external-to-master-" + apiAccess), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - Protocol: s("tcp"), - FromPort: i64(443), - ToPort: i64(443), - CIDR: s(apiAccess), + for masterGroupName, masterGroup := range masterGroups { + suffix := GetGroupSuffix(masterGroupName, masterGroups) + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("https-external-to-master-%s%s", apiAccess, suffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: masterGroup, + Protocol: s("tcp"), + FromPort: i64(443), + ToPort: i64(443), + CIDR: s(apiAccess), + } + c.AddTask(t) } - c.AddTask(t) } } diff --git a/pkg/model/firewall.go b/pkg/model/firewall.go index 4cf400893ec93..c623eae7eb685 100644 --- a/pkg/model/firewall.go +++ b/pkg/model/firewall.go @@ -42,73 +42,74 @@ type FirewallModelBuilder struct { var _ fi.ModelBuilder = &FirewallModelBuilder{} func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error { - if err := b.buildNodeRules(c); err != nil { + nodeGroups, err := b.buildNodeRules(c) + if err != nil { return err } - if err := b.buildMasterRules(c); err != nil { + + masterGroups, err := b.buildMasterRules(c, nodeGroups) + if err != nil { return err } + + // We _should_ block per port... 
but: + // * It causes e2e tests to break + // * Users expect to be able to reach pods + // * If users are running an overlay, we punch a hole in it anyway + // b.applyNodeToMasterAllowSpecificPorts(c) + b.applyNodeToMasterBlockSpecificPorts(c, nodeGroups, masterGroups) + return nil } -func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) error { - { - t := &awstasks.SecurityGroup{ - Name: s(b.SecurityGroupName(kops.InstanceGroupRoleNode)), - Lifecycle: b.Lifecycle, - VPC: b.LinkToVPC(), - Description: s("Security group for nodes"), - RemoveExtraRules: []string{"port=22"}, - } - t.Tags = b.CloudTags(*t.Name, false) - c.AddTask(t) - } +func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) (map[string]*awstasks.SecurityGroup, error) { - // Allow full egress - { - t := &awstasks.SecurityGroupRule{ - Name: s("node-egress"), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - Egress: fi.Bool(true), - CIDR: s("0.0.0.0/0"), - } - c.AddTask(t) + nodeGroups, err := b.createSecurityGroups(kops.InstanceGroupRoleNode, b.Lifecycle, c) + if err != nil { + return nil, err } - // Nodes can talk to nodes - { - t := &awstasks.SecurityGroupRule{ - Name: s("all-node-to-node"), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), + for nodeGroupName, secGroup := range nodeGroups { + suffix := GetGroupSuffix(nodeGroupName, nodeGroups) + // Allow full egress + { + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("node-egress%s", suffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: secGroup, + Egress: fi.Bool(true), + CIDR: s("0.0.0.0/0"), + } + c.AddTask(t) } - c.AddTask(t) - } - // Pods running in Nodes could need to reach pods in master/s - if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil { - // Nodes can talk to masters + // Nodes can talk to nodes { t := &awstasks.SecurityGroupRule{ - Name: s("all-nodes-to-master"), + Name: s(fmt.Sprintf("all-node-to-node%s", suffix)), Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), + SecurityGroup: secGroup, + SourceGroup: secGroup, } c.AddTask(t) } - } - // We _should_ block per port... 
but: - // * It causes e2e tests to break - // * Users expect to be able to reach pods - // * If users are running an overlay, we punch a hole in it anyway - //b.applyNodeToMasterAllowSpecificPorts(c) - b.applyNodeToMasterBlockSpecificPorts(c) + // Pods running in Nodes could need to reach pods in master/s + if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil { + // Nodes can talk to masters + { + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("all-nodes-to-master%s", suffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), + SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), + } + c.AddTask(t) + } + } + } - return nil + return nodeGroups, nil } func (b *FirewallModelBuilder) applyNodeToMasterAllowSpecificPorts(c *fi.ModelBuilderContext) { @@ -181,7 +182,7 @@ func (b *FirewallModelBuilder) applyNodeToMasterAllowSpecificPorts(c *fi.ModelBu } for _, udpPort := range udpPorts { - c.AddTask(&awstasks.SecurityGroupRule{ + t := &awstasks.SecurityGroupRule{ Name: s(fmt.Sprintf("node-to-master-udp-%d", udpPort)), Lifecycle: b.Lifecycle, SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), @@ -189,10 +190,11 @@ func (b *FirewallModelBuilder) applyNodeToMasterAllowSpecificPorts(c *fi.ModelBu FromPort: i64(udpPort), ToPort: i64(udpPort), Protocol: s("udp"), - }) + } + c.AddTask(t) } for _, tcpPort := range tcpPorts { - c.AddTask(&awstasks.SecurityGroupRule{ + t := &awstasks.SecurityGroupRule{ Name: s(fmt.Sprintf("node-to-master-tcp-%d", tcpPort)), Lifecycle: b.Lifecycle, SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), @@ -200,7 +202,8 @@ func (b *FirewallModelBuilder) applyNodeToMasterAllowSpecificPorts(c *fi.ModelBu FromPort: i64(tcpPort), ToPort: i64(tcpPort), Protocol: s("tcp"), - }) + } + c.AddTask(t) } for _, protocol := range protocols { awsName := strconv.Itoa(int(protocol)) @@ -212,17 +215,18 @@ func (b *FirewallModelBuilder) applyNodeToMasterAllowSpecificPorts(c *fi.ModelBu glog.Warningf("unknown protocol %q - naming by number", awsName) } - c.AddTask(&awstasks.SecurityGroupRule{ + t := &awstasks.SecurityGroupRule{ Name: s("node-to-master-protocol-" + name), Lifecycle: b.Lifecycle, SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), Protocol: s(awsName), - }) + } + c.AddTask(t) } } -func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBuilderContext) { +func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBuilderContext, nodeGroups map[string]*awstasks.SecurityGroup, masterGroups map[string]*awstasks.SecurityGroup) { type portRange struct { From int To int @@ -258,29 +262,10 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu protocols = append(protocols, ProtocolIPIP) } - if b.Cluster.Spec.Networking.Cilium != nil { - // Cilium needs to access etcd - glog.Warningf("Opening etcd port on masters for access from the nodes, for Cilium. 
This is unsafe in untrusted environments.") - tcpBlocked[4001] = false - protocols = append(protocols, ProtocolIPIP) - } - if b.Cluster.Spec.Networking.Kuberouter != nil { protocols = append(protocols, ProtocolIPIP) } - for _, r := range udpRanges { - c.AddTask(&awstasks.SecurityGroupRule{ - Name: s(fmt.Sprintf("node-to-master-udp-%d-%d", r.From, r.To)), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - FromPort: i64(int64(r.From)), - ToPort: i64(int64(r.To)), - Protocol: s("udp"), - }) - } - tcpRanges := []portRange{ {From: 1, To: 0}, } @@ -295,42 +280,137 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu } } - for _, r := range tcpRanges { - c.AddTask(&awstasks.SecurityGroupRule{ - Name: s(fmt.Sprintf("node-to-master-tcp-%d-%d", r.From, r.To)), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - FromPort: i64(int64(r.From)), - ToPort: i64(int64(r.To)), - Protocol: s("tcp"), - }) + for masterSecGroupName, masterGroup := range masterGroups { + var masterSuffix string + var nodeSuffix string + + if len(masterGroups) != 1 { + masterSuffix = "-" + masterSecGroupName + } else { + masterSuffix = "" + } + + for nodeSecGroupName, nodeGroup := range nodeGroups { + + if len(masterGroups) == 1 && len(nodeGroups) == 1 { + nodeSuffix = "" + } else { + nodeSuffix = fmt.Sprintf("%s-%s", masterSuffix, nodeSecGroupName) + } + + for _, r := range udpRanges { + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("node-to-master-udp-%d-%d%s", r.From, r.To, nodeSuffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: masterGroup, + SourceGroup: nodeGroup, + FromPort: i64(int64(r.From)), + ToPort: i64(int64(r.To)), + Protocol: s("udp"), + } + c.AddTask(t) + } + for _, r := range tcpRanges { + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("node-to-master-tcp-%d-%d%s", r.From, r.To, nodeSuffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: masterGroup, + SourceGroup: nodeGroup, + FromPort: i64(int64(r.From)), + ToPort: i64(int64(r.To)), + Protocol: s("tcp"), + } + c.AddTask(t) + } + for _, protocol := range protocols { + awsName := strconv.Itoa(int(protocol)) + name := awsName + switch protocol { + case ProtocolIPIP: + name = "ipip" + default: + glog.Warningf("unknown protocol %q - naming by number", awsName) + } + + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("node-to-master-protocol-%s%s", name, nodeSuffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: masterGroup, + SourceGroup: nodeGroup, + Protocol: s(awsName), + } + c.AddTask(t) + } + } } - for _, protocol := range protocols { - awsName := strconv.Itoa(int(protocol)) - name := awsName - switch protocol { - case ProtocolIPIP: - name = "ipip" - default: - glog.Warningf("unknown protocol %q - naming by number", awsName) +} + +func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext, nodeGroups map[string]*awstasks.SecurityGroup) (map[string]*awstasks.SecurityGroup, error) { + masterGroups, err := b.createSecurityGroups(kops.InstanceGroupRoleMaster, b.Lifecycle, c) + if err != nil { + return nil, err + } + + for masterSecGroupName, masterGroup := range masterGroups { + suffix := GetGroupSuffix(masterSecGroupName, masterGroups) + // Allow full egress + { + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("master-egress%s", suffix)), + Lifecycle: b.Lifecycle, + 
SecurityGroup: masterGroup, + Egress: fi.Bool(true), + CIDR: s("0.0.0.0/0"), + } + c.AddTask(t) } - c.AddTask(&awstasks.SecurityGroupRule{ - Name: s("node-to-master-protocol-" + name), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - Protocol: s(awsName), - }) + // Masters can talk to masters + { + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("all-master-to-master%s", suffix)), + Lifecycle: b.Lifecycle, + SecurityGroup: masterGroup, + SourceGroup: masterGroup, + } + c.AddTask(t) + } + for nodeSecGroupName, nodeGroup := range nodeGroups { + + if len(masterGroups) == 1 && len(nodeGroups) == 1 { + nodeSecGroupName = "" + } else { + nodeSecGroupName = fmt.Sprintf("%s-%s", masterSecGroupName, nodeSecGroupName) + } + + // Masters can talk to nodes + { + t := &awstasks.SecurityGroupRule{ + Name: s(fmt.Sprintf("all-master-to-node%s", nodeSecGroupName)), + Lifecycle: b.Lifecycle, + SecurityGroup: nodeGroup, + SourceGroup: masterGroup, + } + c.AddTask(t) + } + } } + + return masterGroups, nil +} + +func (b *KopsModelContext) GetSecurityGroups(role kops.InstanceGroupRole) (map[string]*awstasks.SecurityGroup, error) { + return b.createSecurityGroups(role, nil, nil) } -func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext) error { - { - t := &awstasks.SecurityGroup{ - Name: s(b.SecurityGroupName(kops.InstanceGroupRoleMaster)), - Lifecycle: b.Lifecycle, +func (b *KopsModelContext) createSecurityGroups(role kops.InstanceGroupRole, lifecycle *fi.Lifecycle, c *fi.ModelBuilderContext) (map[string]*awstasks.SecurityGroup, error) { + + var baseGroup *awstasks.SecurityGroup + if role == kops.InstanceGroupRoleMaster { + name := "masters." + b.ClusterName() + baseGroup = &awstasks.SecurityGroup{ + Name: s(name), + Lifecycle: lifecycle, VPC: b.LinkToVPC(), Description: s("Security group for masters"), RemoveExtraRules: []string{ @@ -347,43 +427,77 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext) error // TODO: Protocol 4 for calico }, } - t.Tags = b.CloudTags(*t.Name, false) - c.AddTask(t) + baseGroup.Tags = b.CloudTags(*baseGroup.Name, false) + } else if role == kops.InstanceGroupRoleNode { + name := "nodes." + b.ClusterName() + baseGroup = &awstasks.SecurityGroup{ + Name: s(name), + Lifecycle: lifecycle, + VPC: b.LinkToVPC(), + Description: s("Security group for nodes"), + RemoveExtraRules: []string{"port=22"}, + } + baseGroup.Tags = b.CloudTags(*baseGroup.Name, false) + } else if role == kops.InstanceGroupRoleBastion { + return nil, fmt.Errorf("bastion are not supported yet with instancegroup securitygroup") + /* + // TODO use this instead of the hardcoded names?? + // b.SecurityGroupName(kops.InstanceGroupRoleBastion)) + // TODO implement + name := "bastion." 
+ b.ClusterName() + baseGroup = &awstasks.SecurityGroup{ + Name: s(name), + Lifecycle: lifecycle, + VPC: b.LinkToVPC(), + Description: s("Security group for bastion"), + RemoveExtraRules: []string{"port=22"}, + } + baseGroup.Tags = b.CloudTags(*baseGroup.Name, false) + */ + } else { + return nil, fmt.Errorf("not a supported security group type") } - // Allow full egress - { - t := &awstasks.SecurityGroupRule{ - Name: s("master-egress"), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - Egress: fi.Bool(true), - CIDR: s("0.0.0.0/0"), + groups := make(map[string]*awstasks.SecurityGroup) + for _, ig := range b.InstanceGroups { + if ig.Spec.SecurityGroupOverride != nil && ig.Spec.Role == role { + name := fi.StringValue(ig.Spec.SecurityGroupOverride) + t := &awstasks.SecurityGroup{ + Name: ig.Spec.SecurityGroupOverride, + ID: ig.Spec.SecurityGroupOverride, + Lifecycle: lifecycle, + VPC: b.LinkToVPC(), + Shared: fi.Bool(true), + Description: baseGroup.Description, + } + groups[name] = t } - c.AddTask(t) } - // Masters can talk to masters - { - t := &awstasks.SecurityGroupRule{ - Name: s("all-master-to-master"), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), - SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), + for name, t := range groups { + if c != nil { + glog.V(8).Infof("adding security group: %q", name) + c.AddTask(t) } - c.AddTask(t) } - // Masters can talk to nodes - { - t := &awstasks.SecurityGroupRule{ - Name: s("all-master-to-node"), - Lifecycle: b.Lifecycle, - SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleNode), - SourceGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster), + if len(groups) == 0 { + groups[fi.StringValue(baseGroup.Name)] = baseGroup + if c != nil { + glog.V(8).Infof("adding security group: %q", fi.StringValue(baseGroup.Name)) + c.AddTask(baseGroup) } - c.AddTask(t) } - return nil + return groups, nil +} + +// GetGroupSuffix returns the name of the security groups suffix. +func GetGroupSuffix(name string, groups map[string]*awstasks.SecurityGroup) string { + if len(groups) != 1 { + glog.V(8).Infof("adding group suffix: %q", name) + return "-" + name + } + + return "" } diff --git a/pkg/model/firewall_test.go b/pkg/model/firewall_test.go new file mode 100644 index 0000000000000..7dcb74ab3fd0a --- /dev/null +++ b/pkg/model/firewall_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package model + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kops/pkg/apis/kops" +) + +func Test_SharedGroups(t *testing.T) { + grid := []struct { + Prefix string + ClusterName string + Expected string + }{ + { + "bastion", "mycluster", + "bastion-mycluster-vnrjie", + }, + { + "bastion", "mycluster.example.com", + "bastion-mycluster-example-o8elkm", + }, + { + "api", "this.is.a.very.long.cluster.example.com", + "api-this-is-a-very-long-c-q4ukp4", + }, + { + "bastion", "this.is.a.very.long.cluster.example.com", + "bastion-this-is-a-very-lo-4ggpa2", + }, + } + for _, g := range grid { + c := &KopsModelContext{ + Cluster: &kops.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: g.ClusterName, + }, + }, + } + actual := c.GetELBName32(g.Prefix) + if actual != g.Expected { + t.Errorf("unexpected result from %q+%q. expected %q, got %q", g.Prefix, g.ClusterName, g.Expected, actual) + } + } +} + +func makeTestInstanceGroupSec(role kops.InstanceGroupRole, secGroup *string) *kops.InstanceGroup { + return &kops.InstanceGroup{ + Spec: kops.InstanceGroupSpec{ + Role: role, + SecurityGroupOverride: secGroup, + }, + } +} diff --git a/tests/integration/update_cluster/existing_sg/id_rsa.pub b/tests/integration/update_cluster/existing_sg/id_rsa.pub new file mode 100755 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/existing_sg/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml b/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml new file mode 100644 index 0000000000000..fcd768e0a5969 --- /dev/null +++ b/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml @@ -0,0 +1,134 @@ +apiVersion: kops/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: existingsg.example.com +spec: + api: + loadBalancer: + SecurityGroupOverride: sg-abcd1234 + kubernetesApiAccess: + - 0.0.0.0/0 + channel: stable + cloudProvider: aws + configBase: memfs://clusters.example.com/existingsg.example.com + etcdClusters: + - etcdMembers: + - instanceGroup: master-us-test-1a + name: a + - instanceGroup: master-us-test-1b + name: b + - instanceGroup: master-us-test-1c + name: c + name: main + - etcdMembers: + - instanceGroup: master-us-test-1a + name: a + - instanceGroup: master-us-test-1b + name: b + - instanceGroup: master-us-test-1c + name: c + name: events + kubernetesVersion: v1.4.12 + masterInternalName: api.internal.existingsg.example.com + masterPublicName: api.existingsg.example.com + networkCIDR: 172.20.0.0/16 + networking: + kubenet: {} + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + topology: + masters: public + nodes: public + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + - cidr: 172.20.64.0/19 + name: us-test-1b + type: Public + zone: us-test-1b + - cidr: 172.20.96.0/19 + name: us-test-1c + type: Public + zone: us-test-1c + +--- + +apiVersion: kops/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: existingsg.example.com + name: master-us-test-1a +spec: + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1a + 
SecurityGroupOverride: sg-1234dcba + +--- + +apiVersion: kops/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: existingsg.example.com + name: master-us-test-1b +spec: + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1b + SecurityGroupOverride: sg-1234dcba + +--- + +apiVersion: kops/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: existingsg.example.com + name: master-us-test-1c +spec: + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1c + SecurityGroupOverride: sg-1234dcba + +--- + +apiVersion: kops/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: nodes + labels: + kops.k8s.io/cluster: existingsg.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: t2.medium + maxSize: 2 + minSize: 2 + role: Node + subnets: + - us-test-1a + SecurityGroupOverride: sg-1234abcd + diff --git a/tests/integration/update_cluster/existing_sg/kubernetes.tf b/tests/integration/update_cluster/existing_sg/kubernetes.tf new file mode 100644 index 0000000000000..291a944c5d7df --- /dev/null +++ b/tests/integration/update_cluster/existing_sg/kubernetes.tf @@ -0,0 +1,614 @@ +locals = { + cluster_name = "existingsg.example.com" + master_autoscaling_group_ids = ["${aws_autoscaling_group.master-us-test-1a-masters-existingsg-example-com.id}", "${aws_autoscaling_group.master-us-test-1b-masters-existingsg-example-com.id}", "${aws_autoscaling_group.master-us-test-1c-masters-existingsg-example-com.id}"] + master_security_group_ids = ["sg-1234dcba"] + masters_role_arn = "${aws_iam_role.masters-existingsg-example-com.arn}" + masters_role_name = "${aws_iam_role.masters-existingsg-example-com.name}" + node_autoscaling_group_ids = ["${aws_autoscaling_group.nodes-existingsg-example-com.id}"] + node_security_group_ids = ["sg-1234abcd"] + node_subnet_ids = ["${aws_subnet.us-test-1a-existingsg-example-com.id}"] + nodes_role_arn = "${aws_iam_role.nodes-existingsg-example-com.arn}" + nodes_role_name = "${aws_iam_role.nodes-existingsg-example-com.name}" + region = "us-test-1" + route_table_public_id = "${aws_route_table.existingsg-example-com.id}" + subnet_us-test-1a_id = "${aws_subnet.us-test-1a-existingsg-example-com.id}" + subnet_us-test-1b_id = "${aws_subnet.us-test-1b-existingsg-example-com.id}" + subnet_us-test-1c_id = "${aws_subnet.us-test-1c-existingsg-example-com.id}" + vpc_cidr_block = "${aws_vpc.existingsg-example-com.cidr_block}" + vpc_id = "${aws_vpc.existingsg-example-com.id}" +} + +output "cluster_name" { + value = "existingsg.example.com" +} + +output "master_autoscaling_group_ids" { + value = ["${aws_autoscaling_group.master-us-test-1a-masters-existingsg-example-com.id}", "${aws_autoscaling_group.master-us-test-1b-masters-existingsg-example-com.id}", "${aws_autoscaling_group.master-us-test-1c-masters-existingsg-example-com.id}"] +} + +output "master_security_group_ids" { + value = ["sg-1234dcba"] +} + +output "masters_role_arn" { + value = "${aws_iam_role.masters-existingsg-example-com.arn}" +} + +output "masters_role_name" { + value = "${aws_iam_role.masters-existingsg-example-com.name}" +} + +output "node_autoscaling_group_ids" { + value = 
["${aws_autoscaling_group.nodes-existingsg-example-com.id}"] +} + +output "node_security_group_ids" { + value = ["sg-1234abcd"] +} + +output "node_subnet_ids" { + value = ["${aws_subnet.us-test-1a-existingsg-example-com.id}"] +} + +output "nodes_role_arn" { + value = "${aws_iam_role.nodes-existingsg-example-com.arn}" +} + +output "nodes_role_name" { + value = "${aws_iam_role.nodes-existingsg-example-com.name}" +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = "${aws_route_table.existingsg-example-com.id}" +} + +output "subnet_us-test-1a_id" { + value = "${aws_subnet.us-test-1a-existingsg-example-com.id}" +} + +output "subnet_us-test-1b_id" { + value = "${aws_subnet.us-test-1b-existingsg-example-com.id}" +} + +output "subnet_us-test-1c_id" { + value = "${aws_subnet.us-test-1c-existingsg-example-com.id}" +} + +output "vpc_cidr_block" { + value = "${aws_vpc.existingsg-example-com.cidr_block}" +} + +output "vpc_id" { + value = "${aws_vpc.existingsg-example-com.id}" +} + +provider "aws" { + region = "us-test-1" +} + +resource "aws_autoscaling_attachment" "master-us-test-1a-masters-existingsg-example-com" { + elb = "${aws_elb.api-existingsg-example-com.id}" + autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1a-masters-existingsg-example-com.id}" +} + +resource "aws_autoscaling_attachment" "master-us-test-1b-masters-existingsg-example-com" { + elb = "${aws_elb.api-existingsg-example-com.id}" + autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1b-masters-existingsg-example-com.id}" +} + +resource "aws_autoscaling_attachment" "master-us-test-1c-masters-existingsg-example-com" { + elb = "${aws_elb.api-existingsg-example-com.id}" + autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1c-masters-existingsg-example-com.id}" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-existingsg-example-com" { + name = "master-us-test-1a.masters.existingsg.example.com" + launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-existingsg-example-com.id}" + max_size = 1 + min_size = 1 + vpc_zone_identifier = ["${aws_subnet.us-test-1a-existingsg-example-com.id}"] + + tag = { + key = "KubernetesCluster" + value = "existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "Name" + value = "master-us-test-1a.masters.existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "k8s.io/role/master" + value = "1" + propagate_at_launch = true + } + + metrics_granularity = "1Minute" + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] +} + +resource "aws_autoscaling_group" "master-us-test-1b-masters-existingsg-example-com" { + name = "master-us-test-1b.masters.existingsg.example.com" + launch_configuration = "${aws_launch_configuration.master-us-test-1b-masters-existingsg-example-com.id}" + max_size = 1 + min_size = 1 + vpc_zone_identifier = ["${aws_subnet.us-test-1b-existingsg-example-com.id}"] + + tag = { + key = "KubernetesCluster" + value = "existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "Name" + value = "master-us-test-1b.masters.existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "k8s.io/role/master" + value = "1" + propagate_at_launch = true + } + + metrics_granularity = "1Minute" + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", 
"GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] +} + +resource "aws_autoscaling_group" "master-us-test-1c-masters-existingsg-example-com" { + name = "master-us-test-1c.masters.existingsg.example.com" + launch_configuration = "${aws_launch_configuration.master-us-test-1c-masters-existingsg-example-com.id}" + max_size = 1 + min_size = 1 + vpc_zone_identifier = ["${aws_subnet.us-test-1c-existingsg-example-com.id}"] + + tag = { + key = "KubernetesCluster" + value = "existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "Name" + value = "master-us-test-1c.masters.existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "k8s.io/role/master" + value = "1" + propagate_at_launch = true + } + + metrics_granularity = "1Minute" + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] +} + +resource "aws_autoscaling_group" "nodes-existingsg-example-com" { + name = "nodes.existingsg.example.com" + launch_configuration = "${aws_launch_configuration.nodes-existingsg-example-com.id}" + max_size = 2 + min_size = 2 + vpc_zone_identifier = ["${aws_subnet.us-test-1a-existingsg-example-com.id}"] + + tag = { + key = "KubernetesCluster" + value = "existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "Name" + value = "nodes.existingsg.example.com" + propagate_at_launch = true + } + + tag = { + key = "k8s.io/role/node" + value = "1" + propagate_at_launch = true + } + + metrics_granularity = "1Minute" + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] +} + +resource "aws_ebs_volume" "a-etcd-events-existingsg-example-com" { + availability_zone = "us-test-1a" + size = 20 + type = "gp2" + encrypted = false + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "a.etcd-events.existingsg.example.com" + "k8s.io/etcd/events" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_ebs_volume" "a-etcd-main-existingsg-example-com" { + availability_zone = "us-test-1a" + size = 20 + type = "gp2" + encrypted = false + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "a.etcd-main.existingsg.example.com" + "k8s.io/etcd/main" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_ebs_volume" "b-etcd-events-existingsg-example-com" { + availability_zone = "us-test-1b" + size = 20 + type = "gp2" + encrypted = false + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "b.etcd-events.existingsg.example.com" + "k8s.io/etcd/events" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_ebs_volume" "b-etcd-main-existingsg-example-com" { + availability_zone = "us-test-1b" + size = 20 + type = "gp2" + encrypted = false + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "b.etcd-main.existingsg.example.com" + "k8s.io/etcd/main" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_ebs_volume" "c-etcd-events-existingsg-example-com" { + availability_zone = 
"us-test-1c" + size = 20 + type = "gp2" + encrypted = false + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "c.etcd-events.existingsg.example.com" + "k8s.io/etcd/events" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_ebs_volume" "c-etcd-main-existingsg-example-com" { + availability_zone = "us-test-1c" + size = 20 + type = "gp2" + encrypted = false + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "c.etcd-main.existingsg.example.com" + "k8s.io/etcd/main" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_elb" "api-existingsg-example-com" { + name = "api-existingsg-example-co-ikb7m9" + + listener = { + instance_port = 443 + instance_protocol = "TCP" + lb_port = 443 + lb_protocol = "TCP" + } + + security_groups = ["sg-abcd1234"] + subnets = ["${aws_subnet.us-test-1a-existingsg-example-com.id}", "${aws_subnet.us-test-1b-existingsg-example-com.id}", "${aws_subnet.us-test-1c-existingsg-example-com.id}"] + + health_check = { + target = "SSL:443" + healthy_threshold = 2 + unhealthy_threshold = 2 + interval = 10 + timeout = 5 + } + + idle_timeout = 300 + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "api.existingsg.example.com" + } +} + +resource "aws_iam_instance_profile" "masters-existingsg-example-com" { + name = "masters.existingsg.example.com" + role = "${aws_iam_role.masters-existingsg-example-com.name}" +} + +resource "aws_iam_instance_profile" "nodes-existingsg-example-com" { + name = "nodes.existingsg.example.com" + role = "${aws_iam_role.nodes-existingsg-example-com.name}" +} + +resource "aws_iam_role" "masters-existingsg-example-com" { + name = "masters.existingsg.example.com" + assume_role_policy = "${file("${path.module}/data/aws_iam_role_masters.existingsg.example.com_policy")}" +} + +resource "aws_iam_role" "nodes-existingsg-example-com" { + name = "nodes.existingsg.example.com" + assume_role_policy = "${file("${path.module}/data/aws_iam_role_nodes.existingsg.example.com_policy")}" +} + +resource "aws_iam_role_policy" "masters-existingsg-example-com" { + name = "masters.existingsg.example.com" + role = "${aws_iam_role.masters-existingsg-example-com.name}" + policy = "${file("${path.module}/data/aws_iam_role_policy_masters.existingsg.example.com_policy")}" +} + +resource "aws_iam_role_policy" "nodes-existingsg-example-com" { + name = "nodes.existingsg.example.com" + role = "${aws_iam_role.nodes-existingsg-example-com.name}" + policy = "${file("${path.module}/data/aws_iam_role_policy_nodes.existingsg.example.com_policy")}" +} + +resource "aws_internet_gateway" "existingsg-example-com" { + vpc_id = "${aws_vpc.existingsg-example-com.id}" + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "existingsg.example.com" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_key_pair" "kubernetes-existingsg-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.existingsg.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.existingsg.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}" +} + +resource "aws_launch_configuration" "master-us-test-1a-masters-existingsg-example-com" { + name_prefix = "master-us-test-1a.masters.existingsg.example.com-" + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = 
"${aws_key_pair.kubernetes-existingsg-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}" + iam_instance_profile = "${aws_iam_instance_profile.masters-existingsg-example-com.id}" + security_groups = ["sg-1234dcba"] + associate_public_ip_address = true + user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.existingsg.example.com_user_data")}" + + root_block_device = { + volume_type = "gp2" + volume_size = 64 + delete_on_termination = true + } + + ephemeral_block_device = { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + + lifecycle = { + create_before_destroy = true + } + + enable_monitoring = false +} + +resource "aws_launch_configuration" "master-us-test-1b-masters-existingsg-example-com" { + name_prefix = "master-us-test-1b.masters.existingsg.example.com-" + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = "${aws_key_pair.kubernetes-existingsg-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}" + iam_instance_profile = "${aws_iam_instance_profile.masters-existingsg-example-com.id}" + security_groups = ["sg-1234dcba"] + associate_public_ip_address = true + user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1b.masters.existingsg.example.com_user_data")}" + + root_block_device = { + volume_type = "gp2" + volume_size = 64 + delete_on_termination = true + } + + ephemeral_block_device = { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + + lifecycle = { + create_before_destroy = true + } + + enable_monitoring = false +} + +resource "aws_launch_configuration" "master-us-test-1c-masters-existingsg-example-com" { + name_prefix = "master-us-test-1c.masters.existingsg.example.com-" + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = "${aws_key_pair.kubernetes-existingsg-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}" + iam_instance_profile = "${aws_iam_instance_profile.masters-existingsg-example-com.id}" + security_groups = ["sg-1234dcba"] + associate_public_ip_address = true + user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1c.masters.existingsg.example.com_user_data")}" + + root_block_device = { + volume_type = "gp2" + volume_size = 64 + delete_on_termination = true + } + + ephemeral_block_device = { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + + lifecycle = { + create_before_destroy = true + } + + enable_monitoring = false +} + +resource "aws_launch_configuration" "nodes-existingsg-example-com" { + name_prefix = "nodes.existingsg.example.com-" + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = "${aws_key_pair.kubernetes-existingsg-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}" + iam_instance_profile = "${aws_iam_instance_profile.nodes-existingsg-example-com.id}" + security_groups = ["sg-1234abcd"] + associate_public_ip_address = true + user_data = "${file("${path.module}/data/aws_launch_configuration_nodes.existingsg.example.com_user_data")}" + + root_block_device = { + volume_type = "gp2" + volume_size = 128 + delete_on_termination = true + } + + lifecycle = { + create_before_destroy = true + } + + enable_monitoring = false +} + +resource "aws_route" "0-0-0-0--0" { + route_table_id = "${aws_route_table.existingsg-example-com.id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.existingsg-example-com.id}" +} + +resource "aws_route53_record" "api-existingsg-example-com" { + name = "api.existingsg.example.com" + type = "A" + + alias = { + name = 
"${aws_elb.api-existingsg-example-com.dns_name}" + zone_id = "${aws_elb.api-existingsg-example-com.zone_id}" + evaluate_target_health = false + } + + zone_id = "/hostedzone/Z1AFAKE1ZON3YO" +} + +resource "aws_route_table" "existingsg-example-com" { + vpc_id = "${aws_vpc.existingsg-example-com.id}" + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "existingsg.example.com" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } +} + +resource "aws_route_table_association" "us-test-1a-existingsg-example-com" { + subnet_id = "${aws_subnet.us-test-1a-existingsg-example-com.id}" + route_table_id = "${aws_route_table.existingsg-example-com.id}" +} + +resource "aws_route_table_association" "us-test-1b-existingsg-example-com" { + subnet_id = "${aws_subnet.us-test-1b-existingsg-example-com.id}" + route_table_id = "${aws_route_table.existingsg-example-com.id}" +} + +resource "aws_route_table_association" "us-test-1c-existingsg-example-com" { + subnet_id = "${aws_subnet.us-test-1c-existingsg-example-com.id}" + route_table_id = "${aws_route_table.existingsg-example-com.id}" +} + +resource "aws_subnet" "us-test-1a-existingsg-example-com" { + vpc_id = "${aws_vpc.existingsg-example-com.id}" + cidr_block = "172.20.32.0/19" + availability_zone = "us-test-1a" + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "us-test-1a.existingsg.example.com" + SubnetType = "Public" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + } +} + +resource "aws_subnet" "us-test-1b-existingsg-example-com" { + vpc_id = "${aws_vpc.existingsg-example-com.id}" + cidr_block = "172.20.64.0/19" + availability_zone = "us-test-1b" + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "us-test-1b.existingsg.example.com" + SubnetType = "Public" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + } +} + +resource "aws_subnet" "us-test-1c-existingsg-example-com" { + vpc_id = "${aws_vpc.existingsg-example-com.id}" + cidr_block = "172.20.96.0/19" + availability_zone = "us-test-1c" + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "us-test-1c.existingsg.example.com" + SubnetType = "Public" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + } +} + +resource "aws_vpc" "existingsg-example-com" { + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "existingsg.example.com" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "existingsg-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + + tags = { + KubernetesCluster = "existingsg.example.com" + Name = "existingsg.example.com" + "kubernetes.io/cluster/existingsg.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "existingsg-example-com" { + vpc_id = "${aws_vpc.existingsg-example-com.id}" + dhcp_options_id = "${aws_vpc_dhcp_options.existingsg-example-com.id}" +} + +terraform = { + required_version = ">= 0.9.3" +} diff --git a/upup/pkg/fi/cloudup/awstasks/securitygroup.go b/upup/pkg/fi/cloudup/awstasks/securitygroup.go index a468b17c4e927..739067eb697c8 100644 --- a/upup/pkg/fi/cloudup/awstasks/securitygroup.go +++ b/upup/pkg/fi/cloudup/awstasks/securitygroup.go @@ -149,7 +149,7 @@ func (_ *SecurityGroup) CheckChanges(a, e, 
changes *SecurityGroup) error { if changes.ID != nil { return fi.CannotChangeField("ID") } - if changes.Name != nil { + if changes.Name != nil && !fi.BoolValue(e.Shared) { return fi.CannotChangeField("Name") } if changes.VPC != nil { From 4cd8dbae3fe4f7c12ad93936ab229cdae2f46886 Mon Sep 17 00:00:00 2001 From: Rodrigo Menezes Date: Tue, 2 Oct 2018 01:42:46 -0700 Subject: [PATCH 2/3] Update tests --- docs/security_groups.md | 6 +++--- .../update_cluster/existing_sg/in-v1alpha2.yaml | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/security_groups.md b/docs/security_groups.md index 4083fba5672e6..9e6323d9f16f7 100644 --- a/docs/security_groups.md +++ b/docs/security_groups.md @@ -22,7 +22,7 @@ metadata: spec: api: loadBalancer: - SecurityGroupOverride: sg-abcd1234 + securityGroupOverride: sg-abcd1234 . . @@ -36,14 +36,14 @@ metadata: kops.k8s.io/cluster: mycluster.example.com name: master-us-test-1a spec: - SecurityGroupOverride: sg-1234dcba + securityGroupOverride: sg-1234dcba ``` Now run a cluster update to create the new launch configuration, using [lifecycle overrides](./cli/kops_update_cluster.md#options) to prevent Security Group resources from being created: ``` -kops update cluster ${CLUSTER_NAME} --yes --lifecycle-overrides IAMRole=SecurityGroup=ExistsAndWarnIfChanges,SecurityGroupRule=ExistsAndWarnIfChanges +kops update cluster ${CLUSTER_NAME} --yes --lifecycle-overrides SecurityGroup=ExistsAndWarnIfChanges,SecurityGroupRule=ExistsAndWarnIfChanges ``` *Everytime `kops update cluster` is ran, it must include the above `--lifecycle-overrides`.* diff --git a/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml b/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml index fcd768e0a5969..5a019aff5d28c 100644 --- a/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml @@ -6,7 +6,7 @@ metadata: spec: api: loadBalancer: - SecurityGroupOverride: sg-abcd1234 + securityGroupOverride: sg-abcd1234 kubernetesApiAccess: - 0.0.0.0/0 channel: stable @@ -72,7 +72,7 @@ spec: role: Master subnets: - us-test-1a - SecurityGroupOverride: sg-1234dcba + securityGroupOverride: sg-1234dcba --- @@ -91,7 +91,7 @@ spec: role: Master subnets: - us-test-1b - SecurityGroupOverride: sg-1234dcba + securityGroupOverride: sg-1234dcba --- @@ -110,7 +110,7 @@ spec: role: Master subnets: - us-test-1c - SecurityGroupOverride: sg-1234dcba + securityGroupOverride: sg-1234dcba --- @@ -130,5 +130,5 @@ spec: role: Node subnets: - us-test-1a - SecurityGroupOverride: sg-1234abcd + securityGroupOverride: sg-1234abcd From 87eec75f5bfc8f8b10373352b9b59fa82f76ddb4 Mon Sep 17 00:00:00 2001 From: Rodrigo Menezes Date: Tue, 2 Oct 2018 10:22:09 -0700 Subject: [PATCH 3/3] Fix blocker --- pkg/model/awsmodel/api_loadbalancer.go | 7 +++---- pkg/model/firewall.go | 7 +++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/pkg/model/awsmodel/api_loadbalancer.go b/pkg/model/awsmodel/api_loadbalancer.go index 436a786db9746..3731b19162473 100644 --- a/pkg/model/awsmodel/api_loadbalancer.go +++ b/pkg/model/awsmodel/api_loadbalancer.go @@ -244,10 +244,9 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { SecurityGroup: masterGroup, SourceGroup: lbSG, - // SourceGroup: sgLink, - FromPort: i64(443), - ToPort: i64(443), - Protocol: s("tcp"), + FromPort: i64(443), + ToPort: i64(443), + Protocol: s("tcp"), } c.AddTask(t) } diff --git a/pkg/model/firewall.go b/pkg/model/firewall.go index 
c623eae7eb685..7a4859611a122 100644 --- a/pkg/model/firewall.go +++ b/pkg/model/firewall.go @@ -262,6 +262,13 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu protocols = append(protocols, ProtocolIPIP) } + if b.Cluster.Spec.Networking.Cilium != nil { + // Cilium needs to access etcd + glog.Warningf("Opening etcd port on masters for access from the nodes, for Cilium. This is unsafe in untrusted environments.") + tcpBlocked[4001] = false + protocols = append(protocols, ProtocolIPIP) + } + if b.Cluster.Spec.Networking.Kuberouter != nil { protocols = append(protocols, ProtocolIPIP) }
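
Note (not part of the patch itself): `makeTestInstanceGroupSec` is introduced in `pkg/model/firewall_test.go` above, but no test in this excerpt exercises it yet. Below is a minimal sketch of how it could be used to check that the new `SecurityGroupOverride` field is wired through to the instance group spec. The test name and the `sg-1234dcba` value are illustrative assumptions; the helper and the `SecurityGroupOverride` field come from this change, and `kops.InstanceGroupRoleNode` is an existing kops constant.

```
package model

import (
	"testing"

	"k8s.io/kops/pkg/apis/kops"
)

// Sketch only: verifies the helper copies the role and the override into the spec.
func Test_MakeTestInstanceGroupSec_Sketch(t *testing.T) {
	sg := "sg-1234dcba"

	ig := makeTestInstanceGroupSec(kops.InstanceGroupRoleNode, &sg)
	if ig.Spec.Role != kops.InstanceGroupRoleNode {
		t.Errorf("unexpected role: got %q, want %q", ig.Spec.Role, kops.InstanceGroupRoleNode)
	}
	if ig.Spec.SecurityGroupOverride == nil || *ig.Spec.SecurityGroupOverride != sg {
		t.Errorf("unexpected SecurityGroupOverride: got %v, want %q", ig.Spec.SecurityGroupOverride, sg)
	}

	// A nil override should stay nil, meaning kops creates and manages its own group.
	ig = makeTestInstanceGroupSec(kops.InstanceGroupRoleNode, nil)
	if ig.Spec.SecurityGroupOverride != nil {
		t.Errorf("expected nil SecurityGroupOverride, got %v", ig.Spec.SecurityGroupOverride)
	}
}
```

Keeping `SecurityGroupOverride` as a `*string` lets the model distinguish "not set" (kops provisions the security group) from an explicit override pointing at an existing/shared group, which is what the `sg-1234dcba` / `sg-abcd1234` fixtures above exercise.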