diff --git a/.github/workflows/depsreview.yaml b/.github/workflows/depsreview.yaml index 35f210dae4d82..1192881eae7a5 100644 --- a/.github/workflows/depsreview.yaml +++ b/.github/workflows/depsreview.yaml @@ -16,4 +16,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 - name: 'Review Dependencies' - uses: actions/dependency-review-action@30d582111533d59ab793fd9f971817241654f3ec + uses: actions/dependency-review-action@11310527b429536e263dc6cc47873e608189ba21 diff --git a/Makefile b/Makefile index 2cc6c7397b9f9..254b12305488a 100644 --- a/Makefile +++ b/Makefile @@ -385,10 +385,6 @@ verify-shellcheck: verify-terraform: hack/verify-terraform.sh -.PHONE: verify-cloudformation -verify-cloudformation: - hack/verify-cloudformation.sh - .PHONY: verify-hashes verify-hashes: hack/verify-hashes.sh diff --git a/channels/pkg/channels/addons_test.go b/channels/pkg/channels/addons_test.go index 8274ffa946196..0e3d9133cb709 100644 --- a/channels/pkg/channels/addons_test.go +++ b/channels/pkg/channels/addons_test.go @@ -145,7 +145,7 @@ func Test_GetRequiredUpdates(t *testing.T) { addon := &Addon{ Name: "test", Spec: &api.AddonSpec{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), NeedsPKI: true, }, } @@ -173,7 +173,7 @@ func Test_NeedsRollingUpdate(t *testing.T) { newAddon: &Addon{ Name: "test", Spec: &api.AddonSpec{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), ManifestHash: "originalHash", NeedsRollingUpdate: "all", }, @@ -183,7 +183,7 @@ func Test_NeedsRollingUpdate(t *testing.T) { newAddon: &Addon{ Name: "test", Spec: &api.AddonSpec{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), ManifestHash: "newHash", NeedsRollingUpdate: "all", }, @@ -195,7 +195,7 @@ func Test_NeedsRollingUpdate(t *testing.T) { newAddon: &Addon{ Name: "test", Spec: &api.AddonSpec{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), ManifestHash: "newHash", NeedsRollingUpdate: "worker", }, @@ -207,7 +207,7 @@ func Test_NeedsRollingUpdate(t *testing.T) { newAddon: &Addon{ Name: "test", Spec: &api.AddonSpec{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), ManifestHash: "newHash", NeedsRollingUpdate: "control-plane", }, @@ -219,7 +219,7 @@ func Test_NeedsRollingUpdate(t *testing.T) { newAddon: &Addon{ Name: "test", Spec: &api.AddonSpec{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), ManifestHash: "newHash", NeedsRollingUpdate: "all", }, @@ -338,7 +338,7 @@ func Test_InstallPKI(t *testing.T) { addon := &Addon{ Name: "test", Spec: &api.AddonSpec{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), NeedsPKI: true, }, } diff --git a/channels/pkg/cmd/apply_channel_test.go b/channels/pkg/cmd/apply_channel_test.go index 496b059126acf..9226536a4e16a 100644 --- a/channels/pkg/cmd/apply_channel_test.go +++ b/channels/pkg/cmd/apply_channel_test.go @@ -59,7 +59,7 @@ func TestGetUpdates(t *testing.T) { "aws-ebs-csi-driver.addons.k8s.io": { Name: "aws-ebs-csi-driver.addons.k8s.io", Spec: &api.AddonSpec{ - Name: fi.String("aws-ebs-csi-driver.addons.k8s.io"), + Name: fi.PtrTo("aws-ebs-csi-driver.addons.k8s.io"), Id: "k8s-1.17", ManifestHash: "abc", }, diff --git a/cloudmock/openstack/mockcompute/flavors.go b/cloudmock/openstack/mockcompute/flavors.go index 310b8934ca58a..57761bc8b0166 100644 --- a/cloudmock/openstack/mockcompute/flavors.go +++ b/cloudmock/openstack/mockcompute/flavors.go @@ -129,7 +129,7 @@ func (m *MockClient) createFlavor(w http.ResponseWriter, r *http.Request) { Name: create.Flavor.Name, RAM: create.Flavor.RAM, VCPUs: create.Flavor.VCPUs, - Disk: 
fi.IntValue(create.Flavor.Disk), + Disk: fi.ValueOf(create.Flavor.Disk), } m.flavors[flavor.ID] = flavor diff --git a/cloudmock/openstack/mockcompute/servers.go b/cloudmock/openstack/mockcompute/servers.go index bac83ac111947..63b121416d836 100644 --- a/cloudmock/openstack/mockcompute/servers.go +++ b/cloudmock/openstack/mockcompute/servers.go @@ -206,7 +206,7 @@ func (m *MockClient) createServer(w http.ResponseWriter, r *http.Request) { portID := create.Server.Networks[0].Port ports.Update(m.networkClient, portID, ports.UpdateOpts{ - DeviceID: fi.String(server.ID), + DeviceID: fi.PtrTo(server.ID), }) // Assign an IP address diff --git a/cloudmock/openstack/mocknetworking/ports.go b/cloudmock/openstack/mocknetworking/ports.go index 81ae5a9cf9950..396ffeab85bae 100644 --- a/cloudmock/openstack/mocknetworking/ports.go +++ b/cloudmock/openstack/mocknetworking/ports.go @@ -238,7 +238,7 @@ func (m *MockClient) updatePort(w http.ResponseWriter, r *http.Request) { deviceID := update.Port.DeviceID if deviceID != nil { - port.DeviceID = fi.StringValue(deviceID) + port.DeviceID = fi.ValueOf(deviceID) } m.ports[portID] = port diff --git a/cmd/kops-controller/controllers/awsipam.go b/cmd/kops-controller/controllers/awsipam.go index 01ff0b652f4be..2f364933c4baa 100644 --- a/cmd/kops-controller/controllers/awsipam.go +++ b/cmd/kops-controller/controllers/awsipam.go @@ -127,7 +127,7 @@ func (r *AWSIPAMReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct eni, err := r.ec2Client.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{ Filters: []*ec2.Filter{ { - Name: fi.String("attachment.instance-id"), + Name: fi.PtrTo("attachment.instance-id"), Values: []*string{ &instanceID, }, diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go index 16419fd32cbea..ce1a301b16282 100644 --- a/cmd/kops/create_cluster.go +++ b/cmd/kops/create_cluster.go @@ -245,9 +245,12 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { // TODO complete VFS paths } - cmd.Flags().StringVar(&options.KubernetesVersion, "kubernetes-version", options.KubernetesVersion, "Version of kubernetes to run (defaults to version in channel)") + cmd.Flags().StringVar(&options.KubernetesVersion, "kubernetes-version", options.KubernetesVersion, "Version of Kubernetes to run (defaults to version in channel)") cmd.RegisterFlagCompletionFunc("kubernetes-version", completeKubernetesVersion) + cmd.Flags().StringSliceVar(&options.KubernetesFeatureGates, "kubernetes-feature-gates", options.KubernetesFeatureGates, "List of Kubernetes feature gates to enable/disable") + cmd.RegisterFlagCompletionFunc("kubernetes-feature-gates", completeKubernetesFeatureGates) + cmd.Flags().StringVar(&options.ContainerRuntime, "container-runtime", options.ContainerRuntime, "Container runtime to use: containerd, docker") cmd.RegisterFlagCompletionFunc("container-runtime", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return []string{"containerd", "docker"}, cobra.ShellCompDirectiveNoFileComp @@ -327,7 +330,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { cmd.RegisterFlagCompletionFunc("channel", completeChannel) // Network topology - cmd.Flags().StringVarP(&options.Topology, "topology", "t", options.Topology, "Network topology for the cluster: public or private") + cmd.Flags().StringVarP(&options.Topology, "topology", "t", options.Topology, "Network topology for the cluster: 'public' or 'private'. Defaults to 'public' for IPv4 clusters and 'private' for IPv6 clusters.") cmd.RegisterFlagCompletionFunc("topology", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return []string{api.TopologyPublic, api.TopologyPrivate}, cobra.ShellCompDirectiveNoFileComp }) @@ -432,7 +435,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { cmd.RegisterFlagCompletionFunc("os-ext-net", completeOpenstackExternalNet) cmd.Flags().StringVar(&options.OpenstackExternalSubnet, "os-ext-subnet", options.OpenstackExternalSubnet, "External floating subnet to use with the openstack router") cmd.RegisterFlagCompletionFunc("os-ext-subnet", completeOpenstackExternalSubnet) - cmd.Flags().StringVar(&options.OpenstackLBSubnet, "os-lb-floating-subnet", options.OpenstackLBSubnet, "External subnet to use with the kubernetes api") + cmd.Flags().StringVar(&options.OpenstackLBSubnet, "os-lb-floating-subnet", options.OpenstackLBSubnet, "External subnet to use with the Kubernetes API") cmd.RegisterFlagCompletionFunc("os-lb-floating-subnet", completeOpenstackLBSubnet) cmd.Flags().BoolVar(&options.OpenstackStorageIgnoreAZ, "os-kubelet-ignore-az", options.OpenstackStorageIgnoreAZ, "Attach volumes across availability zones") cmd.Flags().BoolVar(&options.OpenstackLBOctavia, "os-octavia", options.OpenstackLBOctavia, "Use octavia load balancer API") @@ -578,13 +581,13 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr if c.MasterVolumeSize != 0 { for _, group := range masters { - group.Spec.RootVolumeSize = fi.Int32(c.MasterVolumeSize) + group.Spec.RootVolumeSize = fi.PtrTo(c.MasterVolumeSize) } } if c.NodeVolumeSize != 0 { for _, group := range nodes { - group.Spec.RootVolumeSize = fi.Int32(c.NodeVolumeSize) + group.Spec.RootVolumeSize = fi.PtrTo(c.NodeVolumeSize) } } @@ -601,11 +604,11 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr } if c.DisableSubnetTags { - cluster.Spec.TagSubnets = fi.Bool(false) + cluster.Spec.TagSubnets = fi.PtrTo(false) } if c.MasterPublicName != "" { - cluster.Spec.MasterPublicName = c.MasterPublicName + cluster.Spec.API.PublicName = c.MasterPublicName } if err := commands.UnsetClusterFields(c.Unsets, cluster); err != nil { @@ -908,6 +911,11 @@ func completeKubernetesVersion(cmd *cobra.Command, args []string, toComplete str return versions.List(), cobra.ShellCompDirectiveNoFileComp } +func completeKubernetesFeatureGates(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + // TODO check if there's a way to get the full list of feature gates from k8s libs + return nil, cobra.ShellCompDirectiveNoFileComp +} + func completeInstanceImage(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { // TODO call into cloud provider(s) to get list of valid images return nil, cobra.ShellCompDirectiveNoFileComp diff --git a/cmd/kops/create_cluster_integration_test.go b/cmd/kops/create_cluster_integration_test.go index 14e5281315738..5e61e5f96ed64 100644 --- a/cmd/kops/create_cluster_integration_test.go +++ b/cmd/kops/create_cluster_integration_test.go @@ -69,6 +69,11 @@ func TestCreateClusterOverride(t *testing.T) { runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/overrides", "v1alpha2") } +// TestCreateClusterKubernetesFeatureGates tests the --kubernetes-feature-gates flag +func TestCreateClusterKubernetesFeatureGates(t *testing.T) { + runCreateClusterIntegrationTest(t, 
"../../tests/integration/create_cluster/minimal_feature-gates", "v1alpha2") +} + // TestCreateClusterComplex runs kops create cluster, with a grab-bag of edge cases func TestCreateClusterComplex(t *testing.T) { runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/complex", "v1alpha2") diff --git a/cmd/kops/get_keypairs.go b/cmd/kops/get_keypairs.go index 72dcbed262b8b..25a22ba2791bf 100644 --- a/cmd/kops/get_keypairs.go +++ b/cmd/kops/get_keypairs.go @@ -153,7 +153,7 @@ func listKeypairs(keyStore fi.CAStore, names []string, includeDistrusted bool) ( keypair.AlternateNames = alternateNames } if rsaKey, ok := cert.PublicKey.(*rsa.PublicKey); ok { - keypair.KeyLength = fi.Int(rsaKey.N.BitLen()) + keypair.KeyLength = fi.PtrTo(rsaKey.N.BitLen()) } } items = append(items, &keypair) diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index 197a45968089b..323a1a21f057c 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -22,11 +22,8 @@ import ( "crypto/rand" "crypto/rsa" "crypto/x509" - "encoding/base64" - "encoding/json" "encoding/pem" "fmt" - "io" "os" "path" "path/filepath" @@ -37,13 +34,10 @@ import ( "time" "golang.org/x/crypto/ssh" - "sigs.k8s.io/yaml" - "k8s.io/kops/cmd/kops/util" "k8s.io/kops/pkg/apis/kops/model" "k8s.io/kops/pkg/diff" "k8s.io/kops/pkg/featureflag" - "k8s.io/kops/pkg/jsonutils" "k8s.io/kops/pkg/model/iam" "k8s.io/kops/pkg/pki" "k8s.io/kops/pkg/testutils" @@ -218,7 +212,6 @@ func TestMinimal(t *testing.T) { newIntegrationTest("minimal.example.com", "minimal"). withAddons(dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "minimal").runTestCloudformation(t) } // TestMinimal runs the test on a minimum configuration @@ -230,7 +223,6 @@ func TestMinimal_v1_23(t *testing.T) { leaderElectionAddon, ). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "minimal").runTestCloudformation(t) } // TestMinimal runs the test on a minimum configuration @@ -243,7 +235,6 @@ func TestMinimal_v1_24(t *testing.T) { leaderElectionAddon, ). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "minimal").runTestCloudformation(t) } // TestMinimal runs the test on a minimum configuration @@ -256,7 +247,6 @@ func TestMinimal_v1_25(t *testing.T) { leaderElectionAddon, ). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "minimal").runTestCloudformation(t) } // TestMinimal runs the test on a minimum configuration @@ -270,7 +260,6 @@ func TestMinimal_v1_26(t *testing.T) { awsCCMAddon, ). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "minimal").runTestCloudformation(t) } // TestMinimal_NoneDNS runs the test on a minimum configuration with --dns=none @@ -283,7 +272,6 @@ func TestMinimal_NoneDNS(t *testing.T) { awsCCMAddon, ). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "minimal").runTestCloudformation(t) } // TestHetzner runs the test on a minimum configuration @@ -300,7 +288,6 @@ func TestNvidia(t *testing.T) { "nvidia.addons.k8s.io-k8s-1.16", ). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "nvidia").runTestCloudformation(t) } // TestMinimal runs the test on a minimum gossip configuration @@ -379,7 +366,6 @@ func TestComplex(t *testing.T) { awsAuthenticatorAddon, ). runTestTerraformAWS(t) - newIntegrationTest("complex.example.com", "complex").withoutSSHKey().runTestCloudformation(t) newIntegrationTest("complex.example.com", "complex").withoutSSHKey().withVersion("legacy-v1alpha2"). 
withAddons( awsEBSCSIAddon, @@ -410,7 +396,6 @@ func TestMinimalIPv6(t *testing.T) { newIntegrationTest("minimal-ipv6.example.com", "minimal-ipv6"). withAddons(awsCCMAddon, awsEBSCSIAddon, dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("minimal-ipv6.example.com", "minimal-ipv6").runTestCloudformation(t) } // TestMinimalIPv6 runs the test on a minimum IPv6 configuration @@ -433,7 +418,6 @@ func TestMinimalIPv6Cilium(t *testing.T) { newIntegrationTest("minimal-ipv6.example.com", "minimal-ipv6-cilium"). withAddons(awsCCMAddon, awsEBSCSIAddon, ciliumAddon, dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("minimal-ipv6.example.com", "minimal-ipv6-cilium").runTestCloudformation(t) } // TestMinimalWarmPool runs the test on a minimum Warm Pool configuration @@ -445,7 +429,9 @@ func TestMinimalWarmPool(t *testing.T) { // TestMinimalEtcd runs the test on a minimum configuration using custom etcd config, similar to kops create cluster minimal.example.com --zones us-west-1a func TestMinimalEtcd(t *testing.T) { - newIntegrationTest("minimal-etcd.example.com", "minimal-etcd").runTestCloudformation(t) + newIntegrationTest("minimal-etcd.example.com", "minimal-etcd"). + withAddons(dnsControllerAddon). + runTestTerraformAWS(t) } // TestMinimalGp3 runs the test on a minimum configuration using gp3 volumes, similar to kops create cluster minimal.example.com --zones us-west-1a @@ -453,7 +439,6 @@ func TestMinimalGp3(t *testing.T) { newIntegrationTest("minimal.example.com", "minimal-gp3"). withAddons(dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "minimal-gp3").runTestCloudformation(t) } // TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a @@ -461,13 +446,6 @@ func TestMinimalLongClusterName(t *testing.T) { newIntegrationTest("this.is.truly.a.really.really.long.cluster-name.minimal.example.com", "minimal-longclustername"). withAddons(dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("this.is.truly.a.really.really.long.cluster-name.minimal.example.com", "minimal-longclustername").runTestCloudformation(t) -} - -// TestExistingIAMCloudformation runs the test with existing IAM instance profiles, similar to kops create cluster minimal.example.com --zones us-west-1a -func TestExistingIAMCloudformation(t *testing.T) { - lifecycleOverrides := []string{"IAMRole=ExistsAndWarnIfChanges", "IAMRolePolicy=ExistsAndWarnIfChanges", "IAMInstanceProfileRole=ExistsAndWarnIfChanges"} - newIntegrationTest("minimal.example.com", "existing_iam_cloudformation").withLifecycleOverrides(lifecycleOverrides).runTestCloudformation(t) } // TestExistingSG runs the test with existing Security Group, similar to kops create cluster minimal.example.com --zones us-west-1a @@ -518,9 +496,6 @@ func TestPrivateCilium(t *testing.T) { withPrivate(). withAddons(ciliumAddon, dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("privatecilium.example.com", "privatecilium"). - withPrivate(). - runTestCloudformation(t) } func TestPrivateCilium2(t *testing.T) { @@ -530,9 +505,6 @@ func TestPrivateCilium2(t *testing.T) { withAddons("networking.cilium.io-k8s-1.16"). withAddons(certManagerAddon). runTestTerraformAWS(t) - newIntegrationTest("privatecilium.example.com", "privatecilium2"). - withPrivate(). 
- runTestCloudformation(t) } func TestPrivateCiliumAdvanced(t *testing.T) { @@ -542,10 +514,6 @@ func TestPrivateCiliumAdvanced(t *testing.T) { withManagedFiles("etcd-cluster-spec-cilium", "manifests-etcdmanager-cilium-master-us-test-1a"). withAddons(ciliumAddon, dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("privateciliumadvanced.example.com", "privateciliumadvanced"). - withPrivate(). - withCiliumEtcd(). - runTestCloudformation(t) } // TestPrivateCanal runs the test on a configuration with private topology, canal networking @@ -589,9 +557,6 @@ func TestPrivateSharedIP(t *testing.T) { withAddons(dnsControllerAddon). withPrivate(). runTestTerraformAWS(t) - newIntegrationTest("private-shared-ip.example.com", "private-shared-ip"). - withPrivate(). - runTestCloudformation(t) } // TestPrivateDns1 runs the test on a configuration with private topology, private dns @@ -790,7 +755,6 @@ func TestExternalDNS(t *testing.T) { newIntegrationTest("minimal.example.com", "external_dns"). withAddons("external-dns.addons.k8s.io-k8s-1.19"). runTestTerraformAWS(t) - newIntegrationTest("minimal.example.com", "external_dns").runTestCloudformation(t) } func TestExternalDNSIRSA(t *testing.T) { @@ -857,8 +821,6 @@ func TestExternalLoadBalancer(t *testing.T) { newIntegrationTest("externallb.example.com", "externallb"). withAddons(dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("externallb.example.com", "externallb"). - runTestCloudformation(t) } // TestPhaseIAM tests the output of tf for the iam phase @@ -882,9 +844,6 @@ func TestMixedInstancesASG(t *testing.T) { withZones(3). withAddons(dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("mixedinstances.example.com", "mixed_instances"). - withZones(3). - runTestCloudformation(t) } // TestMixedInstancesSpotASG tests ASGs using a mixed instance policy and spot instances @@ -893,9 +852,6 @@ func TestMixedInstancesSpotASG(t *testing.T) { withZones(3). withAddons(dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("mixedinstances.example.com", "mixed_instances_spot"). - withZones(3). - runTestCloudformation(t) } // TestAdditionalObjects runs the test on a configuration that includes additional objects @@ -908,19 +864,22 @@ func TestAdditionalObjects(t *testing.T) { // TestContainerd runs the test on a containerd configuration func TestContainerd(t *testing.T) { newIntegrationTest("containerd.example.com", "containerd"). - runTestCloudformation(t) + withAddons(dnsControllerAddon). + runTestTerraformAWS(t) } // TestContainerdCustom runs the test on a custom containerd URL configuration func TestContainerdCustom(t *testing.T) { newIntegrationTest("containerd.example.com", "containerd-custom"). - runTestCloudformation(t) + withAddons(dnsControllerAddon). + runTestTerraformAWS(t) } // TestDockerCustom runs the test on a custom Docker URL configuration func TestDockerCustom(t *testing.T) { newIntegrationTest("docker.example.com", "docker-custom"). - runTestCloudformation(t) + withAddons(dnsControllerAddon). + runTestTerraformAWS(t) } // TestAPIServerNodes runs a simple configuration with dedicated apiserver nodes @@ -931,8 +890,6 @@ func TestAPIServerNodes(t *testing.T) { } defer unsetFeatureFlags() - newIntegrationTest("minimal.example.com", "apiservernodes"). - runTestCloudformation(t) newIntegrationTest("minimal.example.com", "apiservernodes"). withAddons(dnsControllerAddon, awsEBSCSIAddon). withDedicatedAPIServer(). @@ -945,8 +902,6 @@ func TestNTHQueueProcessor(t *testing.T) { withNTH(). 
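// withNTH presumably switches the fixture to the node-termination-handler
// queue-processor variant, adding the SQS resources referenced by the test
// name to the expected output.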
withAddons(dnsControllerAddon). runTestTerraformAWS(t) - newIntegrationTest("nthsqsresources.longclustername.example.com", "nth_sqs_resources"). - runTestCloudformation(t) } // TestCustomIRSA runs a simple configuration, but with some additional IAM roles for ServiceAccounts @@ -1487,125 +1442,6 @@ func (i *integrationTest) runTestTerraformHetzner(t *testing.T) { i.runTest(t, h, expectedFilenames, "", "", nil) } -func (i *integrationTest) runTestCloudformation(t *testing.T) { - ctx := context.Background() - - i.srcDir = updateClusterTestBase + i.srcDir - var stdout bytes.Buffer - - inputYAML := "in-" + i.version + ".yaml" - expectedCfPath := "cloudformation.json" - - h := testutils.NewIntegrationTestHarness(t) - defer h.Close() - - h.MockKopsVersion("1.21.0-alpha.1") - h.SetupMockAWS() - - factory := i.setupCluster(t, inputYAML, ctx, stdout) - - { - options := &UpdateClusterOptions{} - options.InitDefaults() - options.Target = "cloudformation" - options.OutDir = path.Join(h.TempDir, "out") - options.RunTasksOptions.MaxTaskDuration = 30 * time.Second - - // We don't test it here, and it adds a dependency on kubectl - options.CreateKubecfg = false - options.ClusterName = i.clusterName - options.LifecycleOverrides = i.lifecycleOverrides - - _, err := RunUpdateCluster(ctx, factory, &stdout, options) - if err != nil { - t.Fatalf("error running update cluster %q: %v", i.clusterName, err) - } - } - - // Compare main files - { - files, err := os.ReadDir(path.Join(h.TempDir, "out")) - if err != nil { - t.Fatalf("failed to read dir: %v", err) - } - - var fileNames []string - for _, f := range files { - fileNames = append(fileNames, f.Name()) - } - sort.Strings(fileNames) - - actualFilenames := strings.Join(fileNames, ",") - expectedFilenames := "kubernetes.json" - if actualFilenames != expectedFilenames { - t.Fatalf("unexpected files. 
actual=%q, expected=%q", actualFilenames, expectedFilenames) - } - - actualPath := path.Join(h.TempDir, "out", "kubernetes.json") - actualCF, err := os.ReadFile(actualPath) - if err != nil { - t.Fatalf("unexpected error reading actual cloudformation output: %v", err) - } - - // Expand out the UserData base64 blob, as otherwise testing is painful - extracted := make(map[string]string) - var buf bytes.Buffer - out := jsonutils.NewJSONStreamWriter(&buf) - in := json.NewDecoder(bytes.NewReader(actualCF)) - for { - token, err := in.Token() - if err != nil { - if err == io.EOF { - break - } else { - t.Fatalf("unexpected error parsing cloudformation output: %v", err) - } - } - - if strings.HasSuffix(out.Path(), ".UserData") { - if s, ok := token.(string); ok { - vBytes, err := base64.StdEncoding.DecodeString(s) - if err != nil { - t.Fatalf("error decoding UserData: %v", err) - } else { - extracted[out.Path()] = string(vBytes) - token = json.Token("extracted") - } - } - } - - if err := out.WriteToken(token); err != nil { - t.Fatalf("error writing json: %v", err) - } - } - actualCF = buf.Bytes() - - golden.AssertMatchesFile(t, string(actualCF), path.Join(i.srcDir, expectedCfPath)) - - // test extracted values - { - actual := make(map[string]string) - - for k, v := range extracted { - // Strip carriage return as expectedValue is stored in a yaml string literal - // and yaml block quoting doesn't seem to support \r in a string - v = strings.Replace(v, "\r", "", -1) - - actual[k] = v - } - - actualExtracted, err := yaml.Marshal(actual) - if err != nil { - t.Fatalf("error serializing yaml: %v", err) - } - - golden.AssertMatchesFile(t, string(actualExtracted), path.Join(i.srcDir, expectedCfPath+".extracted.yaml")) - } - - golden.AssertMatchesFile(t, string(actualCF), path.Join(i.srcDir, expectedCfPath)) - } -} - func MakeSSHKeyPair(publicKeyPath string, privateKeyPath string) error { privateKey, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { diff --git a/cmd/kops/update_cluster.go b/cmd/kops/update_cluster.go index da3ec15113c57..acb978779ac32 100644 --- a/cmd/kops/update_cluster.go +++ b/cmd/kops/update_cluster.go @@ -393,7 +393,7 @@ func RunUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Up fmt.Fprintf(sb, " * validate cluster: kops validate cluster --wait 10m\n") fmt.Fprintf(sb, " * list nodes: kubectl get nodes --show-labels\n") if !usesBastion(applyCmd.InstanceGroups) { - fmt.Fprintf(sb, " * ssh to the master: ssh -i ~/.ssh/id_rsa ubuntu@%s\n", cluster.Spec.MasterPublicName) + fmt.Fprintf(sb, " * ssh to the master: ssh -i ~/.ssh/id_rsa ubuntu@%s\n", cluster.Spec.API.PublicName) } else { bastionPublicName := findBastionPublicName(cluster) if bastionPublicName != "" { diff --git a/docs/cli/kops_create_cluster.md b/docs/cli/kops_create_cluster.md index d35258e140a55..17e6589507ebf 100644 --- a/docs/cli/kops_create_cluster.md +++ b/docs/cli/kops_create_cluster.md @@ -65,67 +65,68 @@ kops create cluster [CLUSTER] [flags] ### Options ``` - --admin-access strings Restrict API access to this CIDR. If not set, access will not be restricted by IP. (default [0.0.0.0/0,::/0]) - --api-loadbalancer-type string Type of load balancer for the Kubernetes API: public or internal - --api-ssl-certificate string ARN of the SSL Certificate to use for the Kubernetes API load balancer (AWS only) - --associate-public-ip Specify --associate-public-ip=[true|false] to enable/disable association of public IP for master ASG and nodes. Default is 'true'. 
- --authorization string Authorization mode: AlwaysAllow or RBAC (default "RBAC") - --bastion Enable a bastion instance group. Only applies to private topology. - --bastion-image string Machine image for bastions. Takes precedence over --image - --channel string Channel for default versions and configuration to use (default "stable") - --cloud string Cloud provider to use - aws, digitalocean, gce, hetzner, openstack - --cloud-labels string A list of key/value pairs used to tag all instance groups (for example "Owner=John Doe,Team=Some Team"). - --container-runtime string Container runtime to use: containerd, docker - --disable-subnet-tags Disable automatic subnet tagging - --discovery-store string A public location where we publish OIDC-compatible discovery information under a cluster-specific directory. Enables IRSA in AWS. - --dns string DNS type to use: public, private, none - --dns-zone string DNS hosted zone (defaults to longest matching zone) - --dry-run If true, only print the object that would be sent, without sending it. This flag can be used to create a cluster YAML or JSON manifest. - --encrypt-etcd-storage Generate key in AWS KMS and use it for encrypt etcd volumes - --etcd-storage-type string The default storage type for etcd members - --gce-service-account string Service account with which the GCE VM runs. Warning: if not set, VMs will run as default compute service account. - -h, --help help for cluster - --image string Machine image for all instances - --ipv6 Use IPv6 for the pod network (AWS only) - --kubernetes-version string Version of kubernetes to run (defaults to version in channel) - --master-count int32 Number of masters. Defaults to one master per master-zone - --master-image string Machine image for masters. Takes precedence over --image - --master-public-name string Domain name of the public Kubernetes API - --master-security-groups strings Additional precreated security groups to add to masters. - --master-size string Machine type for masters - --master-tenancy string Tenancy of the master group (AWS only): default or dedicated - --master-volume-size int32 Instance volume size (in GB) for masters - --master-zones strings Zones in which to run masters (must be an odd number) - --network-cidr string Network CIDR to use - --network-id string Shared Network or VPC to use - --networking string Networking mode. kubenet, external, weave, flannel-vxlan (or flannel), flannel-udp, calico, canal, kube-router, amazonvpc, cilium, cilium-etcd, cni. (default "cilium") - --node-count int32 Total number of worker nodes. Defaults to one node per zone - --node-image string Machine image for worker nodes. Takes precedence over --image - --node-security-groups strings Additional precreated security groups to add to worker nodes. 
- --node-size string Machine type for worker nodes - --node-tenancy string Tenancy of the node group (AWS only): default or dedicated - --node-volume-size int32 Instance volume size (in GB) for worker nodes - --os-dns-servers string comma separated list of DNS Servers which is used in network - --os-ext-net string External network to use with the openstack router - --os-ext-subnet string External floating subnet to use with the openstack router - --os-kubelet-ignore-az Attach volumes across availability zones - --os-lb-floating-subnet string External subnet to use with the kubernetes api - --os-network string ID of the existing OpenStack network to use - --os-octavia Use octavia load balancer API - --os-octavia-provider string Octavia provider to use - --out string Path to write any local output - -o, --output string Output format. One of json or yaml. Used with the --dry-run flag. - --project string Project to use (must be set on GCE) - --set strings Directly set values in the spec - --ssh-access strings Restrict SSH access to this CIDR. If not set, uses the value of the admin-access flag. - --ssh-public-key string SSH public key to use - --subnets strings Shared subnets to use - --target string Valid targets: direct, terraform, cloudformation. Set this flag to terraform if you want kOps to generate terraform (default "direct") - -t, --topology string Network topology for the cluster: public or private (default "public") - --unset strings Directly unset values in the spec - --utility-subnets strings Shared utility subnets to use - -y, --yes Specify --yes to immediately create the cluster - --zones strings Zones in which to run the cluster + --admin-access strings Restrict API access to this CIDR. If not set, access will not be restricted by IP. (default [0.0.0.0/0,::/0]) + --api-loadbalancer-type string Type of load balancer for the Kubernetes API: public or internal + --api-ssl-certificate string ARN of the SSL Certificate to use for the Kubernetes API load balancer (AWS only) + --associate-public-ip Specify --associate-public-ip=[true|false] to enable/disable association of public IP for master ASG and nodes. Default is 'true'. + --authorization string Authorization mode: AlwaysAllow or RBAC (default "RBAC") + --bastion Enable a bastion instance group. Only applies to private topology. + --bastion-image string Machine image for bastions. Takes precedence over --image + --channel string Channel for default versions and configuration to use (default "stable") + --cloud string Cloud provider to use - aws, digitalocean, gce, hetzner, openstack + --cloud-labels string A list of key/value pairs used to tag all instance groups (for example "Owner=John Doe,Team=Some Team"). + --container-runtime string Container runtime to use: containerd, docker + --disable-subnet-tags Disable automatic subnet tagging + --discovery-store string A public location where we publish OIDC-compatible discovery information under a cluster-specific directory. Enables IRSA in AWS. + --dns string DNS type to use: public, private, none + --dns-zone string DNS hosted zone (defaults to longest matching zone) + --dry-run If true, only print the object that would be sent, without sending it. This flag can be used to create a cluster YAML or JSON manifest. + --encrypt-etcd-storage Generate key in AWS KMS and use it for encrypt etcd volumes + --etcd-storage-type string The default storage type for etcd members + --gce-service-account string Service account with which the GCE VM runs. 
Warning: if not set, VMs will run as default compute service account. + -h, --help help for cluster + --image string Machine image for all instances + --ipv6 Use IPv6 for the pod network (AWS only) + --kubernetes-feature-gates strings List of Kubernetes feature gates to enable/disable + --kubernetes-version string Version of Kubernetes to run (defaults to version in channel) + --master-count int32 Number of masters. Defaults to one master per master-zone + --master-image string Machine image for masters. Takes precedence over --image + --master-public-name string Domain name of the public Kubernetes API + --master-security-groups strings Additional precreated security groups to add to masters. + --master-size string Machine type for masters + --master-tenancy string Tenancy of the master group (AWS only): default or dedicated + --master-volume-size int32 Instance volume size (in GB) for masters + --master-zones strings Zones in which to run masters (must be an odd number) + --network-cidr string Network CIDR to use + --network-id string Shared Network or VPC to use + --networking string Networking mode. kubenet, external, weave, flannel-vxlan (or flannel), flannel-udp, calico, canal, kube-router, amazonvpc, cilium, cilium-etcd, cni. (default "cilium") + --node-count int32 Total number of worker nodes. Defaults to one node per zone + --node-image string Machine image for worker nodes. Takes precedence over --image + --node-security-groups strings Additional precreated security groups to add to worker nodes. + --node-size string Machine type for worker nodes + --node-tenancy string Tenancy of the node group (AWS only): default or dedicated + --node-volume-size int32 Instance volume size (in GB) for worker nodes + --os-dns-servers string comma separated list of DNS Servers which is used in network + --os-ext-net string External network to use with the openstack router + --os-ext-subnet string External floating subnet to use with the openstack router + --os-kubelet-ignore-az Attach volumes across availability zones + --os-lb-floating-subnet string External subnet to use with the Kubernetes API + --os-network string ID of the existing OpenStack network to use + --os-octavia Use octavia load balancer API + --os-octavia-provider string Octavia provider to use + --out string Path to write any local output + -o, --output string Output format. One of json or yaml. Used with the --dry-run flag. + --project string Project to use (must be set on GCE) + --set strings Directly set values in the spec + --ssh-access strings Restrict SSH access to this CIDR. If not set, uses the value of the admin-access flag. + --ssh-public-key string SSH public key to use + --subnets strings Shared subnets to use + --target string Valid targets: direct, terraform, cloudformation. Set this flag to terraform if you want kOps to generate terraform (default "direct") + -t, --topology string Network topology for the cluster: 'public' or 'private'. Defaults to 'public' for IPv4 clusters and 'private' for IPv6 clusters. 
+ --unset strings Directly unset values in the spec + --utility-subnets strings Shared utility subnets to use + -y, --yes Specify --yes to immediately create the cluster + --zones strings Zones in which to run the cluster ``` ### Options inherited from parent commands diff --git a/docs/cluster_spec.md b/docs/cluster_spec.md index e4a2cb4c52977..8801fe33ef3f8 100644 --- a/docs/cluster_spec.md +++ b/docs/cluster_spec.md @@ -66,7 +66,7 @@ spec: *Openstack only* As of kOps 1.12.0 it is possible to use the load balancer internally by setting the `useForInternalApi: true`. -This will point both `masterPublicName` and `masterInternalName` to the load balancer. You can therefore set both of these to the same value in this configuration. +This will point `masterPublicName` to the load balancer. ```yaml spec: diff --git a/docs/networking/ipv6.md b/docs/networking/ipv6.md index 673f108ff4e58..9d2fda67bc8de 100644 --- a/docs/networking/ipv6.md +++ b/docs/networking/ipv6.md @@ -39,6 +39,10 @@ the NAT Gateway will be placed in the first-listed public subnet in that zone. The managed private subnets route the rest of outbound IPv6 traffic to the VPC's Egress-only Internet Gateway. The managed public subnets route the rest of outbound IPv6 traffic to the VPC's Internet Gateway. +## Distributions + +Because Debian, as of Debian 11, does not support IPv6-only instances, kOps does not support IPv6 on Debian. + ## CNI kOps currently supports IPv6 on Calico, Cilium, and bring-your-own CNI only. @@ -47,7 +51,7 @@ CNIs must not masquerade IPv6 addresses. ### Calico -Running IPv6 with Calico requires a Ubuntu 22.04, Debian 11, or Flatcar based AMI. +Running IPv6 with Calico requires an Ubuntu 22.04 or Flatcar based AMI. ## Future work diff --git a/docs/operations/images.md b/docs/operations/images.md index e0a3c72d03e6d..305bdc83901d7 100644 --- a/docs/operations/images.md +++ b/docs/operations/images.md @@ -2,14 +2,13 @@ As of Kubernetes 1.18 the default images used by kOps are the **[official Ubuntu 20.04](#ubuntu-2004-focal)** images. -You can choose a different image for an instance group by editing it with `kops edit ig nodes`. You should see an `image` field in one of the following formats: +You can choose a different image for an instance group by editing it with `kops edit ig nodes`. -* `ami-abcdef` - specifies an AMI by id directly -* `<owner>/<name>` specifies an AMI by its owner's account ID and name properties -* `<alias>/<name>` specifies an AMI by its [owner's alias](#owner-aliases) and name properties -* `ssm:<ssm_parameter>` specifies an AMI through an SSM parameter - -Using the AMI id is precise, but ids vary by region. It is often more convenient to use the `<owner>/<name>` form if equivalent images with the same name have been copied to other regions.
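As a sanity check for the `ssm:` format listed below, the referenced parameter can be resolved to a concrete AMI id with the AWS CLI. This is a sketch, assuming AWS credentials and the public Canonical parameter used in the example below:

```bash
# Resolve the public SSM parameter to the AMI id it currently points at
aws ssm get-parameter --region us-east-1 \
  --name /aws/service/canonical/ubuntu/server/20.04/stable/current/amd64/hvm/ebs-gp2/ami-id \
  --query Parameter.Value --output text
```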
+For AWS, you should set the `image` field in one of the following formats: +* `ami-abcdef` - specifies an image by id directly (image id is precise, but ids vary by region) +* `<owner>/<name>` specifies an image by its owner's account ID and name properties +* `<alias>/<name>` specifies an image by its [owner's alias](#owner-aliases) and name properties +* `ssm:<ssm_parameter>` specifies an image through an SSM parameter (kOps 1.25.3+) ```yaml image: ami-00579fbb15b954340 image: ubuntu/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20200423 image: ssm:/aws/service/canonical/ubuntu/server/20.04/stable/current/amd64/hvm/ebs-gp2/ami-id ``` -You can find the name for an image using: - -`aws ec2 describe-images --region us-east-1 --image-id ami-00579fbb15b954340` - ## Security Updates Automated security updates are handled by kOps for Debian, Flatcar and Ubuntu distros. This can be disabled by editing the cluster configuration: @@ -93,10 +88,18 @@ additionalUserData: Available images can be listed using: ```bash +# Amazon Web Services (AWS) aws ec2 describe-images --region us-east-1 --output table \ --owners 136693071363 \ --query "sort_by(Images, &CreationDate)[*].[CreationDate,Name,ImageId]" \ --filters "Name=name,Values=debian-10-amd64-*" + +# Google Cloud Platform (GCP) +gcloud compute images list --filter debian-10-buster-v + +# Microsoft Azure +az vm image list --all --output table \ + --publisher Debian --offer debian-10 --sku 10-gen2 ``` ### Debian 11 (Bullseye) @@ -106,10 +109,18 @@ Debian 11 is based on Kernel version **5.10** which has no known major Kernel bugs. Available images can be listed using: ```bash +# Amazon Web Services (AWS) aws ec2 describe-images --region us-east-1 --output table \ --owners 136693071363 \ --query "sort_by(Images, &CreationDate)[*].[CreationDate,Name,ImageId]" \ --filters "Name=name,Values=debian-11-amd64-*" + +# Google Cloud Platform (GCP) +gcloud compute images list --filter debian-11-bullseye-v + +# Microsoft Azure +az vm image list --all --output table \ + --publisher Debian --offer debian-11 --sku 11-gen2 ``` ### Flatcar diff --git a/docs/releases/1.25-NOTES.md b/docs/releases/1.25-NOTES.md index 85e433842dc56..dfd6336626409 100644 --- a/docs/releases/1.25-NOTES.md +++ b/docs/releases/1.25-NOTES.md @@ -23,8 +23,13 @@ The CSI Cinder plugin for OpenStack will now only use the CSI snapshotter when t # Deprecations * Support for Kubernetes version 1.20 is deprecated and will be removed in kOps 1.26. + * Support for Kubernetes version 1.21 is deprecated and will be removed in kOps 1.27. +* All legacy addons are deprecated in favor of managed addons, including the [metrics server addon](https://github.com/kubernetes/kops/tree/master/addons/metrics-server) and the [autoscaler addon](https://github.com/kubernetes/kops/tree/master/addons/cluster-autoscaler). + +* Due to lack of maintainers, the CloudFormation support has been deprecated. The current implementation will be left as-is until the implementation needs updates or otherwise becomes incompatible. At that point, it will be removed. We very much welcome anyone willing to contribute to this target. + # Full change list since 1.24.0 release * [v1.25.0-alpha.1](https://github.com/kubernetes/kops/releases/tag/v1.25.0-alpha.1) diff --git a/docs/releases/1.26-NOTES.md b/docs/releases/1.26-NOTES.md index a8c7ab3a25060..b28cf84cc6251 100644 --- a/docs/releases/1.26-NOTES.md +++ b/docs/releases/1.26-NOTES.md @@ -20,6 +20,8 @@ This is a document to gather the release notes prior to the release.
* CapacityRebalance can be enabled/disabled on ASGs through a new `capacityRebalance` field in InstanceGroup specs. +* New IPv6 clusters now default to using private topology. + # Breaking changes ## Other breaking changes @@ -41,5 +43,8 @@ CNIs, use the "cni" networking option instead. * Support for AWS Classic Load Balancer for API is deprecated and should not be used for newly created clusters. +* All legacy addons are deprecated in favor of managed addons, including the [metrics server addon](https://github.com/kubernetes/kops/tree/master/addons/metrics-server) and the [autoscaler addon](https://github.com/kubernetes/kops/tree/master/addons/cluster-autoscaler). + +* Due to lack of maintainers, the CloudFormation support has been deprecated. The current implementation will be left as-is until the implementation needs updates or otherwise becomes incompatible. At that point, it will be removed. We very much welcome anyone willing to contribute to this target. # Full change list since 1.25.0 release diff --git a/docs/tutorial/upgrading-kubernetes.md b/docs/tutorial/upgrading-kubernetes.md index 8a1a9d8574fc2..1c4b78452568d 100644 --- a/docs/tutorial/upgrading-kubernetes.md +++ b/docs/tutorial/upgrading-kubernetes.md @@ -45,7 +45,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.17.2 - masterInternalName: api.internal.simple.k8s.local masterPublicName: api.simple.k8s.local networking: kubenet: {} diff --git a/docs/tutorial/working-with-instancegroups.md b/docs/tutorial/working-with-instancegroups.md index a1022dbbe8b1f..48a8550613b7a 100644 --- a/docs/tutorial/working-with-instancegroups.md +++ b/docs/tutorial/working-with-instancegroups.md @@ -194,6 +194,8 @@ using preemptible/spot instances you might be waiting for a long time. ## Fetching images via AWS SSM (AWS Only) +{{ kops_feature_table(kops_added_default='1.25.3') }} + If you are using AWS, you can dynamically fetch instance group images from an AWS SSM Parameter. kOps will automatically fetch SSM Parameter and lookup the AMI ID on every `kops update cluster` run. This is useful if you often update your images and don't want to update your instance group configuration every time. Your SSM Parameter must start with `ssm:` and contain the full path of the SSM Parameter. An example spec looks like this: diff --git a/examples/kops-api-example/up.go b/examples/kops-api-example/up.go index 8a7df5080b07c..7724a4588eeeb 100644 --- a/examples/kops-api-example/up.go +++ b/examples/kops-api-example/up.go @@ -60,7 +60,7 @@ func up(ctx context.Context) error { for _, masterZone := range masterZones { etcdMember := api.EtcdMemberSpec{ Name: masterZone, - InstanceGroup: fi.String(masterZone), + InstanceGroup: fi.PtrTo(masterZone), } etcdCluster.Members = append(etcdCluster.Members, etcdMember) } diff --git a/hack/verify-cloudformation.sh b/hack/verify-cloudformation.sh deleted file mode 100755 index d27bd14e584f6..0000000000000 --- a/hack/verify-cloudformation.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -. "$(dirname "${BASH_SOURCE[0]}")/common.sh" - -TAG=v0.54.2 -IMAGE="cfn-python-lint:${TAG}" - -# There is no official docker image so build it locally -# https://github.com/aws-cloudformation/cfn-python-lint/issues/1025 -function docker_build() { - echo "Building cfn-python-lint image" - docker build --build-arg "CFNLINT_VERSION=${TAG}" --tag "${IMAGE}" - < "${KOPS_ROOT}/hack/cfn-lint.Dockerfile" -} - -docker image inspect "${IMAGE}" >/dev/null 2>&1 || docker_build - -docker run --rm --network host -v "${KOPS_ROOT}:/${KOPS_ROOT}" -v "${KOPS_ROOT}/hack/.cfnlintrc.yaml:/root/.cfnlintrc" "${IMAGE}" "/${KOPS_ROOT}/tests/integration/update_cluster/**/cloudformation.json" -RC=$? - -if [ $RC != 0 ]; then - echo -e "\nCloudformation linting failed\n" - exit $RC -else - echo -e "\nCloudformation linting succeeded\n" -fi diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index 3bba77ca6a5bd..aa90a1f9f20fb 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -895,9 +895,9 @@ spec: DNS This is because some clouds let us define a managed zone foo.bar, and then have kubernetes.dev.foo.bar, without needing to define dev.foo.bar as a hosted zone. DNSZone will probably be a suffix - of the MasterPublicName and MasterInternalName Note that DNSZone - can either by the host name of the zone (containing dots), or can - be an identifier for the zone. + of the MasterPublicName. Note that DNSZone can either be the host + name of the zone (containing dots), or can be an identifier for + the zone. type: string docker: description: DockerConfig is the configuration for docker @@ -3933,8 +3933,7 @@ spec: be a "spec" like stable) type: string masterInternalName: - description: MasterInternalName is the internal DNS name for the master - nodes + description: MasterInternalName is unused. type: string masterKubelet: description: MasterKubelet is the kubelet configuration for nodes @@ -5532,8 +5531,8 @@ spec: this subnet type: string id: - description: ProviderID is the cloud provider id for the objects - associated with the zone (the subnet on AWS) + description: ID is the cloud provider ID for the objects associated + with the zone (the subnet on AWS).
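# Note: this CRD manifest is generated from the Go API types in pkg/apis/kops
# (presumably via controller-gen), so the description strings above track the
# field doc comments rather than being edited by hand.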
type: string ipv6CIDR: description: IPv6CIDR is the IPv6 CIDR block assigned to the diff --git a/nodeup/pkg/bootstrap/install.go b/nodeup/pkg/bootstrap/install.go index e926502234f1d..35309b0d65f1b 100644 --- a/nodeup/pkg/bootstrap/install.go +++ b/nodeup/pkg/bootstrap/install.go @@ -196,7 +196,7 @@ func (i *Installation) buildSystemdJob() *nodetasks.Service { service := &nodetasks.Service{ Name: serviceName, - Definition: fi.String(manifestString), + Definition: fi.PtrTo(manifestString), } service.InitDefaults() diff --git a/nodeup/pkg/model/cloudconfig.go b/nodeup/pkg/model/cloudconfig.go index 98d3f9e36cde4..907dd41605c9c 100644 --- a/nodeup/pkg/model/cloudconfig.go +++ b/nodeup/pkg/model/cloudconfig.go @@ -155,18 +155,18 @@ func (b *CloudConfigBuilder) build(c *fi.ModelBuilderContext, inTree bool) error if lb := osc.Loadbalancer; lb != nil { ingressHostnameSuffix := "nip.io" - if fi.StringValue(lb.IngressHostnameSuffix) != "" { - ingressHostnameSuffix = fi.StringValue(lb.IngressHostnameSuffix) + if fi.ValueOf(lb.IngressHostnameSuffix) != "" { + ingressHostnameSuffix = fi.ValueOf(lb.IngressHostnameSuffix) } lines = append(lines, "[LoadBalancer]", - fmt.Sprintf("floating-network-id=%s", fi.StringValue(lb.FloatingNetworkID)), - fmt.Sprintf("lb-method=%s", fi.StringValue(lb.Method)), - fmt.Sprintf("lb-provider=%s", fi.StringValue(lb.Provider)), - fmt.Sprintf("use-octavia=%t", fi.BoolValue(lb.UseOctavia)), - fmt.Sprintf("manage-security-groups=%t", fi.BoolValue(lb.ManageSecGroups)), - fmt.Sprintf("enable-ingress-hostname=%t", fi.BoolValue(lb.EnableIngressHostname)), + fmt.Sprintf("floating-network-id=%s", fi.ValueOf(lb.FloatingNetworkID)), + fmt.Sprintf("lb-method=%s", fi.ValueOf(lb.Method)), + fmt.Sprintf("lb-provider=%s", fi.ValueOf(lb.Provider)), + fmt.Sprintf("use-octavia=%t", fi.ValueOf(lb.UseOctavia)), + fmt.Sprintf("manage-security-groups=%t", fi.ValueOf(lb.ManageSecGroups)), + fmt.Sprintf("enable-ingress-hostname=%t", fi.ValueOf(lb.EnableIngressHostname)), fmt.Sprintf("ingress-hostname-suffix=%s", ingressHostnameSuffix), "", ) @@ -174,9 +174,9 @@ func (b *CloudConfigBuilder) build(c *fi.ModelBuilderContext, inTree bool) error if monitor := osc.Monitor; monitor != nil { lines = append(lines, "create-monitor=yes", - fmt.Sprintf("monitor-delay=%s", fi.StringValue(monitor.Delay)), - fmt.Sprintf("monitor-timeout=%s", fi.StringValue(monitor.Timeout)), - fmt.Sprintf("monitor-max-retries=%d", fi.IntValue(monitor.MaxRetries)), + fmt.Sprintf("monitor-delay=%s", fi.ValueOf(monitor.Delay)), + fmt.Sprintf("monitor-timeout=%s", fi.ValueOf(monitor.Timeout)), + fmt.Sprintf("monitor-max-retries=%d", fi.ValueOf(monitor.MaxRetries)), "", ) } @@ -186,8 +186,8 @@ func (b *CloudConfigBuilder) build(c *fi.ModelBuilderContext, inTree bool) error // Block Storage Config lines = append(lines, "[BlockStorage]", - fmt.Sprintf("bs-version=%s", fi.StringValue(bs.Version)), - fmt.Sprintf("ignore-volume-az=%t", fi.BoolValue(bs.IgnoreAZ)), + fmt.Sprintf("bs-version=%s", fi.ValueOf(bs.Version)), + fmt.Sprintf("ignore-volume-az=%t", fi.ValueOf(bs.IgnoreAZ)), "") } @@ -197,13 +197,13 @@ func (b *CloudConfigBuilder) build(c *fi.ModelBuilderContext, inTree bool) error var networkingLines []string if networking.IPv6SupportDisabled != nil { - networkingLines = append(networkingLines, fmt.Sprintf("ipv6-support-disabled=%t", fi.BoolValue(networking.IPv6SupportDisabled))) + networkingLines = append(networkingLines, fmt.Sprintf("ipv6-support-disabled=%t", fi.ValueOf(networking.IPv6SupportDisabled))) } for _, name := range 
networking.PublicNetworkNames { - networkingLines = append(networkingLines, fmt.Sprintf("public-network-name=%s", fi.StringValue(name))) + networkingLines = append(networkingLines, fmt.Sprintf("public-network-name=%s", fi.ValueOf(name))) } for _, name := range networking.InternalNetworkNames { - networkingLines = append(networkingLines, fmt.Sprintf("internal-network-name=%s", fi.StringValue(name))) + networkingLines = append(networkingLines, fmt.Sprintf("internal-network-name=%s", fi.ValueOf(name))) } if len(networkingLines) > 0 { diff --git a/nodeup/pkg/model/containerd.go b/nodeup/pkg/model/containerd.go index 3883ef8a1f9f6..9be6613c638a2 100644 --- a/nodeup/pkg/model/containerd.go +++ b/nodeup/pkg/model/containerd.go @@ -126,7 +126,7 @@ func (b *ContainerdBuilder) installContainerd(c *fi.ModelBuilderContext) error { Path: filepath.Join("/usr/bin", k), Contents: v, Type: nodetasks.FileType_File, - Mode: fi.String("0755"), + Mode: fi.PtrTo("0755"), } c.AddTask(fileTask) } @@ -151,7 +151,7 @@ func (b *ContainerdBuilder) installContainerd(c *fi.ModelBuilderContext) error { Path: "/usr/sbin/runc", Contents: v, Type: nodetasks.FileType_File, - Mode: fi.String("0755"), + Mode: fi.PtrTo("0755"), } c.AddTask(fileTask) } @@ -163,13 +163,13 @@ func (b *ContainerdBuilder) installContainerd(c *fi.ModelBuilderContext) error { var containerRuntimeVersion string if b.Cluster.Spec.ContainerRuntime == "containerd" { if b.Cluster.Spec.Containerd != nil { - containerRuntimeVersion = fi.StringValue(b.NodeupConfig.ContainerdConfig.Version) + containerRuntimeVersion = fi.ValueOf(b.NodeupConfig.ContainerdConfig.Version) } else { return fmt.Errorf("error finding contained version") } } else { if b.Cluster.Spec.Docker != nil { - containerRuntimeVersion = fi.StringValue(b.Cluster.Spec.Docker.Version) + containerRuntimeVersion = fi.ValueOf(b.Cluster.Spec.Docker.Version) } else { return fmt.Errorf("error finding Docker version") } @@ -196,7 +196,7 @@ func (b *ContainerdBuilder) buildSystemdService(sv semver.Version) *nodetasks.Se manifest.Set("Unit", "After", "network.target local-fs.target") // Restore the default SELinux security contexts for the containerd and runc binaries - if b.Distribution.IsRHELFamily() && b.Cluster.Spec.Docker != nil && fi.BoolValue(b.Cluster.Spec.Docker.SelinuxEnabled) { + if b.Distribution.IsRHELFamily() && b.Cluster.Spec.Docker != nil && fi.ValueOf(b.Cluster.Spec.Docker.SelinuxEnabled) { manifest.Set("Service", "ExecStartPre", "/bin/sh -c 'restorecon -v /usr/bin/runc'") manifest.Set("Service", "ExecStartPre", "/bin/sh -c 'restorecon -v /usr/bin/containerd*'") } @@ -345,7 +345,7 @@ func (b *ContainerdBuilder) buildConfigFile(c *fi.ModelBuilderContext) error { var config string if b.NodeupConfig.ContainerdConfig != nil && b.NodeupConfig.ContainerdConfig.ConfigOverride != nil { - config = fi.StringValue(b.NodeupConfig.ContainerdConfig.ConfigOverride) + config = fi.ValueOf(b.NodeupConfig.ContainerdConfig.ConfigOverride) } else { if cc, err := b.buildContainerdConfig(); err != nil { return err @@ -483,7 +483,7 @@ func (b *ContainerdBuilder) buildContainerdConfig() (string, error) { } containerd := b.NodeupConfig.ContainerdConfig - if fi.StringValue(containerd.ConfigOverride) != "" { + if fi.ValueOf(containerd.ConfigOverride) != "" { return *containerd.ConfigOverride, nil } diff --git a/nodeup/pkg/model/containerd_test.go b/nodeup/pkg/model/containerd_test.go index 51308ab4bedda..c6ddaa0bbff76 100644 --- a/nodeup/pkg/model/containerd_test.go +++ b/nodeup/pkg/model/containerd_test.go @@ -64,56 
+64,56 @@ func TestContainerdBuilder_BuildFlags(t *testing.T) { { kops.ContainerdConfig{ SkipInstall: false, - ConfigOverride: fi.String("test"), - Version: fi.String("test"), + ConfigOverride: fi.PtrTo("test"), + Version: fi.PtrTo("test"), }, "", }, { kops.ContainerdConfig{ - Address: fi.String("/run/containerd/containerd.sock"), + Address: fi.PtrTo("/run/containerd/containerd.sock"), }, "--address=/run/containerd/containerd.sock", }, { kops.ContainerdConfig{ - LogLevel: fi.String("info"), + LogLevel: fi.PtrTo("info"), }, "--log-level=info", }, { kops.ContainerdConfig{ - Root: fi.String("/var/lib/containerd"), + Root: fi.PtrTo("/var/lib/containerd"), }, "--root=/var/lib/containerd", }, { kops.ContainerdConfig{ - State: fi.String("/run/containerd"), + State: fi.PtrTo("/run/containerd"), }, "--state=/run/containerd", }, { kops.ContainerdConfig{ SkipInstall: false, - Address: fi.String("/run/containerd/containerd.sock"), - ConfigOverride: fi.String("test"), - LogLevel: fi.String("info"), - Root: fi.String("/var/lib/containerd"), - State: fi.String("/run/containerd"), - Version: fi.String("test"), + Address: fi.PtrTo("/run/containerd/containerd.sock"), + ConfigOverride: fi.PtrTo("test"), + LogLevel: fi.PtrTo("info"), + Root: fi.PtrTo("/var/lib/containerd"), + State: fi.PtrTo("/run/containerd"), + Version: fi.PtrTo("test"), }, "--address=/run/containerd/containerd.sock --log-level=info --root=/var/lib/containerd --state=/run/containerd", }, { kops.ContainerdConfig{ SkipInstall: true, - Address: fi.String("/run/containerd/containerd.sock"), - ConfigOverride: fi.String("test"), - LogLevel: fi.String("info"), - Root: fi.String("/var/lib/containerd"), - State: fi.String("/run/containerd"), - Version: fi.String("test"), + Address: fi.PtrTo("/run/containerd/containerd.sock"), + ConfigOverride: fi.PtrTo("test"), + LogLevel: fi.PtrTo("info"), + Root: fi.PtrTo("/var/lib/containerd"), + State: fi.PtrTo("/run/containerd"), + Version: fi.PtrTo("test"), }, "--address=/run/containerd/containerd.sock --log-level=info --root=/var/lib/containerd --state=/run/containerd", }, diff --git a/nodeup/pkg/model/context.go b/nodeup/pkg/model/context.go index 6c7c0a4578820..e260fa6f70278 100644 --- a/nodeup/pkg/model/context.go +++ b/nodeup/pkg/model/context.go @@ -234,7 +234,7 @@ func (c *NodeupModelContext) BuildIssuedKubeconfig(name string, subject nodetask // @note: use https even for local connections, so we can turn off the insecure port kubeConfig.ServerURL = "https://127.0.0.1" } else { - kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName + kubeConfig.ServerURL = "https://" + c.Cluster.APIInternalName() } ctx.AddTask(kubeConfig) return kubeConfig.GetConfig() @@ -278,7 +278,7 @@ func (c *NodeupModelContext) BuildBootstrapKubeconfig(name string, ctx *fi.Model // @note: use https even for local connections, so we can turn off the insecure port kubeConfig.ServerURL = "https://127.0.0.1" } else { - kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName + kubeConfig.ServerURL = "https://" + c.Cluster.APIInternalName() } err = ctx.EnsureTask(kubeConfig) @@ -323,7 +323,7 @@ func (c *NodeupModelContext) BuildBootstrapKubeconfig(name string, ctx *fi.Model // This code path is used for the kubelet cert in Kubernetes 1.18 and earlier. 
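// Cluster.APIInternalName() derives the internal API DNS name from the
// cluster name (assumed to be "api.internal." plus the cluster name),
// replacing the removed Spec.MasterInternalName field.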
kubeConfig.ServerURL = "https://127.0.0.1" } else { - kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName + kubeConfig.ServerURL = "https://" + c.Cluster.APIInternalName() } err = kubeConfig.Run(nil) @@ -378,7 +378,7 @@ func (c *NodeupModelContext) UsesSecondaryIP() bool { // UseBootstrapTokens checks if we are using bootstrap tokens func (c *NodeupModelContext) UseBootstrapTokens() bool { if c.HasAPIServer { - return fi.BoolValue(c.NodeupConfig.APIServerConfig.KubeAPIServer.EnableBootstrapAuthToken) + return fi.ValueOf(c.NodeupConfig.APIServerConfig.KubeAPIServer.EnableBootstrapAuthToken) } return c.NodeupConfig.KubeletConfig.BootstrapKubeconfig != "" @@ -575,7 +575,7 @@ func (b *NodeupModelContext) addCNIBinAsset(c *fi.ModelBuilderContext, assetPath Path: filepath.Join(b.CNIBinDir(), name), Contents: res, Type: nodetasks.FileType_File, - Mode: fi.String("0755"), + Mode: fi.PtrTo("0755"), }) return nil @@ -594,7 +594,7 @@ func (c *NodeupModelContext) CNIConfDir() string { func (c *NodeupModelContext) InstallNvidiaRuntime() bool { return c.NodeupConfig.NvidiaGPU != nil && - fi.BoolValue(c.NodeupConfig.NvidiaGPU.Enabled) && + fi.ValueOf(c.NodeupConfig.NvidiaGPU.Enabled) && c.GPUVendor == architectures.GPUVendorNvidia } diff --git a/nodeup/pkg/model/convenience.go b/nodeup/pkg/model/convenience.go index 1171ab02ef10f..e5a16ac489560 100644 --- a/nodeup/pkg/model/convenience.go +++ b/nodeup/pkg/model/convenience.go @@ -25,12 +25,12 @@ import ( // s is a helper that builds a *string from a string value func s(v string) *string { - return fi.String(v) + return fi.PtrTo(v) } // b returns a pointer to a boolean func b(v bool) *bool { - return fi.Bool(v) + return fi.PtrTo(v) } // buildContainerRuntimeEnvironmentVars just converts a series of keypairs to docker environment variables switches diff --git a/nodeup/pkg/model/docker.go b/nodeup/pkg/model/docker.go index 5c6c3d56ef04a..58e4c0f10bd70 100644 --- a/nodeup/pkg/model/docker.go +++ b/nodeup/pkg/model/docker.go @@ -46,7 +46,7 @@ var _ fi.ModelBuilder = &DockerBuilder{} func (b *DockerBuilder) dockerVersion() (string, error) { dockerVersion := "" if b.Cluster.Spec.Docker != nil { - dockerVersion = fi.StringValue(b.Cluster.Spec.Docker.Version) + dockerVersion = fi.ValueOf(b.Cluster.Spec.Docker.Version) } if dockerVersion == "" { return "", fmt.Errorf("error finding Docker version") @@ -104,7 +104,7 @@ func (b *DockerBuilder) Build(c *fi.ModelBuilderContext) error { Path: filepath.Join("/usr/bin", k), Contents: v, Type: nodetasks.FileType_File, - Mode: fi.String("0755"), + Mode: fi.PtrTo("0755"), } c.AddTask(fileTask) @@ -201,7 +201,7 @@ func (b *DockerBuilder) buildSystemdService(dockerVersion semver.Version) *nodet manifest.Set("Service", "Type", "notify") // Restore the default SELinux security contexts for the Docker binaries - if b.Distribution.IsRHELFamily() && b.Cluster.Spec.Docker != nil && fi.BoolValue(b.Cluster.Spec.Docker.SelinuxEnabled) { + if b.Distribution.IsRHELFamily() && b.Cluster.Spec.Docker != nil && fi.ValueOf(b.Cluster.Spec.Docker.SelinuxEnabled) { manifest.Set("Service", "ExecStartPre", "/bin/sh -c 'restorecon -v /usr/bin/docker*'") } // the default is not to use systemd for cgroups because the delegate issues still diff --git a/nodeup/pkg/model/docker_test.go b/nodeup/pkg/model/docker_test.go index d81c077657a8a..1f007875767c3 100644 --- a/nodeup/pkg/model/docker_test.go +++ b/nodeup/pkg/model/docker_test.go @@ -82,11 +82,11 @@ func TestDockerBuilder_BuildFlags(t *testing.T) { "", }, { - 
kops.DockerConfig{Bridge: fi.String("")}, + kops.DockerConfig{Bridge: fi.PtrTo("")}, "", }, { - kops.DockerConfig{Bridge: fi.String("br0")}, + kops.DockerConfig{Bridge: fi.PtrTo("br0")}, "--bridge=br0", }, { @@ -150,7 +150,7 @@ func runDockerBuilderTest(t *testing.T, key string) { t.Fatalf("error finding Docker version") return } - dv := fi.StringValue(nodeUpModelContext.Cluster.Spec.Docker.Version) + dv := fi.ValueOf(nodeUpModelContext.Cluster.Spec.Docker.Version) sv, err := semver.ParseTolerant(dv) if err != nil { t.Fatalf("error parsing Docker version %q: %v", dv, err) diff --git a/nodeup/pkg/model/etc_hosts.go b/nodeup/pkg/model/etc_hosts.go index 51767392962f2..573aad94d8b8d 100644 --- a/nodeup/pkg/model/etc_hosts.go +++ b/nodeup/pkg/model/etc_hosts.go @@ -37,18 +37,18 @@ func (b *EtcHostsBuilder) Build(c *fi.ModelBuilderContext) error { if b.IsMaster && (b.Cluster.IsGossip() || b.Cluster.UsesNoneDNS()) { task.Records = append(task.Records, nodetasks.HostRecord{ - Hostname: b.Cluster.Spec.MasterInternalName, + Hostname: b.Cluster.APIInternalName(), Addresses: []string{"127.0.0.1"}, }) - if b.Cluster.Spec.MasterPublicName != "" { + if b.Cluster.Spec.API.PublicName != "" { task.Records = append(task.Records, nodetasks.HostRecord{ - Hostname: b.Cluster.Spec.MasterPublicName, + Hostname: b.Cluster.Spec.API.PublicName, Addresses: []string{"127.0.0.1"}, }) } } else if b.BootConfig.APIServerIP != "" { task.Records = append(task.Records, nodetasks.HostRecord{ - Hostname: b.Cluster.Spec.MasterInternalName, + Hostname: b.Cluster.APIInternalName(), Addresses: []string{b.BootConfig.APIServerIP}, }) if b.UseKopsControllerForNodeBootstrap() { diff --git a/nodeup/pkg/model/etcd_manager_tls.go b/nodeup/pkg/model/etcd_manager_tls.go index bb0d84c958f8d..a8073d216f86f 100644 --- a/nodeup/pkg/model/etcd_manager_tls.go +++ b/nodeup/pkg/model/etcd_manager_tls.go @@ -60,7 +60,7 @@ func (b *EtcdManagerTLSBuilder) Build(ctx *fi.ModelBuilderContext) error { Path: filepath.Join(d, fileName+".crt"), Contents: fi.NewStringResource(b.NodeupConfig.CAs[keystoreName]), Type: nodetasks.FileType_File, - Mode: fi.String("0644"), + Mode: fi.PtrTo("0644"), }) } diff --git a/nodeup/pkg/model/kube_apiserver.go b/nodeup/pkg/model/kube_apiserver.go index 052bf323c23a1..4f8c346eeb31c 100644 --- a/nodeup/pkg/model/kube_apiserver.go +++ b/nodeup/pkg/model/kube_apiserver.go @@ -79,7 +79,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { } if b.NodeupConfig.APIServerConfig.EncryptionConfigSecretHash != "" { - encryptionConfigPath := fi.String(filepath.Join(pathSrvKAPI, "encryptionconfig.yaml")) + encryptionConfigPath := fi.PtrTo(filepath.Join(pathSrvKAPI, "encryptionconfig.yaml")) kubeAPIServer.EncryptionProviderConfig = encryptionConfigPath @@ -90,7 +90,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { t := &nodetasks.File{ Path: *encryptionConfigPath, Contents: fi.NewStringResource(contents), - Mode: fi.String("600"), + Mode: fi.PtrTo("600"), Type: nodetasks.FileType_File, } c.AddTask(t) @@ -121,7 +121,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { Path: filepath.Join(pathSrvKAPI, "etcd-ca.crt"), Contents: fi.NewStringResource(b.NodeupConfig.CAs["etcd-clients-ca"]), Type: nodetasks.FileType_File, - Mode: fi.String("0644"), + Mode: fi.PtrTo("0644"), }) kubeAPIServer.EtcdCAFile = filepath.Join(pathSrvKAPI, "etcd-ca.crt") @@ -147,7 +147,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { Path: filepath.Join(pathSrvKAPI, 
"apiserver-aggregator-ca.crt"), Contents: fi.NewStringResource(b.NodeupConfig.CAs["apiserver-aggregator-ca"]), Type: nodetasks.FileType_File, - Mode: fi.String("0644"), + Mode: fi.PtrTo("0644"), }) kubeAPIServer.RequestheaderClientCAFile = filepath.Join(pathSrvKAPI, "apiserver-aggregator-ca.crt") @@ -164,8 +164,8 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { if err != nil { return err } - kubeAPIServer.ProxyClientCertFile = fi.String(filepath.Join(pathSrvKAPI, "apiserver-aggregator.crt")) - kubeAPIServer.ProxyClientKeyFile = fi.String(filepath.Join(pathSrvKAPI, "apiserver-aggregator.key")) + kubeAPIServer.ProxyClientCertFile = fi.PtrTo(filepath.Join(pathSrvKAPI, "apiserver-aggregator.crt")) + kubeAPIServer.ProxyClientKeyFile = fi.PtrTo(filepath.Join(pathSrvKAPI, "apiserver-aggregator.key")) } if err := b.writeServerCertificate(c, &kubeAPIServer); err != nil { @@ -263,7 +263,7 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte if b.Cluster.Spec.Authentication.AWS != nil { id := "aws-iam-authenticator" - kubeAPIServer.AuthenticationTokenWebhookConfigFile = fi.String(PathAuthnConfig) + kubeAPIServer.AuthenticationTokenWebhookConfigFile = fi.PtrTo(PathAuthnConfig) { cluster := kubeconfig.KubectlCluster{ @@ -298,7 +298,7 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte Path: PathAuthnConfig, Contents: fi.NewBytesResource(manifest), Type: nodetasks.FileType_File, - Mode: fi.String("600"), + Mode: fi.PtrTo("600"), }) } @@ -333,18 +333,18 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte Path: "/srv/kubernetes/aws-iam-authenticator/cert.pem", Contents: certificate, Type: nodetasks.FileType_File, - Mode: fi.String("600"), - Owner: fi.String("aws-iam-authenticator"), - Group: fi.String("aws-iam-authenticator"), + Mode: fi.PtrTo("600"), + Owner: fi.PtrTo("aws-iam-authenticator"), + Group: fi.PtrTo("aws-iam-authenticator"), }) c.AddTask(&nodetasks.File{ Path: "/srv/kubernetes/aws-iam-authenticator/key.pem", Contents: privateKey, Type: nodetasks.FileType_File, - Mode: fi.String("600"), - Owner: fi.String("aws-iam-authenticator"), - Group: fi.String("aws-iam-authenticator"), + Mode: fi.PtrTo("600"), + Owner: fi.PtrTo("aws-iam-authenticator"), + Group: fi.PtrTo("aws-iam-authenticator"), }) } @@ -367,11 +367,11 @@ func (b *KubeAPIServerBuilder) writeServerCertificate(c *fi.ModelBuilderContext, } // Names specified in the cluster spec - if b.Cluster.Spec.MasterPublicName != "" { - alternateNames = append(alternateNames, b.Cluster.Spec.MasterPublicName) + if b.Cluster.Spec.API.PublicName != "" { + alternateNames = append(alternateNames, b.Cluster.Spec.API.PublicName) } - alternateNames = append(alternateNames, b.Cluster.Spec.MasterInternalName) - alternateNames = append(alternateNames, b.Cluster.Spec.AdditionalSANs...) + alternateNames = append(alternateNames, b.Cluster.APIInternalName()) + alternateNames = append(alternateNames, b.Cluster.Spec.API.AdditionalSANs...) // Load balancer IPs passed in through NodeupConfig alternateNames = append(alternateNames, b.NodeupConfig.ApiserverAdditionalIPs...) 
@@ -575,7 +575,7 @@ func (b *KubeAPIServerBuilder) buildPod(kubeAPIServer *kops.KubeAPIServerConfig) Port: intstr.FromInt(wellknownports.KubeAPIServerHealthCheck), } - insecurePort := fi.Int32Value(kubeAPIServer.InsecurePort) + insecurePort := fi.ValueOf(kubeAPIServer.InsecurePort) if useHealthcheckProxy { // kube-apiserver-healthcheck sidecar container runs on port 3990 } else if insecurePort != 0 { @@ -732,14 +732,12 @@ func (b *KubeAPIServerBuilder) buildAnnotations() map[string]string { return annotations } - if b.Cluster.Spec.API != nil { - if b.Cluster.Spec.API.LoadBalancer == nil || !b.Cluster.Spec.API.LoadBalancer.UseForInternalAPI { - annotations["dns.alpha.kubernetes.io/internal"] = b.Cluster.Spec.MasterInternalName - } + if b.Cluster.Spec.API.LoadBalancer == nil || !b.Cluster.Spec.API.LoadBalancer.UseForInternalAPI { + annotations["dns.alpha.kubernetes.io/internal"] = b.Cluster.APIInternalName() + } - if b.Cluster.Spec.API.DNS != nil && b.Cluster.Spec.MasterPublicName != "" { - annotations["dns.alpha.kubernetes.io/external"] = b.Cluster.Spec.MasterPublicName - } + if b.Cluster.Spec.API.DNS != nil && b.Cluster.Spec.API.PublicName != "" { + annotations["dns.alpha.kubernetes.io/external"] = b.Cluster.Spec.API.PublicName } return annotations diff --git a/nodeup/pkg/model/kube_apiserver_test.go b/nodeup/pkg/model/kube_apiserver_test.go index 9a84ccf6e4588..bed2d3d9935dd 100644 --- a/nodeup/pkg/model/kube_apiserver_test.go +++ b/nodeup/pkg/model/kube_apiserver_test.go @@ -80,13 +80,13 @@ func Test_KubeAPIServer_BuildFlags(t *testing.T) { }, { kops.KubeAPIServerConfig{ - ExperimentalEncryptionProviderConfig: fi.String("/srv/kubernetes/encryptionconfig.yaml"), + ExperimentalEncryptionProviderConfig: fi.PtrTo("/srv/kubernetes/encryptionconfig.yaml"), }, "--experimental-encryption-provider-config=/srv/kubernetes/encryptionconfig.yaml --secure-port=0", }, { kops.KubeAPIServerConfig{ - EncryptionProviderConfig: fi.String("/srv/kubernetes/encryptionconfig.yaml"), + EncryptionProviderConfig: fi.PtrTo("/srv/kubernetes/encryptionconfig.yaml"), }, "--encryption-provider-config=/srv/kubernetes/encryptionconfig.yaml --secure-port=0", }, diff --git a/nodeup/pkg/model/kube_controller_manager.go b/nodeup/pkg/model/kube_controller_manager.go index ebd68b1d83314..802254d3570dd 100644 --- a/nodeup/pkg/model/kube_controller_manager.go +++ b/nodeup/pkg/model/kube_controller_manager.go @@ -137,7 +137,7 @@ func (b *KubeControllerManagerBuilder) writeServerCertificate(c *fi.ModelBuilder return err } - kcm.TLSCertFile = fi.String(filepath.Join(pathSrvKCM, "server.crt")) + kcm.TLSCertFile = fi.PtrTo(filepath.Join(pathSrvKCM, "server.crt")) kcm.TLSPrivateKeyFile = filepath.Join(pathSrvKCM, "server.key") } diff --git a/nodeup/pkg/model/kube_proxy.go b/nodeup/pkg/model/kube_proxy.go index fd4b8a0d03d18..c8fb1c4465bca 100644 --- a/nodeup/pkg/model/kube_proxy.go +++ b/nodeup/pkg/model/kube_proxy.go @@ -51,7 +51,7 @@ func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error { if b.IsMaster { // If this is a master that is not isolated, run it as a normal node also (start kube-proxy etc) // This lets e.g. 
daemonset pods communicate with other pods in the system - if fi.BoolValue(b.Cluster.Spec.IsolateMasters) { + if fi.ValueOf(b.Cluster.Spec.IsolateMasters) { klog.V(2).Infof("Running on Master with IsolateMaster=true; skipping kube-proxy installation") return nil } @@ -126,7 +126,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) { // which would mean that DNS can't rely on API to come up c.Master = "https://127.0.0.1" } else { - c.Master = "https://" + b.Cluster.Spec.MasterInternalName + c.Master = "https://" + b.Cluster.APIInternalName() } } @@ -178,7 +178,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) { Limits: resourceLimits, }, SecurityContext: &v1.SecurityContext{ - Privileged: fi.Bool(true), + Privileged: fi.PtrTo(true), }, } diff --git a/nodeup/pkg/model/kube_proxy_test.go b/nodeup/pkg/model/kube_proxy_test.go index ae477fa24cdac..a9d25d2f88601 100644 --- a/nodeup/pkg/model/kube_proxy_test.go +++ b/nodeup/pkg/model/kube_proxy_test.go @@ -38,7 +38,6 @@ func TestKubeProxyBuilder_buildPod(t *testing.T) { // https://pkg.go.dev/k8s.io/kops/pkg/apis/kops#KubeProxyConfig cluster := &kops.Cluster{} - cluster.Spec.MasterInternalName = "dev-cluster" cluster.Spec.KubeProxy = &kops.KubeProxyConfig{} cluster.Spec.KubeProxy.Image = "kube-proxy:1.2" diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go index 8e350c53fc12f..2d462671560d7 100644 --- a/nodeup/pkg/model/kube_scheduler.go +++ b/nodeup/pkg/model/kube_scheduler.go @@ -178,7 +178,7 @@ func (b *KubeSchedulerBuilder) writeServerCertificate(c *fi.ModelBuilderContext, return err } - kubeScheduler.TLSCertFile = fi.String(filepath.Join(pathSrvScheduler, "server.crt")) + kubeScheduler.TLSCertFile = fi.PtrTo(filepath.Join(pathSrvScheduler, "server.crt")) kubeScheduler.TLSPrivateKeyFile = filepath.Join(pathSrvScheduler, "server.key") } @@ -201,7 +201,7 @@ func (b *KubeSchedulerBuilder) buildPod(kubeScheduler *kops.KubeSchedulerConfig) flags = append(flags, "--"+flag+"kubeconfig="+kubescheduler.KubeConfigPath) } - if fi.BoolValue(kubeScheduler.UsePolicyConfigMap) { + if fi.ValueOf(kubeScheduler.UsePolicyConfigMap) { flags = append(flags, "--policy-configmap=scheduler-policy", "--policy-configmap-namespace=kube-system") } diff --git a/nodeup/pkg/model/kubelet.go b/nodeup/pkg/model/kubelet.go index 28dc4e6a3c7fd..46500c25dc3ce 100644 --- a/nodeup/pkg/model/kubelet.go +++ b/nodeup/pkg/model/kubelet.go @@ -322,7 +322,7 @@ func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.Kubelet if b.Cluster.Spec.Containerd == nil || b.Cluster.Spec.Containerd.Address == nil { flags += " --container-runtime-endpoint=unix:///run/containerd/containerd.sock" } else { - flags += " --container-runtime-endpoint=unix://" + fi.StringValue(b.Cluster.Spec.Containerd.Address) + flags += " --container-runtime-endpoint=unix://" + fi.ValueOf(b.Cluster.Spec.Containerd.Address) } } @@ -404,7 +404,7 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service { service.InitDefaults() if b.ConfigurationMode == "Warming" { - service.Running = fi.Bool(false) + service.Running = fi.PtrTo(false) } return service @@ -565,7 +565,7 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, erro } // Write back values that could have changed - c.MaxPods = fi.Int32(int32(maxPods)) + c.MaxPods = fi.PtrTo(int32(maxPods)) } } @@ -602,7 +602,7 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, erro } if c.AuthenticationTokenWebhook == nil { - c.AuthenticationTokenWebhook 
= fi.Bool(true) + c.AuthenticationTokenWebhook = fi.PtrTo(true) } return &c, nil @@ -642,7 +642,7 @@ func (b *KubeletBuilder) buildKubeletServingCertificate(c *fi.ModelBuilderContex Path: filepath.Join(dir, name+".crt"), Contents: cert, Type: nodetasks.FileType_File, - Mode: fi.String("0644"), + Mode: fi.PtrTo("0644"), BeforeServices: []string{"kubelet.service"}, }) @@ -650,7 +650,7 @@ func (b *KubeletBuilder) buildKubeletServingCertificate(c *fi.ModelBuilderContex Path: filepath.Join(dir, name+".key"), Contents: key, Type: nodetasks.FileType_File, - Mode: fi.String("0400"), + Mode: fi.PtrTo("0400"), BeforeServices: []string{"kubelet.service"}, }) diff --git a/nodeup/pkg/model/kubelet_test.go b/nodeup/pkg/model/kubelet_test.go index b1333e8f3b0a1..13481a4172a8b 100644 --- a/nodeup/pkg/model/kubelet_test.go +++ b/nodeup/pkg/model/kubelet_test.go @@ -283,7 +283,7 @@ func BuildNodeupModelContext(model *testutils.Model) (*NodeupModelContext, error nodeupModelContext.NodeupConfig.ContainerdConfig = nodeupModelContext.Cluster.Spec.Containerd updatePolicy := nodeupModelContext.Cluster.Spec.UpdatePolicy if updatePolicy == nil { - updatePolicy = fi.String(kops.UpdatePolicyAutomatic) + updatePolicy = fi.PtrTo(kops.UpdatePolicyAutomatic) } nodeupModelContext.NodeupConfig.UpdatePolicy = *updatePolicy diff --git a/nodeup/pkg/model/networking/cilium.go b/nodeup/pkg/model/networking/cilium.go index 58ebf69f5032a..2d3669e195973 100644 --- a/nodeup/pkg/model/networking/cilium.go +++ b/nodeup/pkg/model/networking/cilium.go @@ -95,7 +95,7 @@ WantedBy=multi-user.target service := &nodetasks.Service{ Name: "sys-fs-bpf.mount", - Definition: fi.String(unit), + Definition: fi.PtrTo(unit), } service.InitDefaults() c.AddTask(service) @@ -138,8 +138,8 @@ WantedBy=multi-user.target service := &nodetasks.Service{ Name: "run-cilium-cgroupv2.mount", - Definition: fi.String(unit), - SmartRestart: fi.Bool(false), + Definition: fi.PtrTo(unit), + SmartRestart: fi.PtrTo(false), } service.InitDefaults() c.AddTask(service) @@ -156,7 +156,7 @@ func (b *CiliumBuilder) buildCiliumEtcdSecrets(c *fi.ModelBuilderContext) error Path: filepath.Join(dir, "etcd-ca.crt"), Contents: fi.NewStringResource(b.NodeupConfig.CAs[signer]), Type: nodetasks.FileType_File, - Mode: fi.String("0600"), + Mode: fi.PtrTo("0600"), }) if b.HasAPIServer { issueCert := &nodetasks.IssueCert{ @@ -181,7 +181,7 @@ func (b *CiliumBuilder) buildCiliumEtcdSecrets(c *fi.ModelBuilderContext) error Path: filepath.Join(dir, name+".crt"), Contents: cert, Type: nodetasks.FileType_File, - Mode: fi.String("0644"), + Mode: fi.PtrTo("0644"), BeforeServices: []string{"kubelet.service"}, }) @@ -189,7 +189,7 @@ func (b *CiliumBuilder) buildCiliumEtcdSecrets(c *fi.ModelBuilderContext) error Path: filepath.Join(dir, name+".key"), Contents: key, Type: nodetasks.FileType_File, - Mode: fi.String("0400"), + Mode: fi.PtrTo("0400"), BeforeServices: []string{"kubelet.service"}, }) diff --git a/nodeup/pkg/model/networking/kube_router.go b/nodeup/pkg/model/networking/kube_router.go index 3b8c8662cd72e..4ce900f3109d0 100644 --- a/nodeup/pkg/model/networking/kube_router.go +++ b/nodeup/pkg/model/networking/kube_router.go @@ -54,7 +54,7 @@ func (b *KuberouterBuilder) Build(c *fi.ModelBuilderContext) error { Path: "/var/lib/kube-router/kubeconfig", Contents: kubeconfig, Type: nodetasks.FileType_File, - Mode: fi.String("0400"), + Mode: fi.PtrTo("0400"), BeforeServices: []string{"kubelet.service"}, }) diff --git a/nodeup/pkg/model/protokube.go b/nodeup/pkg/model/protokube.go index 
319a549debc11..58df6faf1ccd0 100644 --- a/nodeup/pkg/model/protokube.go +++ b/nodeup/pkg/model/protokube.go @@ -65,7 +65,7 @@ func (t *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error { Path: filepath.Join("/opt/kops/bin", name), Contents: res, Type: nodetasks.FileType_File, - Mode: fi.String("0755"), + Mode: fi.PtrTo("0755"), }) } @@ -79,7 +79,7 @@ func (t *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error { Path: filepath.Join("/opt/kops/bin", name), Contents: res, Type: nodetasks.FileType_File, - Mode: fi.String("0755"), + Mode: fi.PtrTo("0755"), }) } @@ -186,12 +186,12 @@ type ProtokubeFlags struct { func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*ProtokubeFlags, error) { f := &ProtokubeFlags{ Channels: t.NodeupConfig.Channels, - Containerized: fi.Bool(false), - LogLevel: fi.Int32(4), + Containerized: fi.PtrTo(false), + LogLevel: fi.PtrTo(int32(4)), Master: b(t.IsMaster), } - f.ClusterID = fi.String(t.Cluster.ObjectMeta.Name) + f.ClusterID = fi.PtrTo(t.Cluster.ObjectMeta.Name) zone := t.Cluster.Spec.DNSZone if zone != "" { @@ -210,7 +210,7 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*Protokube if t.Cluster.IsGossip() { klog.Warningf("Cluster name %q implies gossip DNS", t.Cluster.Name) - f.Gossip = fi.Bool(true) + f.Gossip = fi.PtrTo(true) if t.Cluster.Spec.GossipConfig != nil { f.GossipProtocol = t.Cluster.Spec.GossipConfig.Protocol f.GossipListen = t.Cluster.Spec.GossipConfig.Listen @@ -224,17 +224,17 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*Protokube } // @TODO: This is hacky, but we want it so that we can have a different internal & external name - internalSuffix := t.Cluster.Spec.MasterInternalName + internalSuffix := t.Cluster.APIInternalName() internalSuffix = strings.TrimPrefix(internalSuffix, "api.") - f.DNSInternalSuffix = fi.String(internalSuffix) + f.DNSInternalSuffix = fi.PtrTo(internalSuffix) } if t.CloudProvider != "" { - f.Cloud = fi.String(string(t.CloudProvider)) + f.Cloud = fi.PtrTo(string(t.CloudProvider)) } if f.DNSInternalSuffix == nil { - f.DNSInternalSuffix = fi.String(".internal." + t.Cluster.ObjectMeta.Name) + f.DNSInternalSuffix = fi.PtrTo(".internal." 
+ t.Cluster.ObjectMeta.Name) } f.BootstrapMasterNodeLabels = true diff --git a/nodeup/pkg/model/tests/containerdbuilder/flatcar/cluster.yaml b/nodeup/pkg/model/tests/containerdbuilder/flatcar/cluster.yaml index f282eeeb59757..66c7d54a52448 100644 --- a/nodeup/pkg/model/tests/containerdbuilder/flatcar/cluster.yaml +++ b/nodeup/pkg/model/tests/containerdbuilder/flatcar/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: legacy: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.11/cluster.yaml b/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.11/cluster.yaml index 5e3d558a2c4fe..3e13495c5a93e 100644 --- a/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.11/cluster.yaml +++ b/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.11/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: legacy: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.14/cluster.yaml b/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.14/cluster.yaml index 63a80d21773d2..ead52ac01b634 100644 --- a/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.14/cluster.yaml +++ b/nodeup/pkg/model/tests/containerdbuilder/from_docker_19.03.14/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: legacy: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/containerdbuilder/simple/cluster.yaml b/nodeup/pkg/model/tests/containerdbuilder/simple/cluster.yaml index f282eeeb59757..66c7d54a52448 100644 --- a/nodeup/pkg/model/tests/containerdbuilder/simple/cluster.yaml +++ b/nodeup/pkg/model/tests/containerdbuilder/simple/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: legacy: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/containerdbuilder/skipinstall/cluster.yaml b/nodeup/pkg/model/tests/containerdbuilder/skipinstall/cluster.yaml index fbbe7a31e4ee1..e2c8d3b9a0cd5 100644 --- a/nodeup/pkg/model/tests/containerdbuilder/skipinstall/cluster.yaml +++ b/nodeup/pkg/model/tests/containerdbuilder/skipinstall/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: legacy: false kubernetesVersion: v1.22.3 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/dockerbuilder/docker_19.03.11/cluster.yaml b/nodeup/pkg/model/tests/dockerbuilder/docker_19.03.11/cluster.yaml index 082048a6677f1..94e141a4322e6 100644 --- a/nodeup/pkg/model/tests/dockerbuilder/docker_19.03.11/cluster.yaml +++ b/nodeup/pkg/model/tests/dockerbuilder/docker_19.03.11/cluster.yaml @@ -24,7 +24,6 @@ spec: iam: legacy: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/dockerbuilder/healthcheck/cluster.yaml b/nodeup/pkg/model/tests/dockerbuilder/healthcheck/cluster.yaml index 1f37342bd7fbd..f1aba6f14dcc2 100644 --- 
a/nodeup/pkg/model/tests/dockerbuilder/healthcheck/cluster.yaml +++ b/nodeup/pkg/model/tests/dockerbuilder/healthcheck/cluster.yaml @@ -24,7 +24,6 @@ spec: iam: legacy: false kubernetesVersion: v1.4.6 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/dockerbuilder/logflags/cluster.yaml b/nodeup/pkg/model/tests/dockerbuilder/logflags/cluster.yaml index 0bf756f11bd6d..9d3bebe04c79c 100644 --- a/nodeup/pkg/model/tests/dockerbuilder/logflags/cluster.yaml +++ b/nodeup/pkg/model/tests/dockerbuilder/logflags/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: legacy: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.logflags.example.com masterPublicName: api.logflags.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/dockerbuilder/skipinstall/cluster.yaml b/nodeup/pkg/model/tests/dockerbuilder/skipinstall/cluster.yaml index a3c1926892f89..b42134ecec95f 100644 --- a/nodeup/pkg/model/tests/dockerbuilder/skipinstall/cluster.yaml +++ b/nodeup/pkg/model/tests/dockerbuilder/skipinstall/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: legacy: false kubernetesVersion: v1.22.6 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/golden/awsiam/cluster.yaml b/nodeup/pkg/model/tests/golden/awsiam/cluster.yaml index 94d19b086498a..050a2add5deb7 100644 --- a/nodeup/pkg/model/tests/golden/awsiam/cluster.yaml +++ b/nodeup/pkg/model/tests/golden/awsiam/cluster.yaml @@ -33,7 +33,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/golden/dedicated-apiserver/cluster.yaml b/nodeup/pkg/model/tests/golden/dedicated-apiserver/cluster.yaml index 7760a8f56e947..b55de8a3e5bf6 100644 --- a/nodeup/pkg/model/tests/golden/dedicated-apiserver/cluster.yaml +++ b/nodeup/pkg/model/tests/golden/dedicated-apiserver/cluster.yaml @@ -31,7 +31,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/golden/hooks-containerd-exec/cluster.yaml b/nodeup/pkg/model/tests/golden/hooks-containerd-exec/cluster.yaml index 689bbab3ec949..f9cedc943ee7d 100644 --- a/nodeup/pkg/model/tests/golden/hooks-containerd-exec/cluster.yaml +++ b/nodeup/pkg/model/tests/golden/hooks-containerd-exec/cluster.yaml @@ -31,7 +31,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.22.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/golden/hooks-docker-exec/cluster.yaml b/nodeup/pkg/model/tests/golden/hooks-docker-exec/cluster.yaml index 8c8858b065c12..2ea2b8691b0c5 100644 --- a/nodeup/pkg/model/tests/golden/hooks-docker-exec/cluster.yaml +++ b/nodeup/pkg/model/tests/golden/hooks-docker-exec/cluster.yaml @@ -31,7 +31,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.22.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/golden/minimal/cluster.yaml 
b/nodeup/pkg/model/tests/golden/minimal/cluster.yaml index ea34339c09fce..7cfc8ddb6026b 100644 --- a/nodeup/pkg/model/tests/golden/minimal/cluster.yaml +++ b/nodeup/pkg/model/tests/golden/minimal/cluster.yaml @@ -31,7 +31,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.23.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/golden/side-loading/cluster.yaml b/nodeup/pkg/model/tests/golden/side-loading/cluster.yaml index ff0dfbc8e28a9..c2bc08d3ffbca 100644 --- a/nodeup/pkg/model/tests/golden/side-loading/cluster.yaml +++ b/nodeup/pkg/model/tests/golden/side-loading/cluster.yaml @@ -31,7 +31,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: https://storage.googleapis.com/kubernetes-release/release/v1.22.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/golden/without-etcd-events/cluster.yaml b/nodeup/pkg/model/tests/golden/without-etcd-events/cluster.yaml index 8543db732f931..d81ce4314d994 100644 --- a/nodeup/pkg/model/tests/golden/without-etcd-events/cluster.yaml +++ b/nodeup/pkg/model/tests/golden/without-etcd-events/cluster.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/kubelet/featuregates/cluster.yaml b/nodeup/pkg/model/tests/kubelet/featuregates/cluster.yaml index 58f59037f3c4b..9e3cc2546e54b 100644 --- a/nodeup/pkg/model/tests/kubelet/featuregates/cluster.yaml +++ b/nodeup/pkg/model/tests/kubelet/featuregates/cluster.yaml @@ -26,7 +26,6 @@ spec: AllowExtTrafficLocalEndpoints: "false" podManifestPath: "/etc/kubernetes/manifests" kubernetesVersion: v1.24.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/kubelet/warmpool/cluster.yaml b/nodeup/pkg/model/tests/kubelet/warmpool/cluster.yaml index 26893cafd2d60..db10bc6da6a2e 100644 --- a/nodeup/pkg/model/tests/kubelet/warmpool/cluster.yaml +++ b/nodeup/pkg/model/tests/kubelet/warmpool/cluster.yaml @@ -23,7 +23,6 @@ spec: kubelet: podManifestPath: "/etc/kubernetes/manifests" kubernetesVersion: v1.24.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/protokube/cluster.yaml b/nodeup/pkg/model/tests/protokube/cluster.yaml index 34e7330d48b66..32a121cfa58f1 100644 --- a/nodeup/pkg/model/tests/protokube/cluster.yaml +++ b/nodeup/pkg/model/tests/protokube/cluster.yaml @@ -27,7 +27,6 @@ spec: kubelet: hostnameOverride: master.hostname.invalid kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/updateservicebuilder/automatic/cluster.yaml b/nodeup/pkg/model/tests/updateservicebuilder/automatic/cluster.yaml index 34e7330d48b66..32a121cfa58f1 100644 --- a/nodeup/pkg/model/tests/updateservicebuilder/automatic/cluster.yaml +++ b/nodeup/pkg/model/tests/updateservicebuilder/automatic/cluster.yaml @@ -27,7 +27,6 @@ spec: kubelet: hostnameOverride: master.hostname.invalid kubernetesVersion: v1.21.0 - 
masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/nodeup/pkg/model/tests/updateservicebuilder/external/cluster.yaml b/nodeup/pkg/model/tests/updateservicebuilder/external/cluster.yaml index 907e0dfc01a8f..b2077366b862d 100644 --- a/nodeup/pkg/model/tests/updateservicebuilder/external/cluster.yaml +++ b/nodeup/pkg/model/tests/updateservicebuilder/external/cluster.yaml @@ -27,7 +27,6 @@ spec: kubelet: hostnameOverride: master.hostname.invalid kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go index 2da29b30861c5..d4feb9871f86f 100644 --- a/pkg/apis/kops/cluster.go +++ b/pkg/apis/kops/cluster.go @@ -70,10 +70,6 @@ type ClusterSpec struct { KubernetesVersion string `json:"kubernetesVersion,omitempty"` // Configuration of subnets we are targeting Subnets []ClusterSubnetSpec `json:"subnets,omitempty"` - // MasterPublicName is the external DNS name for the master nodes - MasterPublicName string `json:"masterPublicName,omitempty"` - // MasterInternalName is the internal DNS name for the master nodes - MasterInternalName string `json:"masterInternalName,omitempty"` // NetworkCIDR is the CIDR used for the AWS VPC / DO/ GCE Network, or otherwise allocated to k8s // This is a real CIDR, not the internal k8s network // On AWS, it maps to the VPC CIDR. It is not required on GCE. @@ -98,14 +94,12 @@ type ClusterSpec struct { // DNSZone is the DNS zone we should use when configuring DNS // This is because some clouds let us define a managed zone foo.bar, and then have // kubernetes.dev.foo.bar, without needing to define dev.foo.bar as a hosted zone. - // DNSZone will probably be a suffix of the MasterPublicName and MasterInternalName + // DNSZone will probably be a suffix of the MasterPublicName. // Note that DNSZone can either by the host name of the zone (containing dots), // or can be an identifier for the zone. DNSZone string `json:"dnsZone,omitempty"` // DNSControllerGossipConfig for the cluster assuming the use of gossip DNS DNSControllerGossipConfig *DNSControllerGossipConfig `json:"dnsControllerGossipConfig,omitempty"` - // AdditionalSANs adds additional Subject Alternate Names to apiserver cert that kops generates - AdditionalSANs []string `json:"additionalSANs,omitempty"` // ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local) ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"` // ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services @@ -123,8 +117,6 @@ type ClusterSpec struct { EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"` // SSHKeyName specifies a preexisting SSH key to use SSHKeyName *string `json:"sshKeyName,omitempty"` - // KubernetesAPIAccess is a list of the CIDRs that can access the Kubernetes API endpoint (master HTTPS) - KubernetesAPIAccess []string `json:"kubernetesAPIAccess,omitempty"` // IsolateMasters determines whether we should lock down masters so that they are not on the pod network. // true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master // if they have hostNetwork=true. 
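Reviewer note: this hunk group is the core API-surface change. MasterPublicName, AdditionalSANs, and KubernetesAPIAccess leave the top level of ClusterSpec and move under the api block as PublicName, AdditionalSANs, and Access; MasterInternalName is removed outright and derived on demand via Cluster.APIInternalName(), as the next hunks show. A minimal usage sketch (field names taken from this diff; the cluster name is illustrative):

    cluster := &kops.Cluster{}
    cluster.ObjectMeta.Name = "minimal.example.com"

    // Formerly spec.masterPublicName and spec.kubernetesAPIAccess.
    cluster.Spec.API.PublicName = "api.minimal.example.com"
    cluster.Spec.API.Access = []string{"0.0.0.0/0"}

    // Formerly spec.masterInternalName; now always derived from the cluster name.
    internal := cluster.APIInternalName() // "api.internal.minimal.example.com"
    _ = internal

Because the internal name is now derived rather than stored, the masterInternalName lines drop out of every test fixture above, and v1alpha2 retains the old fields only to support conversion.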
@@ -178,8 +170,8 @@ type ClusterSpec struct { // Networking configuration Networking *NetworkingSpec `json:"networking,omitempty"` - // API field controls how the API is exposed outside the cluster - API *AccessSpec `json:"api,omitempty"` + // API controls how the Kubernetes API is exposed. + API APISpec `json:"api,omitempty"` // Authentication field controls how the cluster is configured for authentication Authentication *AuthenticationSpec `json:"authentication,omitempty"` // Authorization field controls how the cluster is configured for authorization @@ -452,12 +444,18 @@ type RBACAuthorizationSpec struct{} type AlwaysAllowAuthorizationSpec struct{} -// AccessSpec provides configuration details related to kubeapi dns and ELB access -type AccessSpec struct { - // DNS will be used to provide config on kube-apiserver ELB DNS +// APISpec provides configuration details related to the Kubernetes API. +type APISpec struct { + // DNS will be used to provide configuration for the Kubernetes API's DNS server. DNS *DNSAccessSpec `json:"dns,omitempty"` - // LoadBalancer is the configuration for the kube-apiserver ELB + // LoadBalancer is the configuration for the Kubernetes API load balancer. LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"` + // PublicName is the external DNS name for the Kubernetes API. + PublicName string `json:"publicName,omitempty"` + // AdditionalSANs adds additional Subject Alternate Names to the Kubernetes API certificate. + AdditionalSANs []string `json:"additionalSANs,omitempty"` + // Access is a list of the CIDRs that can access the Kubernetes API endpoint. + Access []string `json:"access,omitempty"` } type DNSAccessSpec struct{} @@ -727,8 +725,8 @@ type ClusterSubnetSpec struct { Zone string `json:"zone,omitempty"` // Region is the region the subnet is in, set for subnets that are regionally scoped Region string `json:"region,omitempty"` - // ProviderID is the cloud provider id for the objects associated with the zone (the subnet on AWS) - ProviderID string `json:"id,omitempty"` + // ID is the cloud provider ID for the objects associated with the zone (the subnet on AWS). + ID string `json:"id,omitempty"` // Egress defines the method of traffic egress for this subnet Egress string `json:"egress,omitempty"` // Type define which one if the internal types (public, utility, private) the network is @@ -803,10 +801,6 @@ func (c *Cluster) FillDefaults() error { return fmt.Errorf("cluster Name not set in FillDefaults") } - if c.Spec.MasterInternalName == "" { - c.Spec.MasterInternalName = "api.internal." + c.ObjectMeta.Name - } - return nil } @@ -920,6 +914,10 @@ func (c *Cluster) UsesNoneDNS() bool { return false } +func (c *Cluster) APIInternalName() string { + return "api.internal." 
+ c.ObjectMeta.Name +} + func (c *ClusterSpec) IsIPv6Only() bool { return utils.IsIPv6CIDR(c.NonMasqueradeCIDR) } diff --git a/pkg/apis/kops/instancegroup.go b/pkg/apis/kops/instancegroup.go index 2f482b00c0aba..07079637d02ff 100644 --- a/pkg/apis/kops/instancegroup.go +++ b/pkg/apis/kops/instancegroup.go @@ -162,7 +162,7 @@ type InstanceGroupSpec struct { // SuspendProcesses disables the listed Scaling Policies SuspendProcesses []string `json:"suspendProcesses,omitempty"` // ExternalLoadBalancers define loadbalancers that should be attached to this instance group - ExternalLoadBalancers []LoadBalancer `json:"externalLoadBalancers,omitempty"` + ExternalLoadBalancers []LoadBalancerSpec `json:"externalLoadBalancers,omitempty"` // DetailedInstanceMonitoring defines if detailed-monitoring is enabled (AWS only) DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"` // IAMProfileSpec defines the identity of the cloud group IAM profile (AWS only). @@ -372,7 +372,7 @@ func (g *InstanceGroup) AddInstanceGroupNodeLabel() { } // LoadBalancer defines a load balancer -type LoadBalancer struct { +type LoadBalancerSpec struct { // LoadBalancerName to associate with this instance group (AWS ELB) LoadBalancerName *string `json:"loadBalancerName,omitempty"` // TargetGroupARN to associate with this instance group (AWS ALB/NLB) diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go index 1a6003ec6206d..fafefa8afde35 100644 --- a/pkg/apis/kops/v1alpha2/cluster.go +++ b/pkg/apis/kops/v1alpha2/cluster.go @@ -71,8 +71,10 @@ type ClusterSpec struct { // +k8s:conversion-gen=false Project string `json:"project,omitempty"` // MasterPublicName is the external DNS name for the master nodes + // +k8s:conversion-gen=false MasterPublicName string `json:"masterPublicName,omitempty"` - // MasterInternalName is the internal DNS name for the master nodes + // MasterInternalName is unused. + // +k8s:conversion-gen=false MasterInternalName string `json:"masterInternalName,omitempty"` // NetworkCIDR is the CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s // This is a real CIDR, not the internal k8s network @@ -97,13 +99,14 @@ type ClusterSpec struct { // DNSZone is the DNS zone we should use when configuring DNS // This is because some clouds let us define a managed zone foo.bar, and then have // kubernetes.dev.foo.bar, without needing to define dev.foo.bar as a hosted zone. - // DNSZone will probably be a suffix of the MasterPublicName and MasterInternalName + // DNSZone will probably be a suffix of the MasterPublicName. // Note that DNSZone can either by the host name of the zone (containing dots), // or can be an identifier for the zone. 
DNSZone string `json:"dnsZone,omitempty"` // DNSControllerGossipConfig for the cluster assuming the use of gossip DNS DNSControllerGossipConfig *DNSControllerGossipConfig `json:"dnsControllerGossipConfig,omitempty"` // AdditionalSANs adds additional Subject Alternate Names to apiserver cert that kops generates + // +k8s:conversion-gen=false AdditionalSANs []string `json:"additionalSans,omitempty"` // ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local) ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"` @@ -126,6 +129,7 @@ type ClusterSpec struct { SSHKeyName *string `json:"sshKeyName,omitempty"` // KubernetesAPIAccess determines the permitted access to the API endpoints (master HTTPS) // Currently only a single CIDR is supported (though a richer grammar could be added in future) + // +k8s:conversion-gen=false KubernetesAPIAccess []string `json:"kubernetesApiAccess,omitempty"` // IsolateMasters determines whether we should lock down masters so that they are not on the pod network. // true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master @@ -181,7 +185,9 @@ type ClusterSpec struct { // Networking configuration Networking *NetworkingSpec `json:"networking,omitempty"` // API field controls how the API is exposed outside the cluster - API *AccessSpec `json:"api,omitempty"` + // +k8s:conversion-gen=false + LegacyAPI *APISpec `json:"api,omitempty"` + API APISpec `json:"-"` // Authentication field controls how the cluster is configured for authentication Authentication *AuthenticationSpec `json:"authentication,omitempty"` // Authorization field controls how the cluster is configured for authorization @@ -417,15 +423,18 @@ type RBACAuthorizationSpec struct{} type AlwaysAllowAuthorizationSpec struct{} -// AccessSpec provides configuration details related to kubeapi dns and ELB access -type AccessSpec struct { +// APISpec provides configuration details related to kubeapi dns and ELB access +type APISpec struct { // DNS will be used to provide config on kube-apiserver ELB DNS DNS *DNSAccessSpec `json:"dns,omitempty"` // LoadBalancer is the configuration for the kube-apiserver ELB - LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"` + LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"` + PublicName string `json:"-"` + AdditionalSANs []string `json:"-"` + Access []string `json:"-"` } -func (s *AccessSpec) IsEmpty() bool { +func (s *APISpec) IsEmpty() bool { return s.DNS == nil && s.LoadBalancer == nil } @@ -690,8 +699,8 @@ type ClusterSubnetSpec struct { // IPv6CIDR is the IPv6 CIDR block assigned to the subnet. IPv6CIDR string `json:"ipv6CIDR,omitempty"` - // ProviderID is the cloud provider id for the objects associated with the zone (the subnet on AWS) - ProviderID string `json:"id,omitempty"` + // ID is the cloud provider ID for the objects associated with the zone (the subnet on AWS). 
+ ID string `json:"id,omitempty"` // Egress defines the method of traffic egress for this subnet Egress string `json:"egress,omitempty"` diff --git a/pkg/apis/kops/v1alpha2/conversion.go b/pkg/apis/kops/v1alpha2/conversion.go index 4f2d5a074298d..4623ec1d4df1c 100644 --- a/pkg/apis/kops/v1alpha2/conversion.go +++ b/pkg/apis/kops/v1alpha2/conversion.go @@ -75,6 +75,11 @@ func Convert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *kops if err := autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in, out, s); err != nil { return err } + if in.LegacyAPI != nil { + if err := autoConvert_v1alpha2_APISpec_To_kops_APISpec(in.LegacyAPI, &out.API, s); err != nil { + return err + } + } switch kops.CloudProviderID(in.LegacyCloudProvider) { case kops.CloudProviderAWS: out.CloudProvider.AWS = &kops.AWSSpec{} @@ -122,6 +127,9 @@ func Convert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *kops out.Hooks[i].Enabled = values.Bool(!*hook.Enabled) } } + out.API.PublicName = in.MasterPublicName + out.API.AdditionalSANs = in.AdditionalSANs + out.API.Access = in.KubernetesAPIAccess return nil } @@ -129,6 +137,13 @@ func Convert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, out if err := autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in, out, s); err != nil { return err } + out.LegacyAPI = &APISpec{} + if err := autoConvert_kops_APISpec_To_v1alpha2_APISpec(&in.API, out.LegacyAPI, s); err != nil { + return err + } + if out.API.IsEmpty() { + out.LegacyAPI = nil + } out.LegacyCloudProvider = string(in.GetCloudProvider()) switch kops.CloudProviderID(out.LegacyCloudProvider) { case kops.CloudProviderAzure: @@ -162,6 +177,9 @@ func Convert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, out out.Hooks[i].Enabled = values.Bool(!*hook.Enabled) } } + out.MasterPublicName = in.API.PublicName + out.AdditionalSANs = in.API.AdditionalSANs + out.KubernetesAPIAccess = in.API.Access return nil } diff --git a/pkg/apis/kops/v1alpha2/defaults.go b/pkg/apis/kops/v1alpha2/defaults.go index 0363989e0450a..111ddd2053cc1 100644 --- a/pkg/apis/kops/v1alpha2/defaults.go +++ b/pkg/apis/kops/v1alpha2/defaults.go @@ -51,31 +51,31 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) { } if obj.LegacyCloudProvider != "openstack" { - if obj.API == nil { - obj.API = &AccessSpec{} + if obj.LegacyAPI == nil { + obj.LegacyAPI = &APISpec{} } - if obj.API.IsEmpty() { + if obj.LegacyAPI.IsEmpty() { switch obj.Topology.ControlPlane { case TopologyPublic: - obj.API.DNS = &DNSAccessSpec{} + obj.LegacyAPI.DNS = &DNSAccessSpec{} case TopologyPrivate: - obj.API.LoadBalancer = &LoadBalancerAccessSpec{} + obj.LegacyAPI.LoadBalancer = &LoadBalancerAccessSpec{} default: klog.Infof("unknown master topology type: %q", obj.Topology.ControlPlane) } } - if obj.API.LoadBalancer != nil && obj.API.LoadBalancer.Type == "" { - obj.API.LoadBalancer.Type = LoadBalancerTypePublic + if obj.LegacyAPI.LoadBalancer != nil && obj.LegacyAPI.LoadBalancer.Type == "" { + obj.LegacyAPI.LoadBalancer.Type = LoadBalancerTypePublic } } - if obj.API.LoadBalancer != nil && obj.API.LoadBalancer.Class == "" && obj.LegacyCloudProvider == "aws" { - obj.API.LoadBalancer.Class = LoadBalancerClassClassic + if obj.LegacyAPI.LoadBalancer != nil && obj.LegacyAPI.LoadBalancer.Class == "" && obj.LegacyCloudProvider == "aws" { + obj.LegacyAPI.LoadBalancer.Class = LoadBalancerClassClassic } if obj.Authorization == nil { diff --git a/pkg/apis/kops/v1alpha2/instancegroup.go b/pkg/apis/kops/v1alpha2/instancegroup.go index 
524117ca9cc18..81bbbb0dea7e7 100644 --- a/pkg/apis/kops/v1alpha2/instancegroup.go +++ b/pkg/apis/kops/v1alpha2/instancegroup.go @@ -128,7 +128,7 @@ type InstanceGroupSpec struct { // SuspendProcesses disables the listed Scaling Policies SuspendProcesses []string `json:"suspendProcesses,omitempty"` // ExternalLoadBalancers define loadbalancers that should be attached to this instance group - ExternalLoadBalancers []LoadBalancer `json:"externalLoadBalancers,omitempty"` + ExternalLoadBalancers []LoadBalancerSpec `json:"externalLoadBalancers,omitempty"` // DetailedInstanceMonitoring defines if detailed-monitoring is enabled (AWS only) DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"` // IAMProfileSpec defines the identity of the cloud group IAM profile (AWS only). @@ -274,7 +274,7 @@ type IAMProfileSpec struct { } // LoadBalancer defines a load balancer -type LoadBalancer struct { +type LoadBalancerSpec struct { // LoadBalancerName to associate with this instance group (AWS ELB) LoadBalancerName *string `json:"loadBalancerName,omitempty"` // TargetGroupARN to associate with this instance group (AWS ALB/NLB) diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index cac1ac5bfb769..d1de9ceb9ad4d 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -34,6 +34,16 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*APISpec)(nil), (*kops.APISpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_APISpec_To_kops_APISpec(a.(*APISpec), b.(*kops.APISpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kops.APISpec)(nil), (*APISpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_APISpec_To_v1alpha2_APISpec(a.(*kops.APISpec), b.(*APISpec), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*AWSAuthenticationIdentityMappingSpec)(nil), (*kops.AWSAuthenticationIdentityMappingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_AWSAuthenticationIdentityMappingSpec_To_kops_AWSAuthenticationIdentityMappingSpec(a.(*AWSAuthenticationIdentityMappingSpec), b.(*kops.AWSAuthenticationIdentityMappingSpec), scope) }); err != nil { @@ -104,16 +114,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*AccessSpec)(nil), (*kops.AccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_AccessSpec_To_kops_AccessSpec(a.(*AccessSpec), b.(*kops.AccessSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kops.AccessSpec)(nil), (*AccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kops_AccessSpec_To_v1alpha2_AccessSpec(a.(*kops.AccessSpec), b.(*AccessSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AddonSpec)(nil), (*kops.AddonSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_AddonSpec_To_kops_AddonSpec(a.(*AddonSpec), b.(*kops.AddonSpec), scope) }); err != nil { @@ -764,23 +764,23 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil 
{ return err } - if err := s.AddGeneratedConversionFunc((*LoadBalancer)(nil), (*kops.LoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_LoadBalancer_To_kops_LoadBalancer(a.(*LoadBalancer), b.(*kops.LoadBalancer), scope) + if err := s.AddGeneratedConversionFunc((*LoadBalancerAccessSpec)(nil), (*kops.LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(a.(*LoadBalancerAccessSpec), b.(*kops.LoadBalancerAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*kops.LoadBalancer)(nil), (*LoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kops_LoadBalancer_To_v1alpha2_LoadBalancer(a.(*kops.LoadBalancer), b.(*LoadBalancer), scope) + if err := s.AddGeneratedConversionFunc((*kops.LoadBalancerAccessSpec)(nil), (*LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_LoadBalancerAccessSpec_To_v1alpha2_LoadBalancerAccessSpec(a.(*kops.LoadBalancerAccessSpec), b.(*LoadBalancerAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*LoadBalancerAccessSpec)(nil), (*kops.LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(a.(*LoadBalancerAccessSpec), b.(*kops.LoadBalancerAccessSpec), scope) + if err := s.AddGeneratedConversionFunc((*LoadBalancerSpec)(nil), (*kops.LoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_LoadBalancerSpec_To_kops_LoadBalancerSpec(a.(*LoadBalancerSpec), b.(*kops.LoadBalancerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*kops.LoadBalancerAccessSpec)(nil), (*LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kops_LoadBalancerAccessSpec_To_v1alpha2_LoadBalancerAccessSpec(a.(*kops.LoadBalancerAccessSpec), b.(*LoadBalancerAccessSpec), scope) + if err := s.AddGeneratedConversionFunc((*kops.LoadBalancerSpec)(nil), (*LoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_LoadBalancerSpec_To_v1alpha2_LoadBalancerSpec(a.(*kops.LoadBalancerSpec), b.(*LoadBalancerSpec), scope) }); err != nil { return err } @@ -1237,6 +1237,66 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } +func autoConvert_v1alpha2_APISpec_To_kops_APISpec(in *APISpec, out *kops.APISpec, s conversion.Scope) error { + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(kops.DNSAccessSpec) + if err := Convert_v1alpha2_DNSAccessSpec_To_kops_DNSAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.DNS = nil + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(kops.LoadBalancerAccessSpec) + if err := Convert_v1alpha2_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.LoadBalancer = nil + } + out.PublicName = in.PublicName + out.AdditionalSANs = in.AdditionalSANs + out.Access = in.Access + return nil +} + +// Convert_v1alpha2_APISpec_To_kops_APISpec is an autogenerated conversion function. 
+func Convert_v1alpha2_APISpec_To_kops_APISpec(in *APISpec, out *kops.APISpec, s conversion.Scope) error { + return autoConvert_v1alpha2_APISpec_To_kops_APISpec(in, out, s) +} + +func autoConvert_kops_APISpec_To_v1alpha2_APISpec(in *kops.APISpec, out *APISpec, s conversion.Scope) error { + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSAccessSpec) + if err := Convert_kops_DNSAccessSpec_To_v1alpha2_DNSAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.DNS = nil + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerAccessSpec) + if err := Convert_kops_LoadBalancerAccessSpec_To_v1alpha2_LoadBalancerAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.LoadBalancer = nil + } + out.PublicName = in.PublicName + out.AdditionalSANs = in.AdditionalSANs + out.Access = in.Access + return nil +} + +// Convert_kops_APISpec_To_v1alpha2_APISpec is an autogenerated conversion function. +func Convert_kops_APISpec_To_v1alpha2_APISpec(in *kops.APISpec, out *APISpec, s conversion.Scope) error { + return autoConvert_kops_APISpec_To_v1alpha2_APISpec(in, out, s) +} + func autoConvert_v1alpha2_AWSAuthenticationIdentityMappingSpec_To_kops_AWSAuthenticationIdentityMappingSpec(in *AWSAuthenticationIdentityMappingSpec, out *kops.AWSAuthenticationIdentityMappingSpec, s conversion.Scope) error { out.ARN = in.ARN out.Username = in.Username @@ -1439,60 +1499,6 @@ func Convert_kops_AccessLogSpec_To_v1alpha2_AccessLogSpec(in *kops.AccessLogSpec return autoConvert_kops_AccessLogSpec_To_v1alpha2_AccessLogSpec(in, out, s) } -func autoConvert_v1alpha2_AccessSpec_To_kops_AccessSpec(in *AccessSpec, out *kops.AccessSpec, s conversion.Scope) error { - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(kops.DNSAccessSpec) - if err := Convert_v1alpha2_DNSAccessSpec_To_kops_DNSAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.DNS = nil - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(kops.LoadBalancerAccessSpec) - if err := Convert_v1alpha2_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.LoadBalancer = nil - } - return nil -} - -// Convert_v1alpha2_AccessSpec_To_kops_AccessSpec is an autogenerated conversion function. -func Convert_v1alpha2_AccessSpec_To_kops_AccessSpec(in *AccessSpec, out *kops.AccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_AccessSpec_To_kops_AccessSpec(in, out, s) -} - -func autoConvert_kops_AccessSpec_To_v1alpha2_AccessSpec(in *kops.AccessSpec, out *AccessSpec, s conversion.Scope) error { - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(DNSAccessSpec) - if err := Convert_kops_DNSAccessSpec_To_v1alpha2_DNSAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.DNS = nil - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(LoadBalancerAccessSpec) - if err := Convert_kops_LoadBalancerAccessSpec_To_v1alpha2_LoadBalancerAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.LoadBalancer = nil - } - return nil -} - -// Convert_kops_AccessSpec_To_v1alpha2_AccessSpec is an autogenerated conversion function. 
-func Convert_kops_AccessSpec_To_v1alpha2_AccessSpec(in *kops.AccessSpec, out *AccessSpec, s conversion.Scope) error { - return autoConvert_kops_AccessSpec_To_v1alpha2_AccessSpec(in, out, s) -} - func autoConvert_v1alpha2_AddonSpec_To_kops_AddonSpec(in *AddonSpec, out *kops.AddonSpec, s conversion.Scope) error { out.Manifest = in.Manifest return nil @@ -2445,8 +2451,8 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * out.Subnets = nil } // INFO: in.Project opted out of conversion generation - out.MasterPublicName = in.MasterPublicName - out.MasterInternalName = in.MasterInternalName + // INFO: in.MasterPublicName opted out of conversion generation + // INFO: in.MasterInternalName opted out of conversion generation out.NetworkCIDR = in.NetworkCIDR out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs out.NetworkID = in.NetworkID @@ -2472,7 +2478,7 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } else { out.DNSControllerGossipConfig = nil } - out.AdditionalSANs = in.AdditionalSANs + // INFO: in.AdditionalSANs opted out of conversion generation out.ClusterDNSDomain = in.ClusterDNSDomain out.ServiceClusterIPRange = in.ServiceClusterIPRange out.PodCIDR = in.PodCIDR @@ -2489,7 +2495,7 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * out.EgressProxy = nil } out.SSHKeyName = in.SSHKeyName - out.KubernetesAPIAccess = in.KubernetesAPIAccess + // INFO: in.KubernetesAPIAccess opted out of conversion generation out.IsolateMasters = in.IsolateMasters out.UpdatePolicy = in.UpdatePolicy out.ExternalPolicies = in.ExternalPolicies @@ -2687,14 +2693,9 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } else { out.Networking = nil } - if in.API != nil { - in, out := &in.API, &out.API - *out = new(kops.AccessSpec) - if err := Convert_v1alpha2_AccessSpec_To_kops_AccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.API = nil + // INFO: in.LegacyAPI opted out of conversion generation + if err := Convert_v1alpha2_APISpec_To_kops_APISpec(&in.API, &out.API, s); err != nil { + return err } if in.Authentication != nil { in, out := &in.Authentication, &out.Authentication @@ -2869,8 +2870,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, } else { out.Subnets = nil } - out.MasterPublicName = in.MasterPublicName - out.MasterInternalName = in.MasterInternalName out.NetworkCIDR = in.NetworkCIDR out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs out.NetworkID = in.NetworkID @@ -2896,7 +2895,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, } else { out.DNSControllerGossipConfig = nil } - out.AdditionalSANs = in.AdditionalSANs out.ClusterDNSDomain = in.ClusterDNSDomain out.ServiceClusterIPRange = in.ServiceClusterIPRange out.PodCIDR = in.PodCIDR @@ -2913,7 +2911,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, out.EgressProxy = nil } out.SSHKeyName = in.SSHKeyName - out.KubernetesAPIAccess = in.KubernetesAPIAccess out.IsolateMasters = in.IsolateMasters out.UpdatePolicy = in.UpdatePolicy out.ExternalPolicies = in.ExternalPolicies @@ -3111,14 +3108,8 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, } else { out.Networking = nil } - if in.API != nil { - in, out := &in.API, &out.API - *out = new(AccessSpec) - if err := Convert_kops_AccessSpec_To_v1alpha2_AccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.API 
= nil + if err := Convert_kops_APISpec_To_v1alpha2_APISpec(&in.API, &out.API, s); err != nil { + return err } if in.Authentication != nil { in, out := &in.Authentication, &out.Authentication @@ -3262,7 +3253,7 @@ func autoConvert_v1alpha2_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(in *Cluste out.Region = in.Region out.CIDR = in.CIDR out.IPv6CIDR = in.IPv6CIDR - out.ProviderID = in.ProviderID + out.ID = in.ID out.Egress = in.Egress out.Type = kops.SubnetType(in.Type) out.PublicIP = in.PublicIP @@ -3291,7 +3282,7 @@ func autoConvert_kops_ClusterSubnetSpec_To_v1alpha2_ClusterSubnetSpec(in *kops.C out.IPv6CIDR = in.IPv6CIDR out.Zone = in.Zone out.Region = in.Region - out.ProviderID = in.ProviderID + out.ID = in.ID out.Egress = in.Egress out.Type = SubnetType(in.Type) out.PublicIP = in.PublicIP @@ -4438,9 +4429,9 @@ func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan out.SuspendProcesses = in.SuspendProcesses if in.ExternalLoadBalancers != nil { in, out := &in.ExternalLoadBalancers, &out.ExternalLoadBalancers - *out = make([]kops.LoadBalancer, len(*in)) + *out = make([]kops.LoadBalancerSpec, len(*in)) for i := range *in { - if err := Convert_v1alpha2_LoadBalancer_To_kops_LoadBalancer(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha2_LoadBalancerSpec_To_kops_LoadBalancerSpec(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4624,9 +4615,9 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.I out.SuspendProcesses = in.SuspendProcesses if in.ExternalLoadBalancers != nil { in, out := &in.ExternalLoadBalancers, &out.ExternalLoadBalancers - *out = make([]LoadBalancer, len(*in)) + *out = make([]LoadBalancerSpec, len(*in)) for i := range *in { - if err := Convert_kops_LoadBalancer_To_v1alpha2_LoadBalancer(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_kops_LoadBalancerSpec_To_v1alpha2_LoadBalancerSpec(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5807,28 +5798,6 @@ func Convert_kops_LeaderElectionConfiguration_To_v1alpha2_LeaderElectionConfigur return autoConvert_kops_LeaderElectionConfiguration_To_v1alpha2_LeaderElectionConfiguration(in, out, s) } -func autoConvert_v1alpha2_LoadBalancer_To_kops_LoadBalancer(in *LoadBalancer, out *kops.LoadBalancer, s conversion.Scope) error { - out.LoadBalancerName = in.LoadBalancerName - out.TargetGroupARN = in.TargetGroupARN - return nil -} - -// Convert_v1alpha2_LoadBalancer_To_kops_LoadBalancer is an autogenerated conversion function. -func Convert_v1alpha2_LoadBalancer_To_kops_LoadBalancer(in *LoadBalancer, out *kops.LoadBalancer, s conversion.Scope) error { - return autoConvert_v1alpha2_LoadBalancer_To_kops_LoadBalancer(in, out, s) -} - -func autoConvert_kops_LoadBalancer_To_v1alpha2_LoadBalancer(in *kops.LoadBalancer, out *LoadBalancer, s conversion.Scope) error { - out.LoadBalancerName = in.LoadBalancerName - out.TargetGroupARN = in.TargetGroupARN - return nil -} - -// Convert_kops_LoadBalancer_To_v1alpha2_LoadBalancer is an autogenerated conversion function. 
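LoadBalancer -> LoadBalancerSpec is a pure rename (same two fields), so the converters being removed here reappear further down in this file under the new name. Callers only see the type name change; a hedged sketch with placeholder values (fi is this repo's k8s.io/kops/upup/pkg/fi helper package):

    ig := kops.InstanceGroupSpec{
        ExternalLoadBalancers: []kops.LoadBalancerSpec{
            // attach a classic ELB by name (AWS ELB)
            {LoadBalancerName: fi.PtrTo("api-elb-example")},
            // or attach an ALB/NLB target group by ARN
            {TargetGroupARN: fi.PtrTo("arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/example/0123456789abcdef")},
        },
    }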
-func Convert_kops_LoadBalancer_To_v1alpha2_LoadBalancer(in *kops.LoadBalancer, out *LoadBalancer, s conversion.Scope) error { - return autoConvert_kops_LoadBalancer_To_v1alpha2_LoadBalancer(in, out, s) -} - func autoConvert_v1alpha2_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(in *LoadBalancerAccessSpec, out *kops.LoadBalancerAccessSpec, s conversion.Scope) error { out.Class = kops.LoadBalancerClass(in.Class) out.Type = kops.LoadBalancerType(in.Type) @@ -5905,6 +5874,28 @@ func Convert_kops_LoadBalancerAccessSpec_To_v1alpha2_LoadBalancerAccessSpec(in * return autoConvert_kops_LoadBalancerAccessSpec_To_v1alpha2_LoadBalancerAccessSpec(in, out, s) } +func autoConvert_v1alpha2_LoadBalancerSpec_To_kops_LoadBalancerSpec(in *LoadBalancerSpec, out *kops.LoadBalancerSpec, s conversion.Scope) error { + out.LoadBalancerName = in.LoadBalancerName + out.TargetGroupARN = in.TargetGroupARN + return nil +} + +// Convert_v1alpha2_LoadBalancerSpec_To_kops_LoadBalancerSpec is an autogenerated conversion function. +func Convert_v1alpha2_LoadBalancerSpec_To_kops_LoadBalancerSpec(in *LoadBalancerSpec, out *kops.LoadBalancerSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_LoadBalancerSpec_To_kops_LoadBalancerSpec(in, out, s) +} + +func autoConvert_kops_LoadBalancerSpec_To_v1alpha2_LoadBalancerSpec(in *kops.LoadBalancerSpec, out *LoadBalancerSpec, s conversion.Scope) error { + out.LoadBalancerName = in.LoadBalancerName + out.TargetGroupARN = in.TargetGroupARN + return nil +} + +// Convert_kops_LoadBalancerSpec_To_v1alpha2_LoadBalancerSpec is an autogenerated conversion function. +func Convert_kops_LoadBalancerSpec_To_v1alpha2_LoadBalancerSpec(in *kops.LoadBalancerSpec, out *LoadBalancerSpec, s conversion.Scope) error { + return autoConvert_kops_LoadBalancerSpec_To_v1alpha2_LoadBalancerSpec(in, out, s) +} + func autoConvert_v1alpha2_LoadBalancerSubnetSpec_To_kops_LoadBalancerSubnetSpec(in *LoadBalancerSubnetSpec, out *kops.LoadBalancerSubnetSpec, s conversion.Scope) error { out.Name = in.Name out.PrivateIPv4Address = in.PrivateIPv4Address diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index 4f3169ef10dec..435d60ad24c18 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -28,6 +28,42 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISpec) DeepCopyInto(out *APISpec) { + *out = *in + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSAccessSpec) + **out = **in + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerAccessSpec) + (*in).DeepCopyInto(*out) + } + if in.AdditionalSANs != nil { + in, out := &in.AdditionalSANs, &out.AdditionalSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISpec. +func (in *APISpec) DeepCopy() *APISpec { + if in == nil { + return nil + } + out := new(APISpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AWSAuthenticationIdentityMappingSpec) DeepCopyInto(out *AWSAuthenticationIdentityMappingSpec) { *out = *in @@ -224,32 +260,6 @@ func (in *AccessLogSpec) DeepCopy() *AccessLogSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessSpec) DeepCopyInto(out *AccessSpec) { - *out = *in - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(DNSAccessSpec) - **out = **in - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(LoadBalancerAccessSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessSpec. -func (in *AccessSpec) DeepCopy() *AccessSpec { - if in == nil { - return nil - } - out := new(AccessSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AddonSpec) DeepCopyInto(out *AddonSpec) { *out = *in @@ -1256,11 +1266,12 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(NetworkingSpec) (*in).DeepCopyInto(*out) } - if in.API != nil { - in, out := &in.API, &out.API - *out = new(AccessSpec) + if in.LegacyAPI != nil { + in, out := &in.LegacyAPI, &out.LegacyAPI + *out = new(APISpec) (*in).DeepCopyInto(*out) } + in.API.DeepCopyInto(&out.API) if in.Authentication != nil { in, out := &in.Authentication, &out.Authentication *out = new(AuthenticationSpec) @@ -2530,7 +2541,7 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { } if in.ExternalLoadBalancers != nil { in, out := &in.ExternalLoadBalancers, &out.ExternalLoadBalancers - *out = make([]LoadBalancer, len(*in)) + *out = make([]LoadBalancerSpec, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -4025,32 +4036,6 @@ func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { - *out = *in - if in.LoadBalancerName != nil { - in, out := &in.LoadBalancerName, &out.LoadBalancerName - *out = new(string) - **out = **in - } - if in.TargetGroupARN != nil { - in, out := &in.TargetGroupARN, &out.TargetGroupARN - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. -func (in *LoadBalancer) DeepCopy() *LoadBalancer { - if in == nil { - return nil - } - out := new(LoadBalancer) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerAccessSpec) DeepCopyInto(out *LoadBalancerAccessSpec) { *out = *in @@ -4104,6 +4089,32 @@ func (in *LoadBalancerAccessSpec) DeepCopy() *LoadBalancerAccessSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerSpec) DeepCopyInto(out *LoadBalancerSpec) { + *out = *in + if in.LoadBalancerName != nil { + in, out := &in.LoadBalancerName, &out.LoadBalancerName + *out = new(string) + **out = **in + } + if in.TargetGroupARN != nil { + in, out := &in.TargetGroupARN, &out.TargetGroupARN + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSpec. +func (in *LoadBalancerSpec) DeepCopy() *LoadBalancerSpec { + if in == nil { + return nil + } + out := new(LoadBalancerSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerSubnetSpec) DeepCopyInto(out *LoadBalancerSubnetSpec) { *out = *in diff --git a/pkg/apis/kops/v1alpha3/cluster.go b/pkg/apis/kops/v1alpha3/cluster.go index 5a9c5c6e085f4..0f1d7b348ab2b 100644 --- a/pkg/apis/kops/v1alpha3/cluster.go +++ b/pkg/apis/kops/v1alpha3/cluster.go @@ -66,10 +66,6 @@ type ClusterSpec struct { KubernetesVersion string `json:"kubernetesVersion,omitempty"` // Configuration of subnets we are targeting Subnets []ClusterSubnetSpec `json:"subnets,omitempty"` - // MasterPublicName is the external DNS name for the master nodes - MasterPublicName string `json:"masterPublicName,omitempty"` - // MasterInternalName is the internal DNS name for the master nodes - MasterInternalName string `json:"masterInternalName,omitempty"` // NetworkCIDR is the CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s // This is a real CIDR, not the internal k8s network // On AWS, it maps to the VPC CIDR. It is not required on GCE. @@ -93,14 +89,12 @@ type ClusterSpec struct { // DNSZone is the DNS zone we should use when configuring DNS // This is because some clouds let us define a managed zone foo.bar, and then have // kubernetes.dev.foo.bar, without needing to define dev.foo.bar as a hosted zone. - // DNSZone will probably be a suffix of the MasterPublicName and MasterInternalName + // DNSZone will probably be a suffix of the API PublicName. // Note that DNSZone can either by the host name of the zone (containing dots), // or can be an identifier for the zone. DNSZone string `json:"dnsZone,omitempty"` // DNSControllerGossipConfig for the cluster assuming the use of gossip DNS DNSControllerGossipConfig *DNSControllerGossipConfig `json:"dnsControllerGossipConfig,omitempty"` - // AdditionalSANs adds additional Subject Alternate Names to apiserver cert that kops generates - AdditionalSANs []string `json:"additionalSANs,omitempty"` // ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local) ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"` // ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services @@ -120,9 +114,6 @@ type ClusterSpec struct { EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"` // SSHKeyName specifies a preexisting SSH key to use SSHKeyName *string `json:"sshKeyName,omitempty"` - // KubernetesAPIAccess determines the permitted access to the API endpoints (master HTTPS) - // Currently only a single CIDR is supported (though a richer grammar could be added in future) - KubernetesAPIAccess []string `json:"kubernetesAPIAccess,omitempty"` // IsolateMasters determines whether we should lock down masters so that they are not on the pod network.
// true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master // if they have hostNetwork=true. @@ -176,8 +167,8 @@ type ClusterSpec struct { // Networking configuration Networking *NetworkingSpec `json:"networking,omitempty"` - // API field controls how the API is exposed outside the cluster - API *AccessSpec `json:"api,omitempty"` + // API controls how the Kubernetes API is exposed. + API APISpec `json:"api,omitempty"` // Authentication field controls how the cluster is configured for authentication Authentication *AuthenticationSpec `json:"authentication,omitempty"` // Authorization field controls how the cluster is configured for authorization @@ -423,16 +414,18 @@ type RBACAuthorizationSpec struct{} type AlwaysAllowAuthorizationSpec struct{} -// AccessSpec provides configuration details related to kubeapi dns and ELB access -type AccessSpec struct { - // DNS will be used to provide config on kube-apiserver ELB DNS +// APISpec provides configuration details related to the Kubernetes API. +type APISpec struct { + // DNS will be used to provide configuration for the Kubernetes API's DNS server. DNS *DNSAccessSpec `json:"dns,omitempty"` - // LoadBalancer is the configuration for the kube-apiserver ELB + // LoadBalancer is the configuration for the Kubernetes API load balancer. LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"` -} - -func (s *AccessSpec) IsEmpty() bool { - return s.DNS == nil && s.LoadBalancer == nil + // PublicName is the external DNS name for the Kubernetes API. + PublicName string `json:"publicName,omitempty"` + // AdditionalSANs adds additional Subject Alternate Names to the Kubernetes API certificate. + AdditionalSANs []string `json:"additionalSANs,omitempty"` + // Access is a list of the CIDRs that can access the Kubernetes API endpoint. + Access []string `json:"access,omitempty"` } type DNSAccessSpec struct{} @@ -668,8 +661,8 @@ type ClusterSubnetSpec struct { // IPv6CIDR is the IPv6 CIDR block assigned to the subnet. IPv6CIDR string `json:"ipv6CIDR,omitempty"` - // ProviderID is the cloud provider id for the objects associated with the zone (the subnet on AWS) - ProviderID string `json:"id,omitempty"` + // ID is the cloud provider ID for the objects associated with the zone (the subnet on AWS). 
+ ID string `json:"id,omitempty"` // Egress defines the method of traffic egress for this subnet Egress string `json:"egress,omitempty"` diff --git a/pkg/apis/kops/v1alpha3/defaults.go b/pkg/apis/kops/v1alpha3/defaults.go index 13267dfb43650..dec28740da67d 100644 --- a/pkg/apis/kops/v1alpha3/defaults.go +++ b/pkg/apis/kops/v1alpha3/defaults.go @@ -47,11 +47,7 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) { } if obj.CloudProvider.Openstack == nil { - if obj.API == nil { - obj.API = &AccessSpec{} - } - - if obj.API.IsEmpty() { + if obj.API.DNS == nil && obj.API.LoadBalancer == nil { switch obj.Topology.ControlPlane { case TopologyPublic: obj.API.DNS = &DNSAccessSpec{} diff --git a/pkg/apis/kops/v1alpha3/instancegroup.go b/pkg/apis/kops/v1alpha3/instancegroup.go index feefb424610fc..e2c29b7c291b0 100644 --- a/pkg/apis/kops/v1alpha3/instancegroup.go +++ b/pkg/apis/kops/v1alpha3/instancegroup.go @@ -125,7 +125,7 @@ type InstanceGroupSpec struct { // SuspendProcesses disables the listed Scaling Policies SuspendProcesses []string `json:"suspendProcesses,omitempty"` // ExternalLoadBalancers define loadbalancers that should be attached to this instance group - ExternalLoadBalancers []LoadBalancer `json:"externalLoadBalancers,omitempty"` + ExternalLoadBalancers []LoadBalancerSpec `json:"externalLoadBalancers,omitempty"` // DetailedInstanceMonitoring defines if detailed-monitoring is enabled (AWS only) DetailedInstanceMonitoring *bool `json:"detailedInstanceMonitoring,omitempty"` // IAMProfileSpec defines the identity of the cloud group IAM profile (AWS only). @@ -271,7 +271,7 @@ type IAMProfileSpec struct { } -// LoadBalancer defines a load balancer -type LoadBalancer struct { +// LoadBalancerSpec defines a load balancer +type LoadBalancerSpec struct { // LoadBalancerName to associate with this instance group (AWS ELB) LoadBalancerName *string `json:"loadBalancerName,omitempty"` // TargetGroupARN to associate with this instance group (AWS ALB/NLB) diff --git a/pkg/apis/kops/v1alpha3/zz_generated.conversion.go b/pkg/apis/kops/v1alpha3/zz_generated.conversion.go index 705d602baa022..a51f2237db811 100644 --- a/pkg/apis/kops/v1alpha3/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha3/zz_generated.conversion.go @@ -34,6 +34,16 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*APISpec)(nil), (*kops.APISpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_APISpec_To_kops_APISpec(a.(*APISpec), b.(*kops.APISpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kops.APISpec)(nil), (*APISpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_APISpec_To_v1alpha3_APISpec(a.(*kops.APISpec), b.(*APISpec), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*AWSAuthenticationIdentityMappingSpec)(nil), (*kops.AWSAuthenticationIdentityMappingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_AWSAuthenticationIdentityMappingSpec_To_kops_AWSAuthenticationIdentityMappingSpec(a.(*AWSAuthenticationIdentityMappingSpec), b.(*kops.AWSAuthenticationIdentityMappingSpec), scope) }); err != nil { @@ -114,16 +124,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*AccessSpec)(nil), (*kops.AccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_AccessSpec_To_kops_AccessSpec(a.(*AccessSpec), b.(*kops.AccessSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kops.AccessSpec)(nil), (*AccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kops_AccessSpec_To_v1alpha3_AccessSpec(a.(*kops.AccessSpec), b.(*AccessSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AddonSpec)(nil), (*kops.AddonSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_AddonSpec_To_kops_AddonSpec(a.(*AddonSpec), b.(*kops.AddonSpec), scope) }); err != nil { @@ -844,23 +844,23 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*LoadBalancer)(nil), (*kops.LoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_LoadBalancer_To_kops_LoadBalancer(a.(*LoadBalancer), b.(*kops.LoadBalancer), scope) + if err := s.AddGeneratedConversionFunc((*LoadBalancerAccessSpec)(nil), (*kops.LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(a.(*LoadBalancerAccessSpec), b.(*kops.LoadBalancerAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*kops.LoadBalancer)(nil), (*LoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kops_LoadBalancer_To_v1alpha3_LoadBalancer(a.(*kops.LoadBalancer), b.(*LoadBalancer), scope) + if err := s.AddGeneratedConversionFunc((*kops.LoadBalancerAccessSpec)(nil), (*LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_LoadBalancerAccessSpec_To_v1alpha3_LoadBalancerAccessSpec(a.(*kops.LoadBalancerAccessSpec), b.(*LoadBalancerAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*LoadBalancerAccessSpec)(nil), (*kops.LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(a.(*LoadBalancerAccessSpec), b.(*kops.LoadBalancerAccessSpec), scope) + if err := 
s.AddGeneratedConversionFunc((*LoadBalancerSpec)(nil), (*kops.LoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_LoadBalancerSpec_To_kops_LoadBalancerSpec(a.(*LoadBalancerSpec), b.(*kops.LoadBalancerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*kops.LoadBalancerAccessSpec)(nil), (*LoadBalancerAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kops_LoadBalancerAccessSpec_To_v1alpha3_LoadBalancerAccessSpec(a.(*kops.LoadBalancerAccessSpec), b.(*LoadBalancerAccessSpec), scope) + if err := s.AddGeneratedConversionFunc((*kops.LoadBalancerSpec)(nil), (*LoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_LoadBalancerSpec_To_v1alpha3_LoadBalancerSpec(a.(*kops.LoadBalancerSpec), b.(*LoadBalancerSpec), scope) }); err != nil { return err } @@ -1247,6 +1247,66 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } +func autoConvert_v1alpha3_APISpec_To_kops_APISpec(in *APISpec, out *kops.APISpec, s conversion.Scope) error { + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(kops.DNSAccessSpec) + if err := Convert_v1alpha3_DNSAccessSpec_To_kops_DNSAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.DNS = nil + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(kops.LoadBalancerAccessSpec) + if err := Convert_v1alpha3_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.LoadBalancer = nil + } + out.PublicName = in.PublicName + out.AdditionalSANs = in.AdditionalSANs + out.Access = in.Access + return nil +} + +// Convert_v1alpha3_APISpec_To_kops_APISpec is an autogenerated conversion function. +func Convert_v1alpha3_APISpec_To_kops_APISpec(in *APISpec, out *kops.APISpec, s conversion.Scope) error { + return autoConvert_v1alpha3_APISpec_To_kops_APISpec(in, out, s) +} + +func autoConvert_kops_APISpec_To_v1alpha3_APISpec(in *kops.APISpec, out *APISpec, s conversion.Scope) error { + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSAccessSpec) + if err := Convert_kops_DNSAccessSpec_To_v1alpha3_DNSAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.DNS = nil + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerAccessSpec) + if err := Convert_kops_LoadBalancerAccessSpec_To_v1alpha3_LoadBalancerAccessSpec(*in, *out, s); err != nil { + return err + } + } else { + out.LoadBalancer = nil + } + out.PublicName = in.PublicName + out.AdditionalSANs = in.AdditionalSANs + out.Access = in.Access + return nil +} + +// Convert_kops_APISpec_To_v1alpha3_APISpec is an autogenerated conversion function. 
+func Convert_kops_APISpec_To_v1alpha3_APISpec(in *kops.APISpec, out *APISpec, s conversion.Scope) error { + return autoConvert_kops_APISpec_To_v1alpha3_APISpec(in, out, s) +} + func autoConvert_v1alpha3_AWSAuthenticationIdentityMappingSpec_To_kops_AWSAuthenticationIdentityMappingSpec(in *AWSAuthenticationIdentityMappingSpec, out *kops.AWSAuthenticationIdentityMappingSpec, s conversion.Scope) error { out.ARN = in.ARN out.Username = in.Username @@ -1467,60 +1527,6 @@ func Convert_kops_AccessLogSpec_To_v1alpha3_AccessLogSpec(in *kops.AccessLogSpec return autoConvert_kops_AccessLogSpec_To_v1alpha3_AccessLogSpec(in, out, s) } -func autoConvert_v1alpha3_AccessSpec_To_kops_AccessSpec(in *AccessSpec, out *kops.AccessSpec, s conversion.Scope) error { - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(kops.DNSAccessSpec) - if err := Convert_v1alpha3_DNSAccessSpec_To_kops_DNSAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.DNS = nil - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(kops.LoadBalancerAccessSpec) - if err := Convert_v1alpha3_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.LoadBalancer = nil - } - return nil -} - -// Convert_v1alpha3_AccessSpec_To_kops_AccessSpec is an autogenerated conversion function. -func Convert_v1alpha3_AccessSpec_To_kops_AccessSpec(in *AccessSpec, out *kops.AccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_AccessSpec_To_kops_AccessSpec(in, out, s) -} - -func autoConvert_kops_AccessSpec_To_v1alpha3_AccessSpec(in *kops.AccessSpec, out *AccessSpec, s conversion.Scope) error { - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(DNSAccessSpec) - if err := Convert_kops_DNSAccessSpec_To_v1alpha3_DNSAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.DNS = nil - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(LoadBalancerAccessSpec) - if err := Convert_kops_LoadBalancerAccessSpec_To_v1alpha3_LoadBalancerAccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.LoadBalancer = nil - } - return nil -} - -// Convert_kops_AccessSpec_To_v1alpha3_AccessSpec is an autogenerated conversion function. 
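Mirroring the v1alpha2 file, these v1alpha3 converters pick up the fields that migrated into APISpec. Combined with the cluster.go change earlier in this diff, the Go-facing shape becomes roughly the following (values are illustrative, and the field-to-field mapping is inferred from the doc comments on the old and new types):

    spec := v1alpha3.ClusterSpec{
        API: v1alpha3.APISpec{
            PublicName:     "api.cluster.example.com",            // was ClusterSpec.MasterPublicName
            AdditionalSANs: []string{"api-internal.example.com"}, // was ClusterSpec.AdditionalSANs
            Access:         []string{"203.0.113.0/24"},           // was ClusterSpec.KubernetesAPIAccess
        },
    }

Because API is now a struct value rather than a *AccessSpec pointer, the converters above take &in.API / &out.API directly and the old nil branches disappear.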
-func Convert_kops_AccessSpec_To_v1alpha3_AccessSpec(in *kops.AccessSpec, out *AccessSpec, s conversion.Scope) error { - return autoConvert_kops_AccessSpec_To_v1alpha3_AccessSpec(in, out, s) -} - func autoConvert_v1alpha3_AddonSpec_To_kops_AddonSpec(in *AddonSpec, out *kops.AddonSpec, s conversion.Scope) error { out.Manifest = in.Manifest return nil @@ -2554,8 +2560,6 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } else { out.Subnets = nil } - out.MasterPublicName = in.MasterPublicName - out.MasterInternalName = in.MasterInternalName out.NetworkCIDR = in.NetworkCIDR out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs out.NetworkID = in.NetworkID @@ -2581,7 +2585,6 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } else { out.DNSControllerGossipConfig = nil } - out.AdditionalSANs = in.AdditionalSANs out.ClusterDNSDomain = in.ClusterDNSDomain out.ServiceClusterIPRange = in.ServiceClusterIPRange out.PodCIDR = in.PodCIDR @@ -2598,7 +2601,6 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * out.EgressProxy = nil } out.SSHKeyName = in.SSHKeyName - out.KubernetesAPIAccess = in.KubernetesAPIAccess out.IsolateMasters = in.IsolateMasters out.UpdatePolicy = in.UpdatePolicy out.ExternalPolicies = in.ExternalPolicies @@ -2796,14 +2798,8 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } else { out.Networking = nil } - if in.API != nil { - in, out := &in.API, &out.API - *out = new(kops.AccessSpec) - if err := Convert_v1alpha3_AccessSpec_To_kops_AccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.API = nil + if err := Convert_v1alpha3_APISpec_To_kops_APISpec(&in.API, &out.API, s); err != nil { + return err } if in.Authentication != nil { in, out := &in.Authentication, &out.Authentication @@ -2977,8 +2973,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec, } else { out.Subnets = nil } - out.MasterPublicName = in.MasterPublicName - out.MasterInternalName = in.MasterInternalName out.NetworkCIDR = in.NetworkCIDR out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs out.NetworkID = in.NetworkID @@ -3004,7 +2998,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec, } else { out.DNSControllerGossipConfig = nil } - out.AdditionalSANs = in.AdditionalSANs out.ClusterDNSDomain = in.ClusterDNSDomain out.ServiceClusterIPRange = in.ServiceClusterIPRange out.PodCIDR = in.PodCIDR @@ -3021,7 +3014,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec, out.EgressProxy = nil } out.SSHKeyName = in.SSHKeyName - out.KubernetesAPIAccess = in.KubernetesAPIAccess out.IsolateMasters = in.IsolateMasters out.UpdatePolicy = in.UpdatePolicy out.ExternalPolicies = in.ExternalPolicies @@ -3219,14 +3211,8 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec, } else { out.Networking = nil } - if in.API != nil { - in, out := &in.API, &out.API - *out = new(AccessSpec) - if err := Convert_kops_AccessSpec_To_v1alpha3_AccessSpec(*in, *out, s); err != nil { - return err - } - } else { - out.API = nil + if err := Convert_kops_APISpec_To_v1alpha3_APISpec(&in.API, &out.API, s); err != nil { + return err } if in.Authentication != nil { in, out := &in.Authentication, &out.Authentication @@ -3367,7 +3353,7 @@ func autoConvert_v1alpha3_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(in *Cluste out.Region = in.Region out.CIDR = in.CIDR out.IPv6CIDR = in.IPv6CIDR - 
out.ProviderID = in.ProviderID + out.ID = in.ID out.Egress = in.Egress out.Type = kops.SubnetType(in.Type) out.PublicIP = in.PublicIP @@ -3396,7 +3382,7 @@ func autoConvert_kops_ClusterSubnetSpec_To_v1alpha3_ClusterSubnetSpec(in *kops.C out.IPv6CIDR = in.IPv6CIDR out.Zone = in.Zone out.Region = in.Region - out.ProviderID = in.ProviderID + out.ID = in.ID out.Egress = in.Egress out.Type = SubnetType(in.Type) out.PublicIP = in.PublicIP @@ -4604,9 +4590,9 @@ func autoConvert_v1alpha3_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan out.SuspendProcesses = in.SuspendProcesses if in.ExternalLoadBalancers != nil { in, out := &in.ExternalLoadBalancers, &out.ExternalLoadBalancers - *out = make([]kops.LoadBalancer, len(*in)) + *out = make([]kops.LoadBalancerSpec, len(*in)) for i := range *in { - if err := Convert_v1alpha3_LoadBalancer_To_kops_LoadBalancer(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha3_LoadBalancerSpec_To_kops_LoadBalancerSpec(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4790,9 +4776,9 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha3_InstanceGroupSpec(in *kops.I out.SuspendProcesses = in.SuspendProcesses if in.ExternalLoadBalancers != nil { in, out := &in.ExternalLoadBalancers, &out.ExternalLoadBalancers - *out = make([]LoadBalancer, len(*in)) + *out = make([]LoadBalancerSpec, len(*in)) for i := range *in { - if err := Convert_kops_LoadBalancer_To_v1alpha3_LoadBalancer(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_kops_LoadBalancerSpec_To_v1alpha3_LoadBalancerSpec(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5971,28 +5957,6 @@ func Convert_kops_LeaderElectionConfiguration_To_v1alpha3_LeaderElectionConfigur return autoConvert_kops_LeaderElectionConfiguration_To_v1alpha3_LeaderElectionConfiguration(in, out, s) } -func autoConvert_v1alpha3_LoadBalancer_To_kops_LoadBalancer(in *LoadBalancer, out *kops.LoadBalancer, s conversion.Scope) error { - out.LoadBalancerName = in.LoadBalancerName - out.TargetGroupARN = in.TargetGroupARN - return nil -} - -// Convert_v1alpha3_LoadBalancer_To_kops_LoadBalancer is an autogenerated conversion function. -func Convert_v1alpha3_LoadBalancer_To_kops_LoadBalancer(in *LoadBalancer, out *kops.LoadBalancer, s conversion.Scope) error { - return autoConvert_v1alpha3_LoadBalancer_To_kops_LoadBalancer(in, out, s) -} - -func autoConvert_kops_LoadBalancer_To_v1alpha3_LoadBalancer(in *kops.LoadBalancer, out *LoadBalancer, s conversion.Scope) error { - out.LoadBalancerName = in.LoadBalancerName - out.TargetGroupARN = in.TargetGroupARN - return nil -} - -// Convert_kops_LoadBalancer_To_v1alpha3_LoadBalancer is an autogenerated conversion function. 
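The ProviderID -> ID rename in ClusterSubnetSpec (both subnet hunks above) is Go-surface only; the json tag shown in cluster.go stays "id", so stored manifests round-trip unchanged. From a caller's side, with a placeholder subnet ID:

    subnet := kops.ClusterSubnetSpec{
        Name: "us-east-1a",
        ID:   "subnet-0123456789abcdef0", // formerly subnet.ProviderID; still serializes as "id"
    }

Only Go references need the mechanical update, e.g. the validation code in aws.go later in this diff.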
-func Convert_kops_LoadBalancer_To_v1alpha3_LoadBalancer(in *kops.LoadBalancer, out *LoadBalancer, s conversion.Scope) error { - return autoConvert_kops_LoadBalancer_To_v1alpha3_LoadBalancer(in, out, s) -} - func autoConvert_v1alpha3_LoadBalancerAccessSpec_To_kops_LoadBalancerAccessSpec(in *LoadBalancerAccessSpec, out *kops.LoadBalancerAccessSpec, s conversion.Scope) error { out.Class = kops.LoadBalancerClass(in.Class) out.Type = kops.LoadBalancerType(in.Type) @@ -6069,6 +6033,28 @@ func Convert_kops_LoadBalancerAccessSpec_To_v1alpha3_LoadBalancerAccessSpec(in * return autoConvert_kops_LoadBalancerAccessSpec_To_v1alpha3_LoadBalancerAccessSpec(in, out, s) } +func autoConvert_v1alpha3_LoadBalancerSpec_To_kops_LoadBalancerSpec(in *LoadBalancerSpec, out *kops.LoadBalancerSpec, s conversion.Scope) error { + out.LoadBalancerName = in.LoadBalancerName + out.TargetGroupARN = in.TargetGroupARN + return nil +} + +// Convert_v1alpha3_LoadBalancerSpec_To_kops_LoadBalancerSpec is an autogenerated conversion function. +func Convert_v1alpha3_LoadBalancerSpec_To_kops_LoadBalancerSpec(in *LoadBalancerSpec, out *kops.LoadBalancerSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_LoadBalancerSpec_To_kops_LoadBalancerSpec(in, out, s) +} + +func autoConvert_kops_LoadBalancerSpec_To_v1alpha3_LoadBalancerSpec(in *kops.LoadBalancerSpec, out *LoadBalancerSpec, s conversion.Scope) error { + out.LoadBalancerName = in.LoadBalancerName + out.TargetGroupARN = in.TargetGroupARN + return nil +} + +// Convert_kops_LoadBalancerSpec_To_v1alpha3_LoadBalancerSpec is an autogenerated conversion function. +func Convert_kops_LoadBalancerSpec_To_v1alpha3_LoadBalancerSpec(in *kops.LoadBalancerSpec, out *LoadBalancerSpec, s conversion.Scope) error { + return autoConvert_kops_LoadBalancerSpec_To_v1alpha3_LoadBalancerSpec(in, out, s) +} + func autoConvert_v1alpha3_LoadBalancerSubnetSpec_To_kops_LoadBalancerSubnetSpec(in *LoadBalancerSubnetSpec, out *kops.LoadBalancerSubnetSpec, s conversion.Scope) error { out.Name = in.Name out.PrivateIPv4Address = in.PrivateIPv4Address diff --git a/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go index b43eb7ce8ddf1..3ff451565c5c1 100644 --- a/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go @@ -29,6 +29,42 @@ import ( kops "k8s.io/kops/pkg/apis/kops" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISpec) DeepCopyInto(out *APISpec) { + *out = *in + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSAccessSpec) + **out = **in + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerAccessSpec) + (*in).DeepCopyInto(*out) + } + if in.AdditionalSANs != nil { + in, out := &in.AdditionalSANs, &out.AdditionalSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISpec. +func (in *APISpec) DeepCopy() *APISpec { + if in == nil { + return nil + } + out := new(APISpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AWSAuthenticationIdentityMappingSpec) DeepCopyInto(out *AWSAuthenticationIdentityMappingSpec) { *out = *in @@ -241,32 +277,6 @@ func (in *AccessLogSpec) DeepCopy() *AccessLogSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessSpec) DeepCopyInto(out *AccessSpec) { - *out = *in - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(DNSAccessSpec) - **out = **in - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(LoadBalancerAccessSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessSpec. -func (in *AccessSpec) DeepCopy() *AccessSpec { - if in == nil { - return nil - } - out := new(AccessSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AddonSpec) DeepCopyInto(out *AddonSpec) { *out = *in @@ -1035,11 +1045,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(DNSControllerGossipConfig) (*in).DeepCopyInto(*out) } - if in.AdditionalSANs != nil { - in, out := &in.AdditionalSANs, &out.AdditionalSANs - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.SSHAccess != nil { in, out := &in.SSHAccess, &out.SSHAccess *out = make([]string, len(*in)) @@ -1060,11 +1065,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(string) **out = **in } - if in.KubernetesAPIAccess != nil { - in, out := &in.KubernetesAPIAccess, &out.KubernetesAPIAccess - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.IsolateMasters != nil { in, out := &in.IsolateMasters, &out.IsolateMasters *out = new(bool) @@ -1214,11 +1214,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(NetworkingSpec) (*in).DeepCopyInto(*out) } - if in.API != nil { - in, out := &in.API, &out.API - *out = new(AccessSpec) - (*in).DeepCopyInto(*out) - } + in.API.DeepCopyInto(&out.API) if in.Authentication != nil { in, out := &in.Authentication, &out.Authentication *out = new(AuthenticationSpec) @@ -2515,7 +2511,7 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { } if in.ExternalLoadBalancers != nil { in, out := &in.ExternalLoadBalancers, &out.ExternalLoadBalancers - *out = make([]LoadBalancer, len(*in)) + *out = make([]LoadBalancerSpec, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -4010,32 +4006,6 @@ func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { - *out = *in - if in.LoadBalancerName != nil { - in, out := &in.LoadBalancerName, &out.LoadBalancerName - *out = new(string) - **out = **in - } - if in.TargetGroupARN != nil { - in, out := &in.TargetGroupARN, &out.TargetGroupARN - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. -func (in *LoadBalancer) DeepCopy() *LoadBalancer { - if in == nil { - return nil - } - out := new(LoadBalancer) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LoadBalancerAccessSpec) DeepCopyInto(out *LoadBalancerAccessSpec) { *out = *in @@ -4089,6 +4059,32 @@ func (in *LoadBalancerAccessSpec) DeepCopy() *LoadBalancerAccessSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerSpec) DeepCopyInto(out *LoadBalancerSpec) { + *out = *in + if in.LoadBalancerName != nil { + in, out := &in.LoadBalancerName, &out.LoadBalancerName + *out = new(string) + **out = **in + } + if in.TargetGroupARN != nil { + in, out := &in.TargetGroupARN, &out.TargetGroupARN + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSpec. +func (in *LoadBalancerSpec) DeepCopy() *LoadBalancerSpec { + if in == nil { + return nil + } + out := new(LoadBalancerSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerSubnetSpec) DeepCopyInto(out *LoadBalancerSubnetSpec) { *out = *in diff --git a/pkg/apis/kops/validation/aws.go b/pkg/apis/kops/validation/aws.go index 3fca249b7b219..0d1081037a354 100644 --- a/pkg/apis/kops/validation/aws.go +++ b/pkg/apis/kops/validation/aws.go @@ -35,13 +35,11 @@ import ( func awsValidateCluster(c *kops.Cluster) field.ErrorList { allErrs := field.ErrorList{} - if c.Spec.API != nil { - if c.Spec.API.LoadBalancer != nil { - allErrs = append(allErrs, awsValidateAdditionalSecurityGroups(field.NewPath("spec", "api", "loadBalancer", "additionalSecurityGroups"), c.Spec.API.LoadBalancer.AdditionalSecurityGroups)...) - allErrs = append(allErrs, awsValidateSSLPolicy(field.NewPath("spec", "api", "loadBalancer", "sslPolicy"), c.Spec.API.LoadBalancer)...) - allErrs = append(allErrs, awsValidateLoadBalancerSubnets(field.NewPath("spec", "api", "loadBalancer", "subnets"), c.Spec)...) - allErrs = append(allErrs, awsValidateTopologyDNS(field.NewPath("spec", "api", "loadBalancer", "type"), c)...) - } + if c.Spec.API.LoadBalancer != nil { + allErrs = append(allErrs, awsValidateAdditionalSecurityGroups(field.NewPath("spec", "api", "loadBalancer", "additionalSecurityGroups"), c.Spec.API.LoadBalancer.AdditionalSecurityGroups)...) + allErrs = append(allErrs, awsValidateSSLPolicy(field.NewPath("spec", "api", "loadBalancer", "sslPolicy"), c.Spec.API.LoadBalancer)...) + allErrs = append(allErrs, awsValidateLoadBalancerSubnets(field.NewPath("spec", "api", "loadBalancer", "subnets"), c.Spec)...) + allErrs = append(allErrs, awsValidateTopologyDNS(field.NewPath("spec", "api", "loadBalancer", "type"), c)...) } allErrs = append(allErrs, awsValidateExternalCloudControllerManager(c)...) 
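This first aws.go hunk is the knock-on effect of Spec.API becoming a value type: the outer nil guard is no longer meaningful, and only the LoadBalancer pointer still needs a check. Roughly:

    // before: two levels of nil-checking
    // if c.Spec.API != nil && c.Spec.API.LoadBalancer != nil { ... }
    //
    // after: API is a struct value, so one level drops out
    if c.Spec.API.LoadBalancer != nil {
        // additionalSecurityGroups, sslPolicy, subnets and DNS-topology checks run here
    }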
@@ -53,7 +51,7 @@ func awsValidateCluster(c *kops.Cluster) field.ErrorList { for i, subnet := range c.Spec.Subnets { f := field.NewPath("spec", "subnets").Index(i) if subnet.AdditionalRoutes != nil { - if len(subnet.ProviderID) > 0 { + if len(subnet.ID) > 0 { allErrs = append(allErrs, field.Invalid(f, subnet, "additional routes cannot be added if the subnet is shared")) } else if subnet.Type != kops.SubnetTypePrivate { allErrs = append(allErrs, field.Invalid(f, subnet, "additional routes can only be added on private subnets")) @@ -128,7 +126,7 @@ func awsValidateInstanceMetadata(fieldPath *field.Path, instanceMetadata *kops.I } if instanceMetadata.HTTPPutResponseHopLimit != nil { - httpPutResponseHopLimit := fi.Int64Value(instanceMetadata.HTTPPutResponseHopLimit) + httpPutResponseHopLimit := fi.ValueOf(instanceMetadata.HTTPPutResponseHopLimit) if httpPutResponseHopLimit < 1 || httpPutResponseHopLimit > 64 { allErrs = append(allErrs, field.Invalid(fieldPath.Child("httpPutResponseHopLimit"), instanceMetadata.HTTPPutResponseHopLimit, "HTTPPutResponseLimit must be a value between 1 and 64")) @@ -171,7 +169,7 @@ func awsValidateInstanceTypeAndImage(instanceTypeFieldPath *field.Path, imageFie return append(allErrs, field.Invalid(imageFieldPath, image, fmt.Sprintf("specified image %q is invalid: %s", image, err))) } - imageArch := fi.StringValue(imageInfo.Architecture) + imageArch := fi.ValueOf(imageInfo.Architecture) // Spotinst uses the instance type field to keep a "," separated list of instance types for _, instanceType := range strings.Split(instanceTypes, ",") { @@ -184,7 +182,7 @@ func awsValidateInstanceTypeAndImage(instanceTypeFieldPath *field.Path, imageFie found := false if machineInfo != nil && machineInfo.ProcessorInfo != nil { for _, machineArch := range machineInfo.ProcessorInfo.SupportedArchitectures { - if imageArch == fi.StringValue(machineArch) { + if imageArch == fi.ValueOf(machineArch) { found = true } } @@ -252,19 +250,19 @@ func awsValidateMixedInstancesPolicy(path *field.Path, spec *kops.MixedInstances } if spec.OnDemandBase != nil { - if fi.Int64Value(spec.OnDemandBase) < 0 { + if fi.ValueOf(spec.OnDemandBase) < 0 { errs = append(errs, field.Invalid(path.Child("onDemandBase"), spec.OnDemandBase, "cannot be less than zero")) } - if fi.Int64Value(spec.OnDemandBase) > int64(fi.Int32Value(ig.Spec.MaxSize)) { + if fi.ValueOf(spec.OnDemandBase) > int64(fi.ValueOf(ig.Spec.MaxSize)) { errs = append(errs, field.Invalid(path.Child("onDemandBase"), spec.OnDemandBase, "cannot be greater than max size")) } } if spec.OnDemandAboveBase != nil { - if fi.Int64Value(spec.OnDemandAboveBase) < 0 { + if fi.ValueOf(spec.OnDemandAboveBase) < 0 { errs = append(errs, field.Invalid(path.Child("onDemandAboveBase"), spec.OnDemandAboveBase, "cannot be less than 0")) } - if fi.Int64Value(spec.OnDemandAboveBase) > 100 { + if fi.ValueOf(spec.OnDemandAboveBase) > 100 { errs = append(errs, field.Invalid(path.Child("onDemandAboveBase"), spec.OnDemandAboveBase, "cannot be greater than 100")) } } @@ -277,7 +275,7 @@ func awsValidateMixedInstancesPolicy(path *field.Path, spec *kops.MixedInstances func awsValidateTopologyDNS(fieldPath *field.Path, c *kops.Cluster) field.ErrorList { allErrs := field.ErrorList{} - if c.UsesNoneDNS() && c.Spec.API != nil && c.Spec.API.LoadBalancer != nil && c.Spec.API.LoadBalancer.Class != kops.LoadBalancerClassNetwork { + if c.UsesNoneDNS() && c.Spec.API.LoadBalancer != nil && c.Spec.API.LoadBalancer.Class != kops.LoadBalancerClassNetwork { allErrs = append(allErrs, 
field.Forbidden(fieldPath, "topology.dns.type=none requires Network Load Balancer")) } diff --git a/pkg/apis/kops/validation/aws_test.go b/pkg/apis/kops/validation/aws_test.go index 4da94dfbea155..8bd70dcb15ac7 100644 --- a/pkg/apis/kops/validation/aws_test.go +++ b/pkg/apis/kops/validation/aws_test.go @@ -42,7 +42,7 @@ func TestAWSValidateExternalCloudConfig(t *testing.T) { ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{}, CloudConfig: &kops.CloudConfiguration{ AWSEBSCSIDriver: &kops.AWSEBSCSIDriver{ - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), }, }, }, @@ -53,7 +53,7 @@ func TestAWSValidateExternalCloudConfig(t *testing.T) { ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{}, CloudConfig: &kops.CloudConfiguration{ AWSEBSCSIDriver: &kops.AWSEBSCSIDriver{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, }, @@ -152,7 +152,7 @@ func TestValidateInstanceGroupSpec(t *testing.T) { }, { Input: kops.InstanceGroupSpec{ - SpotDurationInMinutes: fi.Int64(55), + SpotDurationInMinutes: fi.PtrTo(int64(55)), }, ExpectedErrors: []string{ "Unsupported value::test-nodes.spec.spotDurationInMinutes", @@ -160,7 +160,7 @@ func TestValidateInstanceGroupSpec(t *testing.T) { }, { Input: kops.InstanceGroupSpec{ - SpotDurationInMinutes: fi.Int64(380), + SpotDurationInMinutes: fi.PtrTo(int64(380)), }, ExpectedErrors: []string{ "Unsupported value::test-nodes.spec.spotDurationInMinutes", @@ -168,7 +168,7 @@ func TestValidateInstanceGroupSpec(t *testing.T) { }, { Input: kops.InstanceGroupSpec{ - SpotDurationInMinutes: fi.Int64(125), + SpotDurationInMinutes: fi.PtrTo(int64(125)), }, ExpectedErrors: []string{ "Unsupported value::test-nodes.spec.spotDurationInMinutes", @@ -176,13 +176,13 @@ func TestValidateInstanceGroupSpec(t *testing.T) { }, { Input: kops.InstanceGroupSpec{ - SpotDurationInMinutes: fi.Int64(120), + SpotDurationInMinutes: fi.PtrTo(int64(120)), }, ExpectedErrors: []string{}, }, { Input: kops.InstanceGroupSpec{ - InstanceInterruptionBehavior: fi.String("invalidValue"), + InstanceInterruptionBehavior: fi.PtrTo("invalidValue"), }, ExpectedErrors: []string{ "Unsupported value::test-nodes.spec.instanceInterruptionBehavior", @@ -190,19 +190,19 @@ func TestValidateInstanceGroupSpec(t *testing.T) { }, { Input: kops.InstanceGroupSpec{ - InstanceInterruptionBehavior: fi.String("terminate"), + InstanceInterruptionBehavior: fi.PtrTo("terminate"), }, ExpectedErrors: []string{}, }, { Input: kops.InstanceGroupSpec{ - InstanceInterruptionBehavior: fi.String("hibernate"), + InstanceInterruptionBehavior: fi.PtrTo("hibernate"), }, ExpectedErrors: []string{}, }, { Input: kops.InstanceGroupSpec{ - InstanceInterruptionBehavior: fi.String("stop"), + InstanceInterruptionBehavior: fi.PtrTo("stop"), }, ExpectedErrors: []string{}, }, @@ -322,7 +322,7 @@ func TestMixedInstancePolicies(t *testing.T) { "c4.large", "c5.large", }, - OnDemandAboveBase: fi.Int64(231), + OnDemandAboveBase: fi.PtrTo(int64(231)), }, }, ExpectedErrors: []string{"Invalid value::spec.mixedInstancesPolicy.onDemandAboveBase"}, @@ -381,8 +381,8 @@ func TestInstanceMetadataOptions(t *testing.T) { Spec: kops.InstanceGroupSpec{ Role: "Node", InstanceMetadata: &kops.InstanceMetadataOptions{ - HTTPPutResponseHopLimit: fi.Int64(1), - HTTPTokens: fi.String("abc"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(1)), + HTTPTokens: fi.PtrTo("abc"), }, MachineType: "t3.medium", }, @@ -397,8 +397,8 @@ func TestInstanceMetadataOptions(t *testing.T) { Spec: kops.InstanceGroupSpec{ Role: "Node", InstanceMetadata: 
&kops.InstanceMetadataOptions{ - HTTPPutResponseHopLimit: fi.Int64(-1), - HTTPTokens: fi.String("required"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(-1)), + HTTPTokens: fi.PtrTo("required"), }, MachineType: "t3.medium", }, @@ -442,7 +442,7 @@ func TestLoadBalancerSubnets(t *testing.T) { lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", - PrivateIPv4Address: fi.String("10.0.0.10"), + PrivateIPv4Address: fi.PtrTo("10.0.0.10"), AllocationID: nil, }, { @@ -479,7 +479,7 @@ func TestLoadBalancerSubnets(t *testing.T) { lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", - PrivateIPv4Address: fi.String(""), + PrivateIPv4Address: fi.PtrTo(""), AllocationID: nil, }, }, @@ -491,7 +491,7 @@ func TestLoadBalancerSubnets(t *testing.T) { { Name: "a", PrivateIPv4Address: nil, - AllocationID: fi.String(""), + AllocationID: fi.PtrTo(""), }, }, expected: []string{"Required value::spec.api.loadBalancer.subnets[0].allocationID"}, @@ -501,7 +501,7 @@ func TestLoadBalancerSubnets(t *testing.T) { lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", - PrivateIPv4Address: fi.String("invalidip"), + PrivateIPv4Address: fi.PtrTo("invalidip"), AllocationID: nil, }, }, @@ -512,56 +512,56 @@ func TestLoadBalancerSubnets(t *testing.T) { lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", - PrivateIPv4Address: fi.String("11.0.0.10"), + PrivateIPv4Address: fi.PtrTo("11.0.0.10"), AllocationID: nil, }, }, expected: []string{"Invalid value::spec.api.loadBalancer.subnets[0].privateIPv4Address"}, }, { // invalid class - with privateIPv4Address, no allocationID - class: fi.String(string(kops.LoadBalancerClassClassic)), + class: fi.PtrTo(string(kops.LoadBalancerClassClassic)), clusterSubnets: []string{"a", "b", "c"}, lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", - PrivateIPv4Address: fi.String("10.0.0.10"), + PrivateIPv4Address: fi.PtrTo("10.0.0.10"), AllocationID: nil, }, }, expected: []string{"Forbidden::spec.api.loadBalancer.subnets[0].privateIPv4Address"}, }, { // invalid class - no privateIPv4Address, with allocationID - class: fi.String(string(kops.LoadBalancerClassClassic)), + class: fi.PtrTo(string(kops.LoadBalancerClassClassic)), clusterSubnets: []string{"a", "b", "c"}, lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", PrivateIPv4Address: nil, - AllocationID: fi.String("eipalloc-222ghi789"), + AllocationID: fi.PtrTo("eipalloc-222ghi789"), }, }, expected: []string{"Forbidden::spec.api.loadBalancer.subnets[0].allocationID"}, }, { // invalid type external for private IP - lbType: fi.String(string(kops.LoadBalancerTypePublic)), + lbType: fi.PtrTo(string(kops.LoadBalancerTypePublic)), clusterSubnets: []string{"a", "b", "c"}, lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", - PrivateIPv4Address: fi.String("10.0.0.10"), + PrivateIPv4Address: fi.PtrTo("10.0.0.10"), AllocationID: nil, }, }, expected: []string{"Forbidden::spec.api.loadBalancer.subnets[0].privateIPv4Address"}, }, { // invalid type Internal for public IP - lbType: fi.String(string(kops.LoadBalancerTypeInternal)), + lbType: fi.PtrTo(string(kops.LoadBalancerTypeInternal)), clusterSubnets: []string{"a", "b", "c"}, lbSubnets: []kops.LoadBalancerSubnetSpec{ { Name: "a", PrivateIPv4Address: nil, - AllocationID: fi.String("eipalloc-222ghi789"), + AllocationID: fi.PtrTo("eipalloc-222ghi789"), }, }, expected: []string{"Forbidden::spec.api.loadBalancer.subnets[0].allocationID"}, @@ -571,7 +571,7 @@ func TestLoadBalancerSubnets(t *testing.T) { for _, test := range tests { cluster := kops.Cluster{ Spec: kops.ClusterSpec{ - API: 
&kops.AccessSpec{ + API: kops.APISpec{ LoadBalancer: &kops.LoadBalancerAccessSpec{ Class: kops.LoadBalancerClassNetwork, Type: kops.LoadBalancerTypeInternal, @@ -825,7 +825,7 @@ func TestAWSAdditionalRoutes(t *testing.T) { NetworkCIDR: test.clusterCidr, Subnets: []kops.ClusterSubnetSpec{ { - ProviderID: test.providerId, + ID: test.providerId, Type: test.subnetType, AdditionalRoutes: test.route, }, diff --git a/pkg/apis/kops/validation/cluster.go b/pkg/apis/kops/validation/cluster.go index 707ec5bf0bf05..91680a4b33860 100644 --- a/pkg/apis/kops/validation/cluster.go +++ b/pkg/apis/kops/validation/cluster.go @@ -106,7 +106,7 @@ func validateEtcdMemberUpdate(fp *field.Path, obj kops.EtcdMemberSpec, old kops. allErrs = append(allErrs, field.Forbidden(fp.Child("name"), "name cannot be changed")) } - if fi.StringValue(obj.InstanceGroup) != fi.StringValue(old.InstanceGroup) { + if fi.ValueOf(obj.InstanceGroup) != fi.ValueOf(old.InstanceGroup) { allErrs = append(allErrs, field.Forbidden(fp.Child("instanceGroup"), "instanceGroup cannot be changed")) } diff --git a/pkg/apis/kops/validation/cluster_test.go b/pkg/apis/kops/validation/cluster_test.go index ad5730378dd14..764a4f1025bac 100644 --- a/pkg/apis/kops/validation/cluster_test.go +++ b/pkg/apis/kops/validation/cluster_test.go @@ -37,15 +37,15 @@ func TestValidEtcdChanges(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, { Name: "b", - InstanceGroup: fi.String("eu-central-1b"), + InstanceGroup: fi.PtrTo("eu-central-1b"), }, { Name: "c", - InstanceGroup: fi.String("eu-central-1c"), + InstanceGroup: fi.PtrTo("eu-central-1c"), }, }, }, @@ -55,15 +55,15 @@ func TestValidEtcdChanges(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, { Name: "b", - InstanceGroup: fi.String("eu-central-1b"), + InstanceGroup: fi.PtrTo("eu-central-1b"), }, { Name: "d", - InstanceGroup: fi.String("eu-central-1d"), + InstanceGroup: fi.PtrTo("eu-central-1d"), }, }, }, @@ -85,7 +85,7 @@ func TestValidEtcdChanges(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, }, }, @@ -95,15 +95,15 @@ func TestValidEtcdChanges(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, { Name: "b", - InstanceGroup: fi.String("eu-central-1b"), + InstanceGroup: fi.PtrTo("eu-central-1b"), }, { Name: "c", - InstanceGroup: fi.String("eu-central-1c"), + InstanceGroup: fi.PtrTo("eu-central-1c"), }, }, }, @@ -125,7 +125,7 @@ func TestValidEtcdChanges(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, }, }, @@ -135,7 +135,7 @@ func TestValidEtcdChanges(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, }, }, diff --git a/pkg/apis/kops/validation/instancegroup.go b/pkg/apis/kops/validation/instancegroup.go index c0e17993388fa..52c79d48494a0 100644 --- a/pkg/apis/kops/validation/instancegroup.go +++ b/pkg/apis/kops/validation/instancegroup.go @@ -83,11 +83,11 @@ func ValidateInstanceGroup(g *kops.InstanceGroup, cloud fi.Cloud, strict bool) f allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", 
"image"), "image must be specified.")) } - if fi.Int32Value(g.Spec.RootVolumeIOPS) < 0 { + if fi.ValueOf(g.Spec.RootVolumeIOPS) < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "rootVolumeIops"), g.Spec.RootVolumeIOPS, "RootVolumeIOPS must be greater than 0")) } - if fi.Int32Value(g.Spec.RootVolumeThroughput) < 0 { + if fi.ValueOf(g.Spec.RootVolumeThroughput) < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "rootVolumeThroughput"), g.Spec.RootVolumeThroughput, "RootVolumeThroughput must be greater than 0")) } @@ -294,7 +294,7 @@ func ValidateMasterInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster) f for _, etcd := range cluster.Spec.EtcdClusters { hasEtcd := false for _, m := range etcd.Members { - if fi.StringValue(m.InstanceGroup) == g.ObjectMeta.Name { + if fi.ValueOf(m.InstanceGroup) == g.ObjectMeta.Name { hasEtcd = true break } @@ -381,7 +381,7 @@ func validateIGCloudLabels(ig *kops.InstanceGroup, fldPath *field.Path) (allErrs return allErrs } -func validateExternalLoadBalancer(lb *kops.LoadBalancer, fldPath *field.Path) field.ErrorList { +func validateExternalLoadBalancer(lb *kops.LoadBalancerSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if lb.LoadBalancerName != nil && lb.TargetGroupARN != nil { @@ -389,7 +389,7 @@ func validateExternalLoadBalancer(lb *kops.LoadBalancer, fldPath *field.Path) fi } if lb.LoadBalancerName != nil { - name := fi.StringValue(lb.LoadBalancerName) + name := fi.ValueOf(lb.LoadBalancerName) if len(name) > 32 { allErrs = append(allErrs, field.Invalid(fldPath.Child("loadBalancerName"), name, "Load Balancer name must have at most 32 characters")) @@ -397,7 +397,7 @@ func validateExternalLoadBalancer(lb *kops.LoadBalancer, fldPath *field.Path) fi } if lb.TargetGroupARN != nil { - actual := fi.StringValue(lb.TargetGroupARN) + actual := fi.ValueOf(lb.TargetGroupARN) parsed, err := arn.Parse(actual) if err != nil { diff --git a/pkg/apis/kops/validation/instancegroup_test.go b/pkg/apis/kops/validation/instancegroup_test.go index d45e452d8bfbd..b86fbf382740b 100644 --- a/pkg/apis/kops/validation/instancegroup_test.go +++ b/pkg/apis/kops/validation/instancegroup_test.go @@ -28,7 +28,7 @@ import ( ) func s(v string) *string { - return fi.String(v) + return fi.PtrTo(v) } func TestValidateInstanceProfile(t *testing.T) { @@ -111,15 +111,15 @@ func TestValidMasterInstanceGroup(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, { Name: "b", - InstanceGroup: fi.String("eu-central-1b"), + InstanceGroup: fi.PtrTo("eu-central-1b"), }, { Name: "c", - InstanceGroup: fi.String("eu-central-1c"), + InstanceGroup: fi.PtrTo("eu-central-1c"), }, }, }, @@ -146,15 +146,15 @@ func TestValidMasterInstanceGroup(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "a", - InstanceGroup: fi.String("eu-central-1a"), + InstanceGroup: fi.PtrTo("eu-central-1a"), }, { Name: "b", - InstanceGroup: fi.String("eu-central-1b"), + InstanceGroup: fi.PtrTo("eu-central-1b"), }, { Name: "c", - InstanceGroup: fi.String("eu-central-1c"), + InstanceGroup: fi.PtrTo("eu-central-1c"), }, }, }, @@ -221,7 +221,7 @@ func TestValidBootDevice(t *testing.T) { for _, g := range grid { ig := createMinimalInstanceGroup() - ig.Spec.RootVolumeType = fi.String(g.volumeType) + ig.Spec.RootVolumeType = fi.PtrTo(g.volumeType) errs := CrossValidateInstanceGroup(ig, cluster, nil, true) testErrors(t, g.volumeType, errs, g.expected) } @@ -355,20 +355,20 @@ 
func TestIGUpdatePolicy(t *testing.T) { }, { label: "automatic", - policy: fi.String(kops.UpdatePolicyAutomatic), + policy: fi.PtrTo(kops.UpdatePolicyAutomatic), }, { label: "external", - policy: fi.String(kops.UpdatePolicyExternal), + policy: fi.PtrTo(kops.UpdatePolicyExternal), }, { label: "empty", - policy: fi.String(""), + policy: fi.PtrTo(""), expected: []string{unsupportedValueError}, }, { label: "unknown", - policy: fi.String("something-else"), + policy: fi.PtrTo("something-else"), expected: []string{unsupportedValueError}, }, } { @@ -396,8 +396,8 @@ func TestValidInstanceGroup(t *testing.T) { Spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleMaster, Subnets: []string{"eu-central-1a"}, - MaxSize: fi.Int32(1), - MinSize: fi.Int32(1), + MaxSize: fi.PtrTo(int32(1)), + MinSize: fi.PtrTo(int32(1)), Image: "my-image", }, }, @@ -412,8 +412,8 @@ func TestValidInstanceGroup(t *testing.T) { Spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleAPIServer, Subnets: []string{"eu-central-1a"}, - MaxSize: fi.Int32(1), - MinSize: fi.Int32(1), + MaxSize: fi.PtrTo(int32(1)), + MinSize: fi.PtrTo(int32(1)), Image: "my-image", }, }, @@ -428,8 +428,8 @@ func TestValidInstanceGroup(t *testing.T) { Spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleNode, Subnets: []string{"eu-central-1a"}, - MaxSize: fi.Int32(1), - MinSize: fi.Int32(1), + MaxSize: fi.PtrTo(int32(1)), + MinSize: fi.PtrTo(int32(1)), Image: "my-image", }, }, @@ -444,8 +444,8 @@ func TestValidInstanceGroup(t *testing.T) { Spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleBastion, Subnets: []string{"eu-central-1a"}, - MaxSize: fi.Int32(1), - MinSize: fi.Int32(1), + MaxSize: fi.PtrTo(int32(1)), + MinSize: fi.PtrTo(int32(1)), Image: "my-image", }, }, @@ -460,8 +460,8 @@ func TestValidInstanceGroup(t *testing.T) { Spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleBastion, Subnets: []string{"eu-central-1a"}, - MaxSize: fi.Int32(1), - MinSize: fi.Int32(1), + MaxSize: fi.PtrTo(int32(1)), + MinSize: fi.PtrTo(int32(1)), }, }, ExpectedErrors: []string{"Forbidden::spec.image"}, @@ -482,8 +482,8 @@ func createMinimalInstanceGroup() *kops.InstanceGroup { Spec: kops.InstanceGroupSpec{ CloudLabels: make(map[string]string), Role: "Node", - MaxSize: fi.Int32(1), - MinSize: fi.Int32(1), + MaxSize: fi.PtrTo(int32(1)), + MinSize: fi.PtrTo(int32(1)), Image: "my-image", }, } diff --git a/pkg/apis/kops/validation/legacy.go b/pkg/apis/kops/validation/legacy.go index 5179ed918d428..5ba90bc7dfa73 100644 --- a/pkg/apis/kops/validation/legacy.go +++ b/pkg/apis/kops/validation/legacy.go @@ -209,13 +209,13 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList { allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("nonMasqueradeCIDR"), fmt.Sprintf("nonMasqueradeCIDR %q cannot overlap with networkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR))) } - if c.Spec.ContainerRuntime == "docker" && c.Spec.Kubelet != nil && fi.StringValue(c.Spec.Kubelet.NetworkPluginName) == "kubenet" { - if fi.StringValue(c.Spec.Kubelet.NonMasqueradeCIDR) != nonMasqueradeCIDRString { + if c.Spec.ContainerRuntime == "docker" && c.Spec.Kubelet != nil && fi.ValueOf(c.Spec.Kubelet.NetworkPluginName) == "kubenet" { + if fi.ValueOf(c.Spec.Kubelet.NonMasqueradeCIDR) != nonMasqueradeCIDRString { if strict || c.Spec.Kubelet.NonMasqueradeCIDR != nil { allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "nonMasqueradeCIDR"), "kubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR")) } } - if 
fi.StringValue(c.Spec.MasterKubelet.NonMasqueradeCIDR) != nonMasqueradeCIDRString { + if fi.ValueOf(c.Spec.MasterKubelet.NonMasqueradeCIDR) != nonMasqueradeCIDRString { if strict || c.Spec.MasterKubelet.NonMasqueradeCIDR != nil { allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("masterKubelet", "nonMasqueradeCIDR"), "masterKubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR")) } @@ -287,7 +287,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList { } // @ check that NodeLocalDNS addon is configured correctly - if c.Spec.KubeDNS.NodeLocalDNS != nil && fi.BoolValue(c.Spec.KubeDNS.NodeLocalDNS.Enabled) { + if c.Spec.KubeDNS.NodeLocalDNS != nil && fi.ValueOf(c.Spec.KubeDNS.NodeLocalDNS.Enabled) { if c.Spec.KubeDNS.Provider != "CoreDNS" && c.Spec.KubeDNS.Provider != "" { allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "provider"), "KubeDNS provider must be set to CoreDNS if NodeLocalDNS addon is enabled")) } diff --git a/pkg/apis/kops/validation/openstack_test.go b/pkg/apis/kops/validation/openstack_test.go index b5afdcd7c49e2..4de6e9a62b9c9 100644 --- a/pkg/apis/kops/validation/openstack_test.go +++ b/pkg/apis/kops/validation/openstack_test.go @@ -70,7 +70,7 @@ func Test_ValidateTopology(t *testing.T) { CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("foo"), + ExternalNetwork: fi.PtrTo("foo"), }, }, }, diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go index 432a12fff0af2..baf2aa61ee4fb 100644 --- a/pkg/apis/kops/validation/validation.go +++ b/pkg/apis/kops/validation/validation.go @@ -94,7 +94,7 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie } // KubernetesAPIAccess - for i, cidr := range spec.KubernetesAPIAccess { + for i, cidr := range spec.API.Access { if strings.HasPrefix(cidr, "pl-") { if spec.GetCloudProvider() != kops.CloudProviderAWS { allErrs = append(allErrs, field.Invalid(fieldPath.Child("kubernetesAPIAccess").Index(i), cidr, "Prefix List ID only supported for AWS")) @@ -243,7 +243,7 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie allErrs = append(allErrs, validateRollingUpdate(spec.RollingUpdate, fieldPath.Child("rollingUpdate"), false)...) } - if spec.API != nil && spec.API.LoadBalancer != nil { + if spec.API.LoadBalancer != nil { lbSpec := spec.API.LoadBalancer lbPath := fieldPath.Child("api", "loadBalancer") if spec.GetCloudProvider() == kops.CloudProviderAWS { @@ -295,7 +295,7 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie if spec.Karpenter != nil && spec.Karpenter.Enabled { fldPath := fieldPath.Child("karpenter", "enabled") - if !fi.BoolValue(spec.IAM.UseServiceAccountExternalPermissions) { + if !fi.ValueOf(spec.IAM.UseServiceAccountExternalPermissions) { allErrs = append(allErrs, field.Forbidden(fldPath, "Karpenter requires that service accounts use external permissions")) } if !featureflag.Karpenter.Enabled() { @@ -306,7 +306,7 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie if spec.PodIdentityWebhook != nil && spec.PodIdentityWebhook.Enabled { allErrs = append(allErrs, validatePodIdentityWebhook(c, spec.PodIdentityWebhook, fieldPath.Child("podIdentityWebhook"))...) 
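A note on the nil-check removals in this hunk and the ones that follow: spec.API has moved from a pointer (*AccessSpec) to an embedded value (APISpec), so guards on spec.API itself become meaningless and only its pointer-typed members still need checking. A trimmed sketch of the shape of that change — an illustration only; the real kops structs carry many more fields (DNS, PublicName, AdditionalSANs, ...) and named types:

type LoadBalancerAccessSpec struct {
	Type string // the real field is a named LoadBalancerType, not a plain string
}

type APISpec struct {
	// Access replaces the old top-level spec.kubernetesAPIAccess list.
	Access       []string
	LoadBalancer *LoadBalancerAccessSpec
}

type ClusterSpec struct {
	// API was previously *AccessSpec; as a value it can no longer be nil,
	// so "spec.API != nil &&" guards drop out and only the inner pointer
	// (spec.API.LoadBalancer) still needs a check.
	API APISpec
}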
} - if spec.CertManager != nil && fi.BoolValue(spec.CertManager.Enabled) { + if spec.CertManager != nil && fi.ValueOf(spec.CertManager.Enabled) { allErrs = append(allErrs, validateCertManager(c, spec.CertManager, fieldPath.Child("certManager"))...) } @@ -464,9 +464,9 @@ func validateSubnets(cluster *kops.ClusterSpec, fieldPath *field.Path) field.Err // cannot mix subnets with specified ID and without specified id if len(subnets) > 0 { - hasID := subnets[0].ProviderID != "" + hasID := subnets[0].ID != "" for i := range subnets { - if (subnets[i].ProviderID != "") != hasID { + if (subnets[i].ID != "") != hasID { allErrs = append(allErrs, field.Forbidden(fieldPath.Index(i).Child("id"), "cannot mix subnets with specified ID and unspecified ID")) } } @@ -511,7 +511,7 @@ func validateSubnet(subnet *kops.ClusterSubnetSpec, c *kops.ClusterSpec, fieldPa } } - allErrs = append(allErrs, IsValidValue(fieldPath.Child("type"), fi.String(string(subnet.Type)), []string{ + allErrs = append(allErrs, IsValidValue(fieldPath.Child("type"), fi.PtrTo(string(subnet.Type)), []string{ string(kops.SubnetTypePublic), string(kops.SubnetTypePrivate), string(kops.SubnetTypeDualStack), @@ -976,7 +976,7 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only version 1.11 with patch version 5 or higher is supported")) } - if v.Hubble != nil && fi.BoolValue(v.Hubble.Enabled) { + if v.Hubble != nil && fi.ValueOf(v.Hubble.Enabled) { if !components.IsCertManagerEnabled(cluster) { allErrs = append(allErrs, field.Forbidden(fldPath.Child("hubble", "enabled"), "Hubble requires that cert manager is enabled")) } @@ -1019,13 +1019,13 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe // Cilium with Wireguard integration follow-up --> https://github.com/cilium/cilium/issues/15462. // The following rule of validation should be deleted as this combination // will be supported on future releases of Cilium (>= v1.11.0). 
- if fi.BoolValue(v.EnableL7Proxy) { + if fi.ValueOf(v.EnableL7Proxy) { allErrs = append(allErrs, field.Forbidden(fldPath.Child("enableL7Proxy"), "L7 proxy cannot be enabled if wireguard is enabled.")) } } } - if fi.BoolValue(v.EnableL7Proxy) && v.InstallIptablesRules != nil && !*v.InstallIptablesRules { + if fi.ValueOf(v.EnableL7Proxy) && v.InstallIptablesRules != nil && !*v.InstallIptablesRules { allErrs = append(allErrs, field.Forbidden(fldPath.Child("enableL7Proxy"), "Cilium L7 Proxy requires installIptablesRules.")) } @@ -1212,7 +1212,7 @@ func validateEtcdMemberSpec(spec kops.EtcdMemberSpec, fieldPath *field.Path) fie allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "etcdMember did not have name")) } - if fi.StringValue(spec.InstanceGroup) == "" { + if fi.ValueOf(spec.InstanceGroup) == "" { allErrs = append(allErrs, field.Required(fieldPath.Child("instanceGroup"), "etcdMember did not have instanceGroup")) } @@ -1228,7 +1228,7 @@ func validateNetworkingCalico(c *kops.ClusterSpec, v *kops.CalicoNetworkingSpec, } if v.CrossSubnet != nil { - if fi.BoolValue(v.CrossSubnet) && v.AWSSrcDstCheck != "Disable" { + if fi.ValueOf(v.CrossSubnet) && v.AWSSrcDstCheck != "Disable" { field.Invalid(fldPath.Child("crossSubnet"), v.CrossSubnet, "crossSubnet is deprecated, use awsSrcDstCheck instead") } } @@ -1404,13 +1404,13 @@ func validateContainerdConfig(spec *kops.ClusterSpec, config *kops.ContainerdCon if config.Packages != nil { if config.Packages.UrlAmd64 != nil && config.Packages.HashAmd64 != nil { - u := fi.StringValue(config.Packages.UrlAmd64) + u := fi.ValueOf(config.Packages.UrlAmd64) _, err := url.Parse(u) if err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageUrl"), config.Packages.UrlAmd64, fmt.Sprintf("cannot parse package URL: %v", err))) } - h := fi.StringValue(config.Packages.HashAmd64) + h := fi.ValueOf(config.Packages.HashAmd64) if len(h) > 64 { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageHash"), config.Packages.HashAmd64, "Package hash must be 64 characters long")) @@ -1424,13 +1424,13 @@ func validateContainerdConfig(spec *kops.ClusterSpec, config *kops.ContainerdCon } if config.Packages.UrlArm64 != nil && config.Packages.HashArm64 != nil { - u := fi.StringValue(config.Packages.UrlArm64) + u := fi.ValueOf(config.Packages.UrlArm64) _, err := url.Parse(u) if err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageUrlArm64"), config.Packages.UrlArm64, fmt.Sprintf("cannot parse package URL: %v", err))) } - h := fi.StringValue(config.Packages.HashArm64) + h := fi.ValueOf(config.Packages.HashArm64) if len(h) > 64 { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageHashArm64"), config.Packages.HashArm64, "Package hash must be 64 characters long")) @@ -1471,13 +1471,13 @@ func validateDockerConfig(config *kops.DockerConfig, fldPath *field.Path) field. 
if config.Packages != nil { if config.Packages.UrlAmd64 != nil && config.Packages.HashAmd64 != nil { - u := fi.StringValue(config.Packages.UrlAmd64) + u := fi.ValueOf(config.Packages.UrlAmd64) _, err := url.Parse(u) if err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageUrl"), config.Packages.UrlAmd64, fmt.Sprintf("unable parse package URL string: %v", err))) } - h := fi.StringValue(config.Packages.HashAmd64) + h := fi.ValueOf(config.Packages.HashAmd64) if len(h) > 64 { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageHash"), config.Packages.HashAmd64, "Package hash must be 64 characters long")) @@ -1491,13 +1491,13 @@ func validateDockerConfig(config *kops.DockerConfig, fldPath *field.Path) field. } if config.Packages.UrlArm64 != nil && config.Packages.HashArm64 != nil { - u := fi.StringValue(config.Packages.UrlArm64) + u := fi.ValueOf(config.Packages.UrlArm64) _, err := url.Parse(u) if err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageUrlArm64"), config.Packages.UrlArm64, fmt.Sprintf("unable parse package URL string: %v", err))) } - h := fi.StringValue(config.Packages.HashArm64) + h := fi.ValueOf(config.Packages.HashArm64) if len(h) > 64 { allErrs = append(allErrs, field.Invalid(fldPath.Child("packageHashArm64"), config.Packages.HashArm64, "Package hash must be 64 characters long")) @@ -1523,7 +1523,7 @@ func validateDockerConfig(config *kops.DockerConfig, fldPath *field.Path) field. } func validateNvidiaConfig(spec *kops.ClusterSpec, nvidia *kops.NvidiaGPUConfig, fldPath *field.Path, inClusterConfig bool) (allErrs field.ErrorList) { - if !fi.BoolValue(nvidia.Enabled) { + if !fi.ValueOf(nvidia.Enabled) { return allErrs } if spec.GetCloudProvider() != kops.CloudProviderAWS && spec.GetCloudProvider() != kops.CloudProviderOpenstack { @@ -1597,7 +1597,7 @@ func validateNodeLocalDNS(spec *kops.ClusterSpec, fldpath *field.Path) field.Err func validateClusterAutoscaler(cluster *kops.Cluster, spec *kops.ClusterAutoscalerConfig, fldPath *field.Path) (allErrs field.ErrorList) { allErrs = append(allErrs, IsValidValue(fldPath.Child("expander"), spec.Expander, []string{"least-waste", "random", "most-pods", "price", "priority"})...) 
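The fi.String/fi.Bool/fi.Int32/fi.Int64 constructors and the fi.StringValue/fi.BoolValue/fi.Int32Value/fi.IntValue dereference helpers being replaced throughout this patch collapse into two generic functions. A minimal sketch of their likely shape, assuming Go 1.18 type parameters — the exact kops definitions may differ:

// PtrTo returns a pointer to a copy of v, standing in for the old
// per-type fi.String, fi.Bool, fi.Int32, and fi.Int64 constructors.
func PtrTo[T any](v T) *T {
	return &v
}

// ValueOf dereferences p, returning T's zero value when p is nil,
// standing in for fi.StringValue, fi.BoolValue, and friends.
func ValueOf[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

One consequence visible at the call sites above: where the old helper fixed the type itself (fi.Int32(1)), the generic version needs the type spelled out at the argument, hence fi.PtrTo(int32(1)).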
- if fi.StringValue(spec.Expander) == "price" && cluster.Spec.CloudProvider.GCE == nil { + if fi.ValueOf(spec.Expander) == "price" && cluster.Spec.CloudProvider.GCE == nil { allErrs = append(allErrs, field.Forbidden(fldPath.Child("expander"), "Cluster autoscaler price expander is only supported on GCE")) } @@ -1634,8 +1634,8 @@ func validateNodeTerminationHandler(cluster *kops.Cluster, spec *kops.NodeTermin } func validateMetricsServer(cluster *kops.Cluster, spec *kops.MetricsServerConfig, fldPath *field.Path) (allErrs field.ErrorList) { - if spec != nil && fi.BoolValue(spec.Enabled) { - if !fi.BoolValue(spec.Insecure) && !components.IsCertManagerEnabled(cluster) { + if spec != nil && fi.ValueOf(spec.Enabled) { + if !fi.ValueOf(spec.Insecure) && !components.IsCertManagerEnabled(cluster) { allErrs = append(allErrs, field.Forbidden(fldPath.Child("insecure"), "Secure metrics server requires that cert manager is enabled")) } } @@ -1644,7 +1644,7 @@ func validateMetricsServer(cluster *kops.Cluster, spec *kops.MetricsServerConfig } func validateAWSLoadBalancerController(cluster *kops.Cluster, spec *kops.AWSLoadBalancerControllerConfig, fldPath *field.Path) (allErrs field.ErrorList) { - if spec != nil && fi.BoolValue(spec.Enabled) { + if spec != nil && fi.ValueOf(spec.Enabled) { if !components.IsCertManagerEnabled(cluster) { allErrs = append(allErrs, field.Forbidden(fldPath, "AWS Load Balancer Controller requires that cert manager is enabled")) } @@ -1678,7 +1678,7 @@ func validateWarmPool(warmPool *kops.WarmPoolSpec, fldPath *field.Path) (allErrs } func validateSnapshotController(cluster *kops.Cluster, spec *kops.SnapshotControllerConfig, fldPath *field.Path) (allErrs field.ErrorList) { - if spec != nil && fi.BoolValue(spec.Enabled) { + if spec != nil && fi.ValueOf(spec.Enabled) { if !components.IsCertManagerEnabled(cluster) { allErrs = append(allErrs, field.Forbidden(fldPath.Child("enabled"), "Snapshot controller requires that cert manager is enabled")) } @@ -1698,7 +1698,7 @@ func validatePodIdentityWebhook(cluster *kops.Cluster, spec *kops.PodIdentityWeb func validateCertManager(cluster *kops.Cluster, spec *kops.CertManagerConfig, fldPath *field.Path) (allErrs field.ErrorList) { if len(spec.HostedZoneIDs) > 0 { - if !fi.BoolValue(cluster.Spec.IAM.UseServiceAccountExternalPermissions) { + if !fi.ValueOf(cluster.Spec.IAM.UseServiceAccountExternalPermissions) { allErrs = append(allErrs, field.Forbidden(fldPath, "Cert Manager requires that service accounts use external permissions in order to do dns-01 validation")) } } diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go index c0880e3754a78..2a9a3b84925db 100644 --- a/pkg/apis/kops/validation/validation_test.go +++ b/pkg/apis/kops/validation/validation_test.go @@ -138,14 +138,14 @@ func TestValidateSubnets(t *testing.T) { }, { Input: []kops.ClusterSubnetSpec{ - {Name: "a", ProviderID: "a", Type: kops.SubnetTypePublic}, - {Name: "b", ProviderID: "b", Type: kops.SubnetTypePublic}, + {Name: "a", ID: "a", Type: kops.SubnetTypePublic}, + {Name: "b", ID: "b", Type: kops.SubnetTypePublic}, }, }, { Input: []kops.ClusterSubnetSpec{ - {Name: "a", ProviderID: "a", Type: kops.SubnetTypePublic}, - {Name: "b", ProviderID: "", Type: kops.SubnetTypePublic}, + {Name: "a", ID: "a", Type: kops.SubnetTypePublic}, + {Name: "b", ID: "", Type: kops.SubnetTypePublic}, }, ExpectedErrors: []string{"Forbidden::subnets[1].id"}, }, @@ -245,7 +245,7 @@ func TestValidateKubeAPIServer(t *testing.T) { }, { Input: 
kops.KubeAPIServerConfig{ - AuthorizationMode: fi.String("RBAC"), + AuthorizationMode: fi.PtrTo("RBAC"), }, Cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ @@ -264,7 +264,7 @@ func TestValidateKubeAPIServer(t *testing.T) { }, { Input: kops.KubeAPIServerConfig{ - AuthorizationMode: fi.String("RBAC,Node"), + AuthorizationMode: fi.PtrTo("RBAC,Node"), }, Cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ @@ -280,7 +280,7 @@ func TestValidateKubeAPIServer(t *testing.T) { }, { Input: kops.KubeAPIServerConfig{ - AuthorizationMode: fi.String("RBAC,Node,Bogus"), + AuthorizationMode: fi.PtrTo("RBAC,Node,Bogus"), }, Cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ @@ -446,7 +446,7 @@ func Test_Validate_AdditionalPolicies(t *testing.T) { Members: []kops.EtcdMemberSpec{ { Name: "us-test-1a", - InstanceGroup: fi.String("master-us-test-1a"), + InstanceGroup: fi.PtrTo("master-us-test-1a"), }, }, }, @@ -839,7 +839,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Masquerade: fi.Bool(false), + Masquerade: fi.PtrTo(false), IPAM: "eni", }, Spec: kops.ClusterSpec{ @@ -856,7 +856,7 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - Masquerade: fi.Bool(true), + Masquerade: fi.PtrTo(true), IPAM: "eni", }, Spec: kops.ClusterSpec{ @@ -868,8 +868,8 @@ func Test_Validate_Cilium(t *testing.T) { }, { Cilium: kops.CiliumNetworkingSpec{ - EnableL7Proxy: fi.Bool(true), - InstallIptablesRules: fi.Bool(false), + EnableL7Proxy: fi.PtrTo(true), + InstallIptablesRules: fi.PtrTo(false), }, Spec: kops.ClusterSpec{ CloudProvider: kops.CloudProviderSpec{ @@ -914,7 +914,7 @@ func Test_Validate_Cilium(t *testing.T) { Cilium: kops.CiliumNetworkingSpec{ Version: "v1.8.0", Hubble: &kops.HubbleSpec{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, ExpectedErrors: []string{"Forbidden::cilium.hubble.enabled"}, @@ -923,12 +923,12 @@ func Test_Validate_Cilium(t *testing.T) { Cilium: kops.CiliumNetworkingSpec{ Version: "v1.11.6", Hubble: &kops.HubbleSpec{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, Spec: kops.ClusterSpec{ CertManager: &kops.CertManagerConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, }, @@ -1101,7 +1101,7 @@ func Test_Validate_NodeLocalDNS(t *testing.T) { KubeDNS: &kops.KubeDNSConfig{ Provider: "CoreDNS", NodeLocalDNS: &kops.NodeLocalDNSConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, }, @@ -1118,7 +1118,7 @@ func Test_Validate_NodeLocalDNS(t *testing.T) { KubeDNS: &kops.KubeDNSConfig{ Provider: "CoreDNS", NodeLocalDNS: &kops.NodeLocalDNSConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, }, @@ -1135,7 +1135,7 @@ func Test_Validate_NodeLocalDNS(t *testing.T) { KubeDNS: &kops.KubeDNSConfig{ Provider: "CoreDNS", NodeLocalDNS: &kops.NodeLocalDNSConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, Networking: &kops.NetworkingSpec{ @@ -1155,7 +1155,7 @@ func Test_Validate_NodeLocalDNS(t *testing.T) { KubeDNS: &kops.KubeDNSConfig{ Provider: "CoreDNS", NodeLocalDNS: &kops.NodeLocalDNSConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), LocalIP: "169.254.20.10", }, }, @@ -1187,13 +1187,13 @@ func Test_Validate_CloudConfiguration(t *testing.T) { { Description: "all false", Input: kops.CloudConfiguration{ - ManageStorageClasses: fi.Bool(false), + ManageStorageClasses: fi.PtrTo(false), }, }, { Description: "all true", Input: kops.CloudConfiguration{ - ManageStorageClasses: fi.Bool(true), + ManageStorageClasses: fi.PtrTo(true), }, }, { @@ -1202,7 +1202,7 @@ func 
Test_Validate_CloudConfiguration(t *testing.T) { CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ BlockStorage: &kops.OpenstackBlockStorageConfig{ - CreateStorageClass: fi.Bool(false), + CreateStorageClass: fi.PtrTo(false), }, }, }, @@ -1213,7 +1213,7 @@ func Test_Validate_CloudConfiguration(t *testing.T) { CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ BlockStorage: &kops.OpenstackBlockStorageConfig{ - CreateStorageClass: fi.Bool(true), + CreateStorageClass: fi.PtrTo(true), }, }, }, @@ -1221,12 +1221,12 @@ func Test_Validate_CloudConfiguration(t *testing.T) { { Description: "all false, os false", Input: kops.CloudConfiguration{ - ManageStorageClasses: fi.Bool(false), + ManageStorageClasses: fi.PtrTo(false), }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ BlockStorage: &kops.OpenstackBlockStorageConfig{ - CreateStorageClass: fi.Bool(false), + CreateStorageClass: fi.PtrTo(false), }, }, }, @@ -1234,12 +1234,12 @@ func Test_Validate_CloudConfiguration(t *testing.T) { { Description: "all false, os true", Input: kops.CloudConfiguration{ - ManageStorageClasses: fi.Bool(false), + ManageStorageClasses: fi.PtrTo(false), }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ BlockStorage: &kops.OpenstackBlockStorageConfig{ - CreateStorageClass: fi.Bool(true), + CreateStorageClass: fi.PtrTo(true), }, }, }, @@ -1248,12 +1248,12 @@ func Test_Validate_CloudConfiguration(t *testing.T) { { Description: "all true, os false", Input: kops.CloudConfiguration{ - ManageStorageClasses: fi.Bool(true), + ManageStorageClasses: fi.PtrTo(true), }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ BlockStorage: &kops.OpenstackBlockStorageConfig{ - CreateStorageClass: fi.Bool(false), + CreateStorageClass: fi.PtrTo(false), }, }, }, @@ -1262,12 +1262,12 @@ func Test_Validate_CloudConfiguration(t *testing.T) { { Description: "all true, os true", Input: kops.CloudConfiguration{ - ManageStorageClasses: fi.Bool(true), + ManageStorageClasses: fi.PtrTo(true), }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ BlockStorage: &kops.OpenstackBlockStorageConfig{ - CreateStorageClass: fi.Bool(true), + CreateStorageClass: fi.PtrTo(true), }, }, }, @@ -1382,7 +1382,7 @@ func Test_Validate_Nvidia_Cluster(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, CloudProvider: kops.CloudProviderSpec{ @@ -1395,7 +1395,7 @@ func Test_Validate_Nvidia_Cluster(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, CloudProvider: kops.CloudProviderSpec{ @@ -1409,7 +1409,7 @@ func Test_Validate_Nvidia_Cluster(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, CloudProvider: kops.CloudProviderSpec{ @@ -1423,7 +1423,7 @@ func Test_Validate_Nvidia_Cluster(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, CloudProvider: kops.CloudProviderSpec{ @@ -1449,7 +1449,7 @@ func Test_Validate_Nvidia_Ig(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, 
}, CloudProvider: kops.CloudProviderSpec{ @@ -1462,7 +1462,7 @@ func Test_Validate_Nvidia_Ig(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, CloudProvider: kops.CloudProviderSpec{ @@ -1475,7 +1475,7 @@ func Test_Validate_Nvidia_Ig(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, CloudProvider: kops.CloudProviderSpec{ @@ -1489,7 +1489,7 @@ func Test_Validate_Nvidia_Ig(t *testing.T) { Input: kops.ClusterSpec{ Containerd: &kops.ContainerdConfig{ NvidiaGPU: &kops.NvidiaGPUConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, CloudProvider: kops.CloudProviderSpec{ diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index 43ba2feef7382..69b297cc07627 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -28,6 +28,42 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISpec) DeepCopyInto(out *APISpec) { + *out = *in + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSAccessSpec) + **out = **in + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerAccessSpec) + (*in).DeepCopyInto(*out) + } + if in.AdditionalSANs != nil { + in, out := &in.AdditionalSANs, &out.AdditionalSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISpec. +func (in *APISpec) DeepCopy() *APISpec { + if in == nil { + return nil + } + out := new(APISpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSAuthenticationIdentityMappingSpec) DeepCopyInto(out *AWSAuthenticationIdentityMappingSpec) { *out = *in @@ -240,32 +276,6 @@ func (in *AccessLogSpec) DeepCopy() *AccessLogSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessSpec) DeepCopyInto(out *AccessSpec) { - *out = *in - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = new(DNSAccessSpec) - **out = **in - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(LoadBalancerAccessSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessSpec. -func (in *AccessSpec) DeepCopy() *AccessSpec { - if in == nil { - return nil - } - out := new(AccessSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AddonSpec) DeepCopyInto(out *AddonSpec) { *out = *in @@ -1132,11 +1142,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(DNSControllerGossipConfig) (*in).DeepCopyInto(*out) } - if in.AdditionalSANs != nil { - in, out := &in.AdditionalSANs, &out.AdditionalSANs - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.SSHAccess != nil { in, out := &in.SSHAccess, &out.SSHAccess *out = make([]string, len(*in)) @@ -1157,11 +1162,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(string) **out = **in } - if in.KubernetesAPIAccess != nil { - in, out := &in.KubernetesAPIAccess, &out.KubernetesAPIAccess - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.IsolateMasters != nil { in, out := &in.IsolateMasters, &out.IsolateMasters *out = new(bool) @@ -1311,11 +1311,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(NetworkingSpec) (*in).DeepCopyInto(*out) } - if in.API != nil { - in, out := &in.API, &out.API - *out = new(AccessSpec) - (*in).DeepCopyInto(*out) - } + in.API.DeepCopyInto(&out.API) if in.Authentication != nil { in, out := &in.Authentication, &out.Authentication *out = new(AuthenticationSpec) @@ -2678,7 +2674,7 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { } if in.ExternalLoadBalancers != nil { in, out := &in.ExternalLoadBalancers, &out.ExternalLoadBalancers - *out = make([]LoadBalancer, len(*in)) + *out = make([]LoadBalancerSpec, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -4205,32 +4201,6 @@ func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { - *out = *in - if in.LoadBalancerName != nil { - in, out := &in.LoadBalancerName, &out.LoadBalancerName - *out = new(string) - **out = **in - } - if in.TargetGroupARN != nil { - in, out := &in.TargetGroupARN, &out.TargetGroupARN - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. -func (in *LoadBalancer) DeepCopy() *LoadBalancer { - if in == nil { - return nil - } - out := new(LoadBalancer) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerAccessSpec) DeepCopyInto(out *LoadBalancerAccessSpec) { *out = *in @@ -4284,6 +4254,32 @@ func (in *LoadBalancerAccessSpec) DeepCopy() *LoadBalancerAccessSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerSpec) DeepCopyInto(out *LoadBalancerSpec) { + *out = *in + if in.LoadBalancerName != nil { + in, out := &in.LoadBalancerName, &out.LoadBalancerName + *out = new(string) + **out = **in + } + if in.TargetGroupARN != nil { + in, out := &in.TargetGroupARN, &out.TargetGroupARN + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSpec. +func (in *LoadBalancerSpec) DeepCopy() *LoadBalancerSpec { + if in == nil { + return nil + } + out := new(LoadBalancerSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LoadBalancerSubnetSpec) DeepCopyInto(out *LoadBalancerSubnetSpec) { *out = *in diff --git a/pkg/apis/nodeup/config.go b/pkg/apis/nodeup/config.go index fe3cc905abc6d..911d1d9f2828b 100644 --- a/pkg/apis/nodeup/config.go +++ b/pkg/apis/nodeup/config.go @@ -174,7 +174,7 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Confi } if cluster.Spec.Networking != nil && cluster.Spec.Networking.AmazonVPC != nil { - config.DefaultMachineType = fi.String(strings.Split(instanceGroup.Spec.MachineType, ",")[0]) + config.DefaultMachineType = fi.PtrTo(strings.Split(instanceGroup.Spec.MachineType, ",")[0]) } if UsesInstanceIDForNodeName(cluster) { diff --git a/pkg/commands/set_cluster_test.go b/pkg/commands/set_cluster_test.go index 21dd3a59494a2..cf042cca3c15a 100644 --- a/pkg/commands/set_cluster_test.go +++ b/pkg/commands/set_cluster_test.go @@ -57,7 +57,7 @@ func TestSetClusterFields(t *testing.T) { KubernetesVersion: "1.8.2", Kubelet: &kops.KubeletConfigSpec{ AuthorizationMode: "Webhook", - AuthenticationTokenWebhook: fi.Bool(true), + AuthenticationTokenWebhook: fi.PtrTo(true), }, }, }, @@ -71,7 +71,7 @@ func TestSetClusterFields(t *testing.T) { }, Output: kops.Cluster{ Spec: kops.ClusterSpec{ - API: &kops.AccessSpec{ + API: kops.APISpec{ DNS: &kops.DNSAccessSpec{}, }, }, @@ -92,7 +92,7 @@ func TestSetClusterFields(t *testing.T) { Output: kops.Cluster{ Spec: kops.ClusterSpec{ Kubelet: &kops.KubeletConfigSpec{ - AuthenticationTokenWebhook: fi.Bool(false), + AuthenticationTokenWebhook: fi.PtrTo(false), }, }, }, @@ -102,7 +102,7 @@ func TestSetClusterFields(t *testing.T) { Output: kops.Cluster{ Spec: kops.ClusterSpec{ Docker: &kops.DockerConfig{ - SelinuxEnabled: fi.Bool(true), + SelinuxEnabled: fi.PtrTo(true), }, }, }, @@ -116,10 +116,12 @@ func TestSetClusterFields(t *testing.T) { }, }, { - Fields: []string{"spec.masterPublicName=api.example.com"}, + Fields: []string{"spec.api.publicName=api.example.com"}, Output: kops.Cluster{ Spec: kops.ClusterSpec{ - MasterPublicName: "api.example.com", + API: kops.APISpec{ + PublicName: "api.example.com", + }, }, }, }, @@ -240,7 +242,7 @@ func TestSetClusterFields(t *testing.T) { Spec: kops.ClusterSpec{ Networking: &kops.NetworkingSpec{ Cilium: &kops.CiliumNetworkingSpec{ - Masquerade: fi.Bool(false), + Masquerade: fi.PtrTo(false), }, }, }, @@ -254,7 +256,7 @@ func TestSetClusterFields(t *testing.T) { Output: kops.Cluster{ Spec: kops.ClusterSpec{ KubeProxy: &kops.KubeProxyConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, }, @@ -312,13 +314,13 @@ func TestSetCiliumFields(t *testing.T) { Output: kops.Cluster{ Spec: kops.ClusterSpec{ KubeProxy: &kops.KubeProxyConfig{ - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), }, Networking: &kops.NetworkingSpec{ Cilium: &kops.CiliumNetworkingSpec{ IPAM: "eni", EnableNodePort: true, - Masquerade: fi.Bool(false), + Masquerade: fi.PtrTo(false), }, }, }, diff --git a/pkg/commands/set_instancegroups_test.go b/pkg/commands/set_instancegroups_test.go index c97e42839c753..eebc3fe3b7f1c 100644 --- a/pkg/commands/set_instancegroups_test.go +++ b/pkg/commands/set_instancegroups_test.go @@ -73,8 +73,8 @@ func TestSetInstanceGroupsFields(t *testing.T) { }, Output: kops.InstanceGroup{ Spec: kops.InstanceGroupSpec{ - MinSize: fi.Int32(1), - MaxSize: fi.Int32(3), + MinSize: fi.PtrTo(int32(1)), + MaxSize: fi.PtrTo(int32(3)), }, }, }, diff --git a/pkg/commands/unset_cluster_test.go b/pkg/commands/unset_cluster_test.go index a0da02ff32a6f..db1ff0ef6af68 100644 --- 
a/pkg/commands/unset_cluster_test.go +++ b/pkg/commands/unset_cluster_test.go @@ -52,7 +52,7 @@ func TestUnsetClusterFields(t *testing.T) { KubernetesVersion: "1.8.2", Kubelet: &kops.KubeletConfigSpec{ AuthorizationMode: "Webhook", - AuthenticationTokenWebhook: fi.Bool(true), + AuthenticationTokenWebhook: fi.PtrTo(true), }, }, }, @@ -68,15 +68,13 @@ func TestUnsetClusterFields(t *testing.T) { }, Input: kops.Cluster{ Spec: kops.ClusterSpec{ - API: &kops.AccessSpec{ + API: kops.APISpec{ DNS: &kops.DNSAccessSpec{}, }, }, }, Output: kops.Cluster{ - Spec: kops.ClusterSpec{ - API: &kops.AccessSpec{}, - }, + Spec: kops.ClusterSpec{}, }, }, { @@ -99,7 +97,7 @@ func TestUnsetClusterFields(t *testing.T) { Input: kops.Cluster{ Spec: kops.ClusterSpec{ Kubelet: &kops.KubeletConfigSpec{ - AuthenticationTokenWebhook: fi.Bool(false), + AuthenticationTokenWebhook: fi.PtrTo(false), }, }, }, @@ -114,7 +112,7 @@ func TestUnsetClusterFields(t *testing.T) { Input: kops.Cluster{ Spec: kops.ClusterSpec{ Docker: &kops.DockerConfig{ - SelinuxEnabled: fi.Bool(true), + SelinuxEnabled: fi.PtrTo(true), }, }, }, @@ -136,10 +134,12 @@ func TestUnsetClusterFields(t *testing.T) { }, }, { - Fields: []string{"spec.masterPublicName"}, + Fields: []string{"spec.api.publicName"}, Input: kops.Cluster{ Spec: kops.ClusterSpec{ - MasterPublicName: "api.example.com", + API: kops.APISpec{ + PublicName: "api.example.com", + }, }, }, Output: kops.Cluster{ @@ -308,7 +308,7 @@ func TestUnsetClusterFields(t *testing.T) { Spec: kops.ClusterSpec{ Networking: &kops.NetworkingSpec{ Cilium: &kops.CiliumNetworkingSpec{ - Masquerade: fi.Bool(false), + Masquerade: fi.PtrTo(false), }, }, }, @@ -328,7 +328,7 @@ func TestUnsetClusterFields(t *testing.T) { Input: kops.Cluster{ Spec: kops.ClusterSpec{ KubeProxy: &kops.KubeProxyConfig{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, }, @@ -394,13 +394,13 @@ func TestUnsetCiliumFields(t *testing.T) { Input: kops.Cluster{ Spec: kops.ClusterSpec{ KubeProxy: &kops.KubeProxyConfig{ - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), }, Networking: &kops.NetworkingSpec{ Cilium: &kops.CiliumNetworkingSpec{ IPAM: "eni", EnableNodePort: true, - Masquerade: fi.Bool(false), + Masquerade: fi.PtrTo(false), }, }, }, diff --git a/pkg/commands/unset_instancegroups_test.go b/pkg/commands/unset_instancegroups_test.go index fb4d83d411b1e..7606344a7bfec 100644 --- a/pkg/commands/unset_instancegroups_test.go +++ b/pkg/commands/unset_instancegroups_test.go @@ -74,8 +74,8 @@ func TestUnsetInstanceGroupsFields(t *testing.T) { }, Input: kops.InstanceGroup{ Spec: kops.InstanceGroupSpec{ - MinSize: fi.Int32(1), - MaxSize: fi.Int32(3), + MinSize: fi.PtrTo(int32(1)), + MaxSize: fi.PtrTo(int32(3)), }, }, Output: kops.InstanceGroup{ diff --git a/pkg/flagbuilder/buildflags_test.go b/pkg/flagbuilder/buildflags_test.go index b6f31d0e770ac..bf375748bd4cc 100644 --- a/pkg/flagbuilder/buildflags_test.go +++ b/pkg/flagbuilder/buildflags_test.go @@ -45,7 +45,7 @@ func TestBuildKCMFlags(t *testing.T) { }, { Config: &kops.KubeControllerManagerConfig{ - TerminatedPodGCThreshold: fi.Int32(1500), + TerminatedPodGCThreshold: fi.PtrTo(int32(1500)), }, Expected: "--terminated-pod-gc-threshold=1500", }, @@ -57,7 +57,7 @@ func TestBuildKCMFlags(t *testing.T) { }, { Config: &kops.KubeControllerManagerConfig{ - KubeAPIBurst: fi.Int32(80), + KubeAPIBurst: fi.PtrTo(int32(80)), }, Expected: "--kube-api-burst=80", }, @@ -100,13 +100,13 @@ func TestKubeletConfigSpec(t *testing.T) { }, { Config: &kops.KubeletConfigSpec{ - LogLevel: fi.Int32(0), 
+ LogLevel: fi.PtrTo(int32(0)), }, Expected: "", }, { Config: &kops.KubeletConfigSpec{ - LogLevel: fi.Int32(2), + LogLevel: fi.PtrTo(int32(2)), }, Expected: "--v=2", }, @@ -114,13 +114,13 @@ func TestKubeletConfigSpec(t *testing.T) { // Test string pointers without the "flag-include-empty" tag { Config: &kops.KubeletConfigSpec{ - EvictionHard: fi.String("memory.available<100Mi"), + EvictionHard: fi.PtrTo("memory.available<100Mi"), }, Expected: "--eviction-hard=memory.available<100Mi", }, { Config: &kops.KubeletConfigSpec{ - EvictionHard: fi.String(""), + EvictionHard: fi.PtrTo(""), }, Expected: "", }, @@ -132,13 +132,13 @@ func TestKubeletConfigSpec(t *testing.T) { }, { Config: &kops.KubeletConfigSpec{ - ResolverConfig: fi.String("test"), + ResolverConfig: fi.PtrTo("test"), }, Expected: "--resolv-conf=test", }, { Config: &kops.KubeletConfigSpec{ - ResolverConfig: fi.String(""), + ResolverConfig: fi.PtrTo(""), }, Expected: "--resolv-conf=", }, @@ -181,13 +181,13 @@ func TestBuildAPIServerFlags(t *testing.T) { }, { Config: &kops.KubeAPIServerConfig{ - AuditWebhookBatchThrottleEnable: fi.Bool(true), + AuditWebhookBatchThrottleEnable: fi.PtrTo(true), }, Expected: "--audit-webhook-batch-throttle-enable=true --secure-port=0", }, { Config: &kops.KubeAPIServerConfig{ - AuditWebhookBatchThrottleEnable: fi.Bool(false), + AuditWebhookBatchThrottleEnable: fi.PtrTo(false), }, Expected: "--audit-webhook-batch-throttle-enable=false --secure-port=0", }, @@ -199,13 +199,13 @@ func TestBuildAPIServerFlags(t *testing.T) { }, { Config: &kops.KubeAPIServerConfig{ - AuditWebhookBatchMaxSize: fi.Int32(1000), + AuditWebhookBatchMaxSize: fi.PtrTo(int32(1000)), }, Expected: "--audit-webhook-batch-max-size=1000 --secure-port=0", }, { Config: &kops.KubeAPIServerConfig{ - AuthorizationWebhookConfigFile: fi.String("/authorization.yaml"), + AuthorizationWebhookConfigFile: fi.PtrTo("/authorization.yaml"), }, Expected: "--authorization-webhook-config-file=/authorization.yaml --secure-port=0", }, diff --git a/pkg/instancegroups/rollingupdate_test.go b/pkg/instancegroups/rollingupdate_test.go index 66b3622633c6a..d2acbc86d802e 100644 --- a/pkg/instancegroups/rollingupdate_test.go +++ b/pkg/instancegroups/rollingupdate_test.go @@ -893,7 +893,7 @@ func TestRollingUpdateDisabled(t *testing.T) { c.CloudOnly = true c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ - DrainAndTerminate: fi.Bool(false), + DrainAndTerminate: fi.PtrTo(false), } groups := getGroupsAllNeedUpdate(c.K8sClient, cloud) @@ -936,7 +936,7 @@ func TestRollingUpdateDisabledSurge(t *testing.T) { one := intstr.FromInt(1) c.Cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{ - DrainAndTerminate: fi.Bool(false), + DrainAndTerminate: fi.PtrTo(false), MaxSurge: &one, } diff --git a/pkg/instancegroups/settings.go b/pkg/instancegroups/settings.go index 45507a1a8b072..8485c916b0467 100644 --- a/pkg/instancegroups/settings.go +++ b/pkg/instancegroups/settings.go @@ -43,7 +43,7 @@ func resolveSettings(cluster *kops.Cluster, group *kops.InstanceGroup, numInstan } if rollingUpdate.DrainAndTerminate == nil { - rollingUpdate.DrainAndTerminate = fi.Bool(true) + rollingUpdate.DrainAndTerminate = fi.PtrTo(true) } if rollingUpdate.MaxSurge == nil { diff --git a/pkg/kubeconfig/create_kubecfg.go b/pkg/kubeconfig/create_kubecfg.go index f076630f3009e..ffabe67aa0e87 100644 --- a/pkg/kubeconfig/create_kubecfg.go +++ b/pkg/kubeconfig/create_kubecfg.go @@ -37,12 +37,9 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se var master string if internal 
{ - master = cluster.Spec.MasterInternalName - if master == "" { - master = "api.internal." + clusterName - } + master = cluster.APIInternalName() } else { - master = cluster.Spec.MasterPublicName + master = cluster.Spec.API.PublicName if master == "" { master = "api." + clusterName } @@ -68,7 +65,7 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se // We differentiate using the heuristic that if we have an internal ELB // we are likely connected directly to the VPC. privateDNS := cluster.Spec.Topology != nil && cluster.Spec.Topology.DNS == kops.DNSTypePrivate - internalELB := cluster.Spec.API != nil && cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.Type == kops.LoadBalancerTypeInternal + internalELB := cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.Type == kops.LoadBalancerTypeInternal if privateDNS && !internalELB { useELBName = true } @@ -103,7 +100,7 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se b := NewKubeconfigBuilder() // Use the secondary load balancer port if a certificate is on the primary listener - if admin != 0 && cluster.Spec.API != nil && cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.SSLCertificate != "" && cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork { + if admin != 0 && cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.SSLCertificate != "" && cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork { server = server + ":8443" } @@ -112,7 +109,7 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se // add the CA Cert to the kubeconfig only if we didn't specify a certificate for the LB // or if we're using admin credentials and the secondary port - if cluster.Spec.API == nil || cluster.Spec.API.LoadBalancer == nil || cluster.Spec.API.LoadBalancer.SSLCertificate == "" || cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork || internal { + if cluster.Spec.API.LoadBalancer == nil || cluster.Spec.API.LoadBalancer.SSLCertificate == "" || cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork || internal { keySet, err := keyStore.FindKeyset(fi.CertificateIDCA) if err != nil { return nil, fmt.Errorf("error fetching CA keypair: %v", err) diff --git a/pkg/kubeconfig/create_kubecfg_test.go b/pkg/kubeconfig/create_kubecfg_test.go index d2870164348ad..2cb85ad834748 100644 --- a/pkg/kubeconfig/create_kubecfg_test.go +++ b/pkg/kubeconfig/create_kubecfg_test.go @@ -17,7 +17,6 @@ limitations under the License. 
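The BuildKubecfg hunk above swaps the stored Spec.MasterInternalName for a computed cluster.APIInternalName(). Judging from the removed fallback and the updated test expectation (https://api.internal.testcluster), the accessor plausibly reduces to the sketch below — an assumption, not the verbatim kops method:

// APIInternalName derives the internal API endpoint from the cluster name,
// making the old "api.internal." + clusterName fallback the only code path.
func (c *Cluster) APIInternalName() string {
	return "api.internal." + c.ObjectMeta.Name
}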
package kubeconfig import ( - "fmt" "testing" "time" @@ -120,12 +119,9 @@ func (f fakeKeyStore) MirrorTo(basedir vfs.Path) error { // build a generic minimal cluster func buildMinimalCluster(clusterName string, masterPublicName string, lbCert bool, nlb bool) *kops.Cluster { cluster := testutils.BuildMinimalCluster(clusterName) - cluster.Spec.MasterPublicName = masterPublicName - cluster.Spec.MasterInternalName = fmt.Sprintf("internal.%v", masterPublicName) + cluster.Spec.API.PublicName = masterPublicName cluster.Spec.KubernetesVersion = "1.24.0" - cluster.Spec.API = &kops.AccessSpec{ - LoadBalancer: &kops.LoadBalancerAccessSpec{}, - } + cluster.Spec.API.LoadBalancer = &kops.LoadBalancerAccessSpec{} if lbCert { cluster.Spec.API.LoadBalancer.SSLCertificate = "cert-arn" } @@ -324,7 +320,7 @@ func TestBuildKubecfg(t *testing.T) { }, want: &KubeconfigBuilder{ Context: "testcluster", - Server: "https://internal.testcluster.test.com", + Server: "https://api.internal.testcluster", CACerts: []byte(nextCertificate + certData), User: "testcluster", }, diff --git a/pkg/model/awsmodel/api_loadbalancer.go b/pkg/model/awsmodel/api_loadbalancer.go index 098083bcf0ccb..df00c4a844a50 100644 --- a/pkg/model/awsmodel/api_loadbalancer.go +++ b/pkg/model/awsmodel/api_loadbalancer.go @@ -180,26 +180,26 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { Name: &name, Lifecycle: b.Lifecycle, - LoadBalancerName: fi.String(loadBalancerName), - CLBName: fi.String("api." + b.ClusterName()), + LoadBalancerName: fi.PtrTo(loadBalancerName), + CLBName: fi.PtrTo("api." + b.ClusterName()), SubnetMappings: nlbSubnetMappings, Listeners: nlbListeners, TargetGroups: make([]*awstasks.TargetGroup, 0), Tags: tags, VPC: b.LinkToVPC(), - Type: fi.String("network"), - IpAddressType: fi.String("ipv4"), + Type: fi.PtrTo("network"), + IpAddressType: fi.PtrTo("ipv4"), } if b.UseIPv6ForAPI() { - nlb.IpAddressType = fi.String("dualstack") + nlb.IpAddressType = fi.PtrTo("dualstack") } clb = &awstasks.ClassicLoadBalancer{ - Name: fi.String("api." + b.ClusterName()), + Name: fi.PtrTo("api." 
+ b.ClusterName()),
      Lifecycle: b.Lifecycle,

-     LoadBalancerName: fi.String(loadBalancerName),
+     LoadBalancerName: fi.PtrTo(loadBalancerName),

      SecurityGroups: []*awstasks.SecurityGroup{
        b.LinkToELBSecurityGroup("api"),
      },
@@ -208,29 +208,29 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {

      // Configure fast-recovery health-checks
      HealthCheck: &awstasks.ClassicLoadBalancerHealthCheck{
-       Target: fi.String("SSL:443"),
-       Timeout: fi.Int64(5),
-       Interval: fi.Int64(10),
-       HealthyThreshold: fi.Int64(2),
-       UnhealthyThreshold: fi.Int64(2),
+       Target: fi.PtrTo("SSL:443"),
+       Timeout: fi.PtrTo(int64(5)),
+       Interval: fi.PtrTo(int64(10)),
+       HealthyThreshold: fi.PtrTo(int64(2)),
+       UnhealthyThreshold: fi.PtrTo(int64(2)),
      },

      ConnectionSettings: &awstasks.ClassicLoadBalancerConnectionSettings{
-       IdleTimeout: fi.Int64(int64(idleTimeout.Seconds())),
+       IdleTimeout: fi.PtrTo(int64(idleTimeout.Seconds())),
      },

      ConnectionDraining: &awstasks.ClassicLoadBalancerConnectionDraining{
-       Enabled: fi.Bool(true),
-       Timeout: fi.Int64(300),
+       Enabled: fi.PtrTo(true),
+       Timeout: fi.PtrTo(int64(300)),
      },

      Tags: tags,
    }

    if b.Cluster.UsesNoneDNS() {
-     lbSpec.CrossZoneLoadBalancing = fi.Bool(true)
+     lbSpec.CrossZoneLoadBalancing = fi.PtrTo(true)
    } else if lbSpec.CrossZoneLoadBalancing == nil {
-     lbSpec.CrossZoneLoadBalancing = fi.Bool(false)
+     lbSpec.CrossZoneLoadBalancing = fi.PtrTo(false)
    }

    clb.CrossZoneLoadBalancing = &awstasks.ClassicLoadBalancerCrossZoneLoadBalancing{
@@ -241,8 +241,8 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {

  switch lbSpec.Type {
  case kops.LoadBalancerTypeInternal:
-   clb.Scheme = fi.String("internal")
-   nlb.Scheme = fi.String("internal")
+   clb.Scheme = fi.PtrTo("internal")
+   nlb.Scheme = fi.PtrTo("internal")
  case kops.LoadBalancerTypePublic:
    clb.Scheme = nil
    nlb.Scheme = nil
@@ -252,22 +252,22 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {

  if lbSpec.AccessLog != nil {
    clb.AccessLog = &awstasks.ClassicLoadBalancerAccessLog{
-     EmitInterval: fi.Int64(int64(lbSpec.AccessLog.Interval)),
-     Enabled: fi.Bool(true),
+     EmitInterval: fi.PtrTo(int64(lbSpec.AccessLog.Interval)),
+     Enabled: fi.PtrTo(true),
      S3BucketName: lbSpec.AccessLog.Bucket,
      S3BucketPrefix: lbSpec.AccessLog.BucketPrefix,
    }
    nlb.AccessLog = &awstasks.NetworkLoadBalancerAccessLog{
-     Enabled: fi.Bool(true),
+     Enabled: fi.PtrTo(true),
      S3BucketName: lbSpec.AccessLog.Bucket,
      S3BucketPrefix: lbSpec.AccessLog.BucketPrefix,
    }
  } else {
    clb.AccessLog = &awstasks.ClassicLoadBalancerAccessLog{
-     Enabled: fi.Bool(false),
+     Enabled: fi.PtrTo(false),
    }
    nlb.AccessLog = &awstasks.NetworkLoadBalancerAccessLog{
-     Enabled: fi.Bool(false),
+     Enabled: fi.PtrTo(false),
    }
  }
@@ -283,16 +283,16 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
    groupTags["Name"] = groupName

    tg := &awstasks.TargetGroup{
-     Name: fi.String(groupName),
+     Name: fi.PtrTo(groupName),
      Lifecycle: b.Lifecycle,
      VPC: b.LinkToVPC(),
      Tags: groupTags,
-     Protocol: fi.String("TCP"),
-     Port: fi.Int64(443),
-     Interval: fi.Int64(10),
-     HealthyThreshold: fi.Int64(2),
-     UnhealthyThreshold: fi.Int64(2),
-     Shared: fi.Bool(false),
+     Protocol: fi.PtrTo("TCP"),
+     Port: fi.PtrTo(int64(443)),
+     Interval: fi.PtrTo(int64(10)),
+     HealthyThreshold: fi.PtrTo(int64(2)),
+     UnhealthyThreshold: fi.PtrTo(int64(2)),
+     Shared: fi.PtrTo(false),
    }
    c.AddTask(tg)
@@ -308,16 +308,16 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
    groupTags["Name"] = groupName

    tg := &awstasks.TargetGroup{
-     Name: fi.String(groupName),
+     Name: fi.PtrTo(groupName),
      Lifecycle: b.Lifecycle,
      VPC: b.LinkToVPC(),
      Tags: groupTags,
-     Protocol: fi.String("TCP"),
-     Port: fi.Int64(wellknownports.KopsControllerPort),
-     Interval: fi.Int64(10),
-     HealthyThreshold: fi.Int64(2),
-     UnhealthyThreshold: fi.Int64(2),
-     Shared: fi.Bool(false),
+     Protocol: fi.PtrTo("TCP"),
+     Port: fi.PtrTo(int64(wellknownports.KopsControllerPort)),
+     Interval: fi.PtrTo(int64(10)),
+     HealthyThreshold: fi.PtrTo(int64(2)),
+     UnhealthyThreshold: fi.PtrTo(int64(2)),
+     Shared: fi.PtrTo(false),
    }
    c.AddTask(tg)
@@ -332,16 +332,16 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
    // Override the returned name to be the expected NLB TG name
    tlsGroupTags["Name"] = tlsGroupName
    secondaryTG := &awstasks.TargetGroup{
-     Name: fi.String(tlsGroupName),
+     Name: fi.PtrTo(tlsGroupName),
      Lifecycle: b.Lifecycle,
      VPC: b.LinkToVPC(),
      Tags: tlsGroupTags,
-     Protocol: fi.String("TLS"),
-     Port: fi.Int64(443),
-     Interval: fi.Int64(10),
-     HealthyThreshold: fi.Int64(2),
-     UnhealthyThreshold: fi.Int64(2),
-     Shared: fi.Bool(false),
+     Protocol: fi.PtrTo("TLS"),
+     Port: fi.PtrTo(int64(443)),
+     Interval: fi.PtrTo(int64(10)),
+     HealthyThreshold: fi.PtrTo(int64(2)),
+     UnhealthyThreshold: fi.PtrTo(int64(2)),
+     Shared: fi.PtrTo(false),
    }
    c.AddTask(secondaryTG)
    nlb.TargetGroups = append(nlb.TargetGroups, secondaryTG)
@@ -355,17 +355,17 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
  var lbSG *awstasks.SecurityGroup
  {
    lbSG = &awstasks.SecurityGroup{
-     Name: fi.String(b.ELBSecurityGroupName("api")),
+     Name: fi.PtrTo(b.ELBSecurityGroupName("api")),
      Lifecycle: b.SecurityLifecycle,
-     Description: fi.String("Security group for api ELB"),
+     Description: fi.PtrTo("Security group for api ELB"),
      RemoveExtraRules: []string{"port=443"},
      VPC: b.LinkToVPC(),
    }
    lbSG.Tags = b.CloudTags(*lbSG.Name, false)

    if lbSpec.SecurityGroupOverride != nil {
-     lbSG.ID = fi.String(*lbSpec.SecurityGroupOverride)
-     lbSG.Shared = fi.Bool(true)
+     lbSG.ID = fi.PtrTo(*lbSpec.SecurityGroupOverride)
+     lbSG.Shared = fi.PtrTo(true)
    }

    c.AddTask(lbSG)
@@ -375,20 +375,20 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
  if b.APILoadBalancerClass() == kops.LoadBalancerClassClassic {
    {
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String("ipv4-api-elb-egress"),
+       Name: fi.PtrTo("ipv4-api-elb-egress"),
        Lifecycle: b.SecurityLifecycle,
-       CIDR: fi.String("0.0.0.0/0"),
-       Egress: fi.Bool(true),
+       CIDR: fi.PtrTo("0.0.0.0/0"),
+       Egress: fi.PtrTo(true),
        SecurityGroup: lbSG,
      }
      AddDirectionalGroupRule(c, t)
    }
    {
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String("ipv6-api-elb-egress"),
+       Name: fi.PtrTo("ipv6-api-elb-egress"),
        Lifecycle: b.SecurityLifecycle,
-       IPv6CIDR: fi.String("::/0"),
-       Egress: fi.Bool(true),
+       IPv6CIDR: fi.PtrTo("::/0"),
+       Egress: fi.PtrTo(true),
        SecurityGroup: lbSG,
      }
      AddDirectionalGroupRule(c, t)
@@ -397,15 +397,15 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {

  // Allow traffic into the ELB from KubernetesAPIAccess CIDRs
  if b.APILoadBalancerClass() == kops.LoadBalancerClassClassic {
-   for _, cidr := range b.Cluster.Spec.KubernetesAPIAccess {
+   for _, cidr := range b.Cluster.Spec.API.Access {
      {
        t := &awstasks.SecurityGroupRule{
-         Name: fi.String("https-api-elb-" + cidr),
+         Name: fi.PtrTo("https-api-elb-" + cidr),
          Lifecycle: b.SecurityLifecycle,
-         FromPort: fi.Int64(443),
-         Protocol: fi.String("tcp"),
+         FromPort: fi.PtrTo(int64(443)),
+         Protocol: fi.PtrTo("tcp"),
          SecurityGroup: lbSG,
-         ToPort: fi.Int64(443),
+         ToPort: fi.PtrTo(int64(443)),
        }
        t.SetCidrOrPrefix(cidr)
        AddDirectionalGroupRule(c, t)
@@ -414,23 +414,23 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
      // Allow ICMP traffic required for PMTU discovery
      if utils.IsIPv6CIDR(cidr) {
        c.AddTask(&awstasks.SecurityGroupRule{
-         Name: fi.String("icmpv6-pmtu-api-elb-" + cidr),
+         Name: fi.PtrTo("icmpv6-pmtu-api-elb-" + cidr),
          Lifecycle: b.SecurityLifecycle,
-         IPv6CIDR: fi.String(cidr),
-         FromPort: fi.Int64(-1),
-         Protocol: fi.String("icmpv6"),
+         IPv6CIDR: fi.PtrTo(cidr),
+         FromPort: fi.PtrTo(int64(-1)),
+         Protocol: fi.PtrTo("icmpv6"),
          SecurityGroup: lbSG,
-         ToPort: fi.Int64(-1),
+         ToPort: fi.PtrTo(int64(-1)),
        })
      } else {
        c.AddTask(&awstasks.SecurityGroupRule{
-         Name: fi.String("icmp-pmtu-api-elb-" + cidr),
+         Name: fi.PtrTo("icmp-pmtu-api-elb-" + cidr),
          Lifecycle: b.SecurityLifecycle,
-         CIDR: fi.String(cidr),
-         FromPort: fi.Int64(3),
-         Protocol: fi.String("icmp"),
+         CIDR: fi.PtrTo(cidr),
+         FromPort: fi.PtrTo(int64(3)),
+         Protocol: fi.PtrTo("icmp"),
          SecurityGroup: lbSG,
-         ToPort: fi.Int64(4),
+         ToPort: fi.PtrTo(int64(4)),
        })
      }
    }
@@ -442,16 +442,16 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
  }

  if b.APILoadBalancerClass() == kops.LoadBalancerClassNetwork {
-   for _, cidr := range b.Cluster.Spec.KubernetesAPIAccess {
+   for _, cidr := range b.Cluster.Spec.API.Access {
      for _, masterGroup := range masterGroups {
        {
          t := &awstasks.SecurityGroupRule{
-           Name: fi.String(fmt.Sprintf("https-api-elb-%s", cidr)),
+           Name: fi.PtrTo(fmt.Sprintf("https-api-elb-%s", cidr)),
            Lifecycle: b.SecurityLifecycle,
-           FromPort: fi.Int64(443),
-           Protocol: fi.String("tcp"),
+           FromPort: fi.PtrTo(int64(443)),
+           Protocol: fi.PtrTo("tcp"),
            SecurityGroup: masterGroup.Task,
-           ToPort: fi.Int64(443),
+           ToPort: fi.PtrTo(int64(443)),
          }
          t.SetCidrOrPrefix(cidr)
          AddDirectionalGroupRule(c, t)
@@ -463,37 +463,37 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
        } else if utils.IsIPv6CIDR(cidr) {
          // Allow ICMP traffic required for PMTU discovery
          t := &awstasks.SecurityGroupRule{
-           Name: fi.String("icmpv6-pmtu-api-elb-" + cidr),
+           Name: fi.PtrTo("icmpv6-pmtu-api-elb-" + cidr),
            Lifecycle: b.SecurityLifecycle,
-           FromPort: fi.Int64(-1),
-           Protocol: fi.String("icmpv6"),
+           FromPort: fi.PtrTo(int64(-1)),
+           Protocol: fi.PtrTo("icmpv6"),
            SecurityGroup: masterGroup.Task,
-           ToPort: fi.Int64(-1),
+           ToPort: fi.PtrTo(int64(-1)),
          }
          t.SetCidrOrPrefix(cidr)
          c.AddTask(t)
        } else {
          t := &awstasks.SecurityGroupRule{
-           Name: fi.String("icmp-pmtu-api-elb-" + cidr),
+           Name: fi.PtrTo("icmp-pmtu-api-elb-" + cidr),
            Lifecycle: b.SecurityLifecycle,
-           FromPort: fi.Int64(3),
-           Protocol: fi.String("icmp"),
+           FromPort: fi.PtrTo(int64(3)),
+           Protocol: fi.PtrTo("icmp"),
            SecurityGroup: masterGroup.Task,
-           ToPort: fi.Int64(4),
+           ToPort: fi.PtrTo(int64(4)),
          }
          t.SetCidrOrPrefix(cidr)
          c.AddTask(t)
        }

-       if b.Cluster.Spec.API != nil && b.Cluster.Spec.API.LoadBalancer != nil && b.Cluster.Spec.API.LoadBalancer.SSLCertificate != "" {
+       if b.Cluster.Spec.API.LoadBalancer != nil && b.Cluster.Spec.API.LoadBalancer.SSLCertificate != "" {
          // Allow access to masters on secondary port through NLB
          t := &awstasks.SecurityGroupRule{
-           Name: fi.String(fmt.Sprintf("tcp-api-%s", cidr)),
+           Name: fi.PtrTo(fmt.Sprintf("tcp-api-%s", cidr)),
            Lifecycle: b.SecurityLifecycle,
-           FromPort: fi.Int64(8443),
-           Protocol: fi.String("tcp"),
+           FromPort: fi.PtrTo(int64(8443)),
+           Protocol: fi.PtrTo("tcp"),
            SecurityGroup: masterGroup.Task,
-           ToPort: fi.Int64(8443),
+           ToPort: fi.PtrTo(int64(8443)),
          }
          t.SetCidrOrPrefix(cidr)
          c.AddTask(t)
@@ -506,10 +506,10 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
  if b.APILoadBalancerClass() == kops.LoadBalancerClassClassic {
    for _, id := range b.Cluster.Spec.API.LoadBalancer.AdditionalSecurityGroups {
      t := &awstasks.SecurityGroup{
-       Name: fi.String(id),
+       Name: fi.PtrTo(id),
        Lifecycle: b.SecurityLifecycle,
-       ID: fi.String(id),
-       Shared: fi.Bool(true),
+       ID: fi.PtrTo(id),
+       Shared: fi.PtrTo(true),
      }
      if err := c.EnsureTask(t); err != nil {
        return err
@@ -523,36 +523,36 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
    for _, masterGroup := range masterGroups {
      suffix := masterGroup.Suffix
      c.AddTask(&awstasks.SecurityGroupRule{
-       Name: fi.String(fmt.Sprintf("https-elb-to-master%s", suffix)),
+       Name: fi.PtrTo(fmt.Sprintf("https-elb-to-master%s", suffix)),
        Lifecycle: b.SecurityLifecycle,
-       FromPort: fi.Int64(443),
-       Protocol: fi.String("tcp"),
+       FromPort: fi.PtrTo(int64(443)),
+       Protocol: fi.PtrTo("tcp"),
        SecurityGroup: masterGroup.Task,
        SourceGroup: lbSG,
-       ToPort: fi.Int64(443),
+       ToPort: fi.PtrTo(int64(443)),
      })
    }
  } else if b.APILoadBalancerClass() == kops.LoadBalancerClassNetwork {
    for _, masterGroup := range masterGroups {
      suffix := masterGroup.Suffix
      c.AddTask(&awstasks.SecurityGroupRule{
-       Name: fi.String(fmt.Sprintf("https-elb-to-master%s", suffix)),
+       Name: fi.PtrTo(fmt.Sprintf("https-elb-to-master%s", suffix)),
        Lifecycle: b.SecurityLifecycle,
-       FromPort: fi.Int64(443),
-       Protocol: fi.String("tcp"),
+       FromPort: fi.PtrTo(int64(443)),
+       Protocol: fi.PtrTo("tcp"),
        SecurityGroup: masterGroup.Task,
-       ToPort: fi.Int64(443),
-       CIDR: fi.String(b.Cluster.Spec.NetworkCIDR),
+       ToPort: fi.PtrTo(int64(443)),
+       CIDR: fi.PtrTo(b.Cluster.Spec.NetworkCIDR),
      })
      for _, cidr := range b.Cluster.Spec.AdditionalNetworkCIDRs {
        c.AddTask(&awstasks.SecurityGroupRule{
-         Name: fi.String(fmt.Sprintf("https-lb-to-master%s-%s", suffix, cidr)),
+         Name: fi.PtrTo(fmt.Sprintf("https-lb-to-master%s-%s", suffix, cidr)),
          Lifecycle: b.SecurityLifecycle,
-         FromPort: fi.Int64(443),
-         Protocol: fi.String("tcp"),
+         FromPort: fi.PtrTo(int64(443)),
+         Protocol: fi.PtrTo("tcp"),
          SecurityGroup: masterGroup.Task,
-         ToPort: fi.Int64(443),
-         CIDR: fi.String(cidr),
+         ToPort: fi.PtrTo(int64(443)),
+         CIDR: fi.PtrTo(cidr),
        })
      }
    }
@@ -563,23 +563,23 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
    for _, masterGroup := range masterGroups {
      suffix := masterGroup.Suffix
      c.AddTask(&awstasks.SecurityGroupRule{
-       Name: fi.String(fmt.Sprintf("kops-controller-lb-to-master%s", suffix)),
+       Name: fi.PtrTo(fmt.Sprintf("kops-controller-lb-to-master%s", suffix)),
        Lifecycle: b.SecurityLifecycle,
-       FromPort: fi.Int64(wellknownports.KopsControllerPort),
-       Protocol: fi.String("tcp"),
+       FromPort: fi.PtrTo(int64(wellknownports.KopsControllerPort)),
+       Protocol: fi.PtrTo("tcp"),
        SecurityGroup: masterGroup.Task,
-       ToPort: fi.Int64(wellknownports.KopsControllerPort),
-       CIDR: fi.String(b.Cluster.Spec.NetworkCIDR),
+       ToPort: fi.PtrTo(int64(wellknownports.KopsControllerPort)),
+       CIDR: fi.PtrTo(b.Cluster.Spec.NetworkCIDR),
      })
      for _, cidr := range b.Cluster.Spec.AdditionalNetworkCIDRs {
        c.AddTask(&awstasks.SecurityGroupRule{
-         Name: fi.String(fmt.Sprintf("kops-controller-lb-to-master%s-%s", suffix, cidr)),
+         Name: fi.PtrTo(fmt.Sprintf("kops-controller-lb-to-master%s-%s", suffix, cidr)),
          Lifecycle: b.SecurityLifecycle,
-         FromPort: fi.Int64(wellknownports.KopsControllerPort),
-         Protocol: fi.String("tcp"),
+         FromPort: fi.PtrTo(int64(wellknownports.KopsControllerPort)),
+         Protocol: fi.PtrTo("tcp"),
          SecurityGroup: masterGroup.Task,
-         ToPort: fi.Int64(wellknownports.KopsControllerPort),
-         CIDR: fi.String(cidr),
+         ToPort: fi.PtrTo(int64(wellknownports.KopsControllerPort)),
+         CIDR: fi.PtrTo(cidr),
        })
      }
    }
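The changes above all follow one pattern: the typed pointer helpers (fi.String, fi.Int64, fi.Bool) are replaced by a single generic fi.PtrTo, and the typed dereference helpers (fi.StringValue, fi.Int64Value, fi.BoolValue, fi.Int32Value) by a generic fi.ValueOf. A minimal sketch of what such generic helpers look like, assuming Go 1.18+ generics; the exact definitions in the kops fi package may differ:

// PtrTo returns a pointer to a copy of v. One generic function replaces
// the former fi.String, fi.Int64, fi.Bool, etc.
func PtrTo[T any](v T) *T {
  return &v
}

// ValueOf dereferences p, returning T's zero value when p is nil, so
// callers need no explicit nil checks.
func ValueOf[T any](p *T) T {
  if p == nil {
    var zero T
    return zero
  }
  return *p
}

One consequence is visible throughout the diff: an untyped constant such as 443 defaults to int, so call sites that need a *int64 must now spell out the conversion, fi.PtrTo(int64(443)), where fi.Int64(443) previously implied it.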
diff --git a/pkg/model/awsmodel/autoscalinggroup.go b/pkg/model/awsmodel/autoscalinggroup.go
index c6644863825fb..3e2c5b850e237 100644
--- a/pkg/model/awsmodel/autoscalinggroup.go
+++ b/pkg/model/awsmodel/autoscalinggroup.go
@@ -91,7 +91,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
      warmPool := b.Cluster.Spec.WarmPool.ResolveDefaults(ig)

-     enabled := fi.Bool(warmPool.IsEnabled())
+     enabled := fi.PtrTo(warmPool.IsEnabled())
      warmPoolTask := &awstasks.WarmPool{
        Name: &name,
        Lifecycle: b.Lifecycle,
@@ -142,22 +142,22 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
  if err != nil {
    return nil, err
  }
- if fi.Int32Value(ig.Spec.RootVolumeSize) > 0 {
-   rootVolumeSize = fi.Int32Value(ig.Spec.RootVolumeSize)
+ if fi.ValueOf(ig.Spec.RootVolumeSize) > 0 {
+   rootVolumeSize = fi.ValueOf(ig.Spec.RootVolumeSize)
  }

- rootVolumeType := fi.StringValue(ig.Spec.RootVolumeType)
+ rootVolumeType := fi.ValueOf(ig.Spec.RootVolumeType)
  if rootVolumeType == "" {
    rootVolumeType = DefaultVolumeType
  }

  rootVolumeEncryption := DefaultVolumeEncryption
  if ig.Spec.RootVolumeEncryption != nil {
-   rootVolumeEncryption = fi.BoolValue(ig.Spec.RootVolumeEncryption)
+   rootVolumeEncryption = fi.ValueOf(ig.Spec.RootVolumeEncryption)
  }

  rootVolumeKmsKey := ""
- if fi.BoolValue(ig.Spec.RootVolumeEncryption) && ig.Spec.RootVolumeEncryptionKey != nil {
+ if fi.ValueOf(ig.Spec.RootVolumeEncryption) && ig.Spec.RootVolumeEncryptionKey != nil {
    rootVolumeKmsKey = *ig.Spec.RootVolumeEncryptionKey
  }
@@ -177,30 +177,30 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
  }

  lt := &awstasks.LaunchTemplate{
-   Name: fi.String(name),
+   Name: fi.PtrTo(name),
    Lifecycle: b.Lifecycle,
-   CPUCredits: fi.String(fi.StringValue(ig.Spec.CPUCredits)),
-   HTTPPutResponseHopLimit: fi.Int64(1),
-   HTTPTokens: fi.String(ec2.LaunchTemplateHttpTokensStateOptional),
-   HTTPProtocolIPv6: fi.String(ec2.LaunchTemplateInstanceMetadataProtocolIpv6Disabled),
+   CPUCredits: fi.PtrTo(fi.ValueOf(ig.Spec.CPUCredits)),
+   HTTPPutResponseHopLimit: fi.PtrTo(int64(1)),
+   HTTPTokens: fi.PtrTo(ec2.LaunchTemplateHttpTokensStateOptional),
+   HTTPProtocolIPv6: fi.PtrTo(ec2.LaunchTemplateInstanceMetadataProtocolIpv6Disabled),
    IAMInstanceProfile: link,
-   ImageID: fi.String(ig.Spec.Image),
+   ImageID: fi.PtrTo(ig.Spec.Image),
    InstanceInterruptionBehavior: ig.Spec.InstanceInterruptionBehavior,
-   InstanceMonitoring: fi.Bool(false),
-   IPv6AddressCount: fi.Int64(0),
-   RootVolumeIops: fi.Int64(int64(fi.Int32Value(ig.Spec.RootVolumeIOPS))),
+   InstanceMonitoring: fi.PtrTo(false),
+   IPv6AddressCount: fi.PtrTo(int64(0)),
+   RootVolumeIops: fi.PtrTo(int64(fi.ValueOf(ig.Spec.RootVolumeIOPS))),
    RootVolumeOptimization: ig.Spec.RootVolumeOptimization,
-   RootVolumeSize: fi.Int64(int64(rootVolumeSize)),
-   RootVolumeType: fi.String(rootVolumeType),
-   RootVolumeEncryption: fi.Bool(rootVolumeEncryption),
-   RootVolumeKmsKey: fi.String(rootVolumeKmsKey),
+   RootVolumeSize: fi.PtrTo(int64(rootVolumeSize)),
+   RootVolumeType: fi.PtrTo(rootVolumeType),
+   RootVolumeEncryption: fi.PtrTo(rootVolumeEncryption),
+   RootVolumeKmsKey: fi.PtrTo(rootVolumeKmsKey),
    SecurityGroups: securityGroups,
    Tags: tags,
    UserData: userData,
  }

  if ig.Spec.Manager == kops.InstanceManagerCloudGroup {
-   lt.InstanceType = fi.String(strings.Split(ig.Spec.MachineType, ",")[0])
+   lt.InstanceType = fi.PtrTo(strings.Split(ig.Spec.MachineType, ",")[0])
  }

  {
@@ -213,12 +213,12 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
    // @step: check if we can add a public ip to this subnet
    switch subnets[0].Type {
    case kops.SubnetTypePublic, kops.SubnetTypeUtility:
-     lt.AssociatePublicIP = fi.Bool(true)
+     lt.AssociatePublicIP = fi.PtrTo(true)
      if ig.Spec.AssociatePublicIP != nil {
        lt.AssociatePublicIP = ig.Spec.AssociatePublicIP
      }
    case kops.SubnetTypeDualStack, kops.SubnetTypePrivate:
-     lt.AssociatePublicIP = fi.Bool(false)
+     lt.AssociatePublicIP = fi.PtrTo(false)
    }

    // @step: add an IPv6 address
@@ -228,8 +228,8 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
        continue
      }
      if clusterSubnet.IPv6CIDR != "" {
-       lt.IPv6AddressCount = fi.Int64(1)
-       lt.HTTPProtocolIPv6 = fi.String(ec2.LaunchTemplateInstanceMetadataProtocolIpv6Enabled)
+       lt.IPv6AddressCount = fi.PtrTo(int64(1))
+       lt.HTTPProtocolIPv6 = fi.PtrTo(ec2.LaunchTemplateInstanceMetadataProtocolIpv6Enabled)
      }
    }
  }
@@ -243,35 +243,35 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
    }
    if x.Type == ec2.VolumeTypeIo1 || x.Type == ec2.VolumeTypeIo2 {
      if x.IOPS == nil {
-       x.IOPS = fi.Int64(DefaultVolumeIonIops)
+       x.IOPS = fi.PtrTo(int64(DefaultVolumeIonIops))
      }
    } else if x.Type == ec2.VolumeTypeGp3 {
      if x.IOPS == nil {
-       x.IOPS = fi.Int64(DefaultVolumeGp3Iops)
+       x.IOPS = fi.PtrTo(int64(DefaultVolumeGp3Iops))
      }
      if x.Throughput == nil {
-       x.Throughput = fi.Int64(DefaultVolumeGp3Throughput)
+       x.Throughput = fi.PtrTo(int64(DefaultVolumeGp3Throughput))
      }
    } else {
      x.IOPS = nil
    }

    deleteOnTermination := DefaultVolumeDeleteOnTermination
    if x.DeleteOnTermination != nil {
-     deleteOnTermination = fi.BoolValue(x.DeleteOnTermination)
+     deleteOnTermination = fi.ValueOf(x.DeleteOnTermination)
    }

    encryption := DefaultVolumeEncryption
    if x.Encrypted != nil {
-     encryption = fi.BoolValue(x.Encrypted)
+     encryption = fi.ValueOf(x.Encrypted)
    }

    lt.BlockDeviceMappings = append(lt.BlockDeviceMappings, &awstasks.BlockDeviceMapping{
-     DeviceName: fi.String(x.Device),
-     EbsDeleteOnTermination: fi.Bool(deleteOnTermination),
-     EbsEncrypted: fi.Bool(encryption),
+     DeviceName: fi.PtrTo(x.Device),
+     EbsDeleteOnTermination: fi.PtrTo(deleteOnTermination),
+     EbsEncrypted: fi.PtrTo(encryption),
      EbsKmsKey: x.Key,
      EbsVolumeIops: x.IOPS,
-     EbsVolumeSize: fi.Int64(x.Size),
+     EbsVolumeSize: fi.PtrTo(x.Size),
      EbsVolumeThroughput: x.Throughput,
-     EbsVolumeType: fi.String(x.Type),
+     EbsVolumeType: fi.PtrTo(x.Type),
    })
  }
@@ -288,17 +288,17 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
  }

  if rootVolumeType == ec2.VolumeTypeIo1 || rootVolumeType == ec2.VolumeTypeIo2 {
-   if fi.Int32Value(ig.Spec.RootVolumeIOPS) < 100 {
-     lt.RootVolumeIops = fi.Int64(int64(DefaultVolumeIonIops))
+   if fi.ValueOf(ig.Spec.RootVolumeIOPS) < 100 {
+     lt.RootVolumeIops = fi.PtrTo(int64(DefaultVolumeIonIops))
    }
  } else if rootVolumeType == ec2.VolumeTypeGp3 {
-   if fi.Int32Value(ig.Spec.RootVolumeIOPS) < 3000 {
-     lt.RootVolumeIops = fi.Int64(int64(DefaultVolumeGp3Iops))
+   if fi.ValueOf(ig.Spec.RootVolumeIOPS) < 3000 {
+     lt.RootVolumeIops = fi.PtrTo(int64(DefaultVolumeGp3Iops))
    }
-   if fi.Int32Value(ig.Spec.RootVolumeThroughput) < 125 {
-     lt.RootVolumeThroughput = fi.Int64(int64(DefaultVolumeGp3Throughput))
+   if fi.ValueOf(ig.Spec.RootVolumeThroughput) < 125 {
+     lt.RootVolumeThroughput = fi.PtrTo(int64(DefaultVolumeGp3Throughput))
    } else {
-     lt.RootVolumeThroughput = fi.Int64(int64(fi.Int32Value(ig.Spec.RootVolumeThroughput)))
+     lt.RootVolumeThroughput = fi.PtrTo(int64(fi.ValueOf(ig.Spec.RootVolumeThroughput)))
    }
  } else {
    lt.RootVolumeIops = nil
@@ -317,14 +317,14 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
  if ig.Spec.MixedInstancesPolicy == nil && ig.Spec.MaxPrice != nil {
    lt.SpotPrice = ig.Spec.MaxPrice
  } else {
-   lt.SpotPrice = fi.String("")
+   lt.SpotPrice = fi.PtrTo("")
  }
  if ig.Spec.SpotDurationInMinutes != nil {
    lt.SpotDurationInMinutes = ig.Spec.SpotDurationInMinutes
  }

  if ig.Spec.Tenancy != "" {
-   lt.Tenancy = fi.String(ig.Spec.Tenancy)
+   lt.Tenancy = fi.PtrTo(ig.Spec.Tenancy)
  }

  return lt, nil
@@ -335,11 +335,11 @@ func (b *AutoscalingGroupModelBuilder) buildSecurityGroups(c *fi.ModelBuilderCon
  // @step: if required we add the override for the security group for this instancegroup
  sgLink := b.LinkToSecurityGroup(ig.Spec.Role)
  if ig.Spec.SecurityGroupOverride != nil {
-   sgName := fmt.Sprintf("%v-%v", fi.StringValue(ig.Spec.SecurityGroupOverride), ig.Spec.Role)
+   sgName := fmt.Sprintf("%v-%v", fi.ValueOf(ig.Spec.SecurityGroupOverride), ig.Spec.Role)
    sgLink = &awstasks.SecurityGroup{
      ID: ig.Spec.SecurityGroupOverride,
      Name: &sgName,
-     Shared: fi.Bool(true),
+     Shared: fi.PtrTo(true),
    }
  }
@@ -349,10 +349,10 @@ func (b *AutoscalingGroupModelBuilder) buildSecurityGroups(c *fi.ModelBuilderCon
    b.APILoadBalancerClass() == kops.LoadBalancerClassNetwork {
    for _, id := range b.Cluster.Spec.API.LoadBalancer.AdditionalSecurityGroups {
      sgTask := &awstasks.SecurityGroup{
-       ID: fi.String(id),
+       ID: fi.PtrTo(id),
        Lifecycle: b.SecurityLifecycle,
-       Name: fi.String("nlb-" + id),
-       Shared: fi.Bool(true),
+       Name: fi.PtrTo("nlb-" + id),
+       Shared: fi.PtrTo(true),
      }
      if err := c.EnsureTask(sgTask); err != nil {
        return nil, err
@@ -364,10 +364,10 @@ func (b *AutoscalingGroupModelBuilder) buildSecurityGroups(c *fi.ModelBuilderCon
  // @step: add any additional security groups to the instancegroup
  for _, id := range ig.Spec.AdditionalSecurityGroups {
    sgTask := &awstasks.SecurityGroup{
-     ID: fi.String(id),
+     ID: fi.PtrTo(id),
      Lifecycle: b.SecurityLifecycle,
-     Name: fi.String(id),
-     Shared: fi.Bool(true),
+     Name: fi.PtrTo(id),
+     Shared: fi.PtrTo(true),
    }
    if err := c.EnsureTask(sgTask); err != nil {
      return nil, err
@@ -381,10 +381,10 @@ func (b *AutoscalingGroupModelBuilder) buildSecurityGroups(c *fi.ModelBuilderCon

 // buildAutoscalingGroupTask is responsible for building the autoscaling task into the model
 func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuilderContext, name string, ig *kops.InstanceGroup) (*awstasks.AutoscalingGroup, error) {
  t := &awstasks.AutoscalingGroup{
-   Name: fi.String(name),
+   Name: fi.PtrTo(name),
    Lifecycle: b.Lifecycle,

-   Granularity: fi.String("1Minute"),
+   Granularity: fi.PtrTo("1Minute"),
    Metrics: []string{
      "GroupDesiredCapacity",
      "GroupInServiceInstances",
@@ -396,20 +396,20 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
      "GroupTotalInstances",
    },

-   InstanceProtection: fi.Bool(false),
+   InstanceProtection: fi.PtrTo(false),
  }

- minSize := fi.Int64(1)
- maxSize := fi.Int64(1)
+ minSize := fi.PtrTo(int64(1))
+ maxSize := fi.PtrTo(int64(1))
  if ig.Spec.MinSize != nil {
-   minSize = fi.Int64(int64(*ig.Spec.MinSize))
+   minSize = fi.PtrTo(int64(*ig.Spec.MinSize))
  } else if ig.Spec.Role == kops.InstanceGroupRoleNode {
-   minSize = fi.Int64(2)
+   minSize = fi.PtrTo(int64(2))
  }
  if ig.Spec.MaxSize != nil {
-   maxSize = fi.Int64(int64(*ig.Spec.MaxSize))
+   maxSize = fi.PtrTo(int64(*ig.Spec.MaxSize))
  } else if ig.Spec.Role == kops.InstanceGroupRoleNode {
-   maxSize = fi.Int64(2)
+   maxSize = fi.PtrTo(int64(2))
  }

  t.MinSize = minSize
@@ -477,22 +477,22 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
        Name: extLB.LoadBalancerName,
        Lifecycle: b.Lifecycle,
        LoadBalancerName: extLB.LoadBalancerName,
-       Shared: fi.Bool(true),
+       Shared: fi.PtrTo(true),
      }
      t.LoadBalancers = append(t.LoadBalancers, lb)
      c.EnsureTask(lb)
    }

    if extLB.TargetGroupARN != nil {
-     targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.StringValue(extLB.TargetGroupARN))
+     targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.ValueOf(extLB.TargetGroupARN))
      if err != nil {
        return nil, err
      }
      tg := &awstasks.TargetGroup{
-       Name: fi.String(name + "-" + targetGroupName),
+       Name: fi.PtrTo(name + "-" + targetGroupName),
        Lifecycle: b.Lifecycle,
        ARN: extLB.TargetGroupARN,
-       Shared: fi.Bool(true),
+       Shared: fi.PtrTo(true),
      }
      t.TargetGroups = append(t.TargetGroups, tg)
      c.AddTask(tg)
@@ -520,7 +520,7 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
        ir.CPUMin = &cpuMin
      }
    } else {
-     ir.CPUMin = fi.Int64(0)
+     ir.CPUMin = fi.PtrTo(int64(0))
    }

    memory := spec.InstanceRequirements.Memory
@@ -534,7 +534,7 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
        ir.MemoryMin = &memoryMin
      }
    } else {
-     ir.MemoryMin = fi.Int64(0)
+     ir.MemoryMin = fi.PtrTo(int64(0))
    }
    t.InstanceRequirements = ir
  }
@@ -547,7 +547,7 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
    t.MixedSpotInstancePools = spec.SpotInstancePools
    // In order to unset maxprice, the value needs to be ""
    if ig.Spec.MaxPrice == nil {
-     t.MixedSpotMaxPrice = fi.String("")
+     t.MixedSpotMaxPrice = fi.PtrTo("")
    } else {
      t.MixedSpotMaxPrice = ig.Spec.MaxPrice
    }
@@ -555,9 +555,9 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil

  if ig.Spec.MaxInstanceLifetime != nil {
    lifetimeSec := int64(ig.Spec.MaxInstanceLifetime.Seconds())
-   t.MaxInstanceLifetime = fi.Int64(lifetimeSec)
+   t.MaxInstanceLifetime = fi.PtrTo(lifetimeSec)
  } else {
-   t.MaxInstanceLifetime = fi.Int64(0)
+   t.MaxInstanceLifetime = fi.PtrTo(int64(0))
  }
  return t, nil
 }
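Because fi.ValueOf yields the zero value for a nil pointer, guards like the rootVolumeSize check above stay a single expression. A small usage sketch (the values here are hypothetical; per the diff, RootVolumeSize is a *int32 in the instance group spec):

var size *int32           // nil: the user did not set a size
_ = fi.ValueOf(size) > 0  // false: nil reads as 0, no nil check needed

size = fi.PtrTo(int32(128))
_ = fi.ValueOf(size) > 0  // true: the configured size is used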
diff --git a/pkg/model/awsmodel/autoscalinggroup_test.go b/pkg/model/awsmodel/autoscalinggroup_test.go
index ff7ccd3b4e7c4..24ad47adda606 100644
--- a/pkg/model/awsmodel/autoscalinggroup_test.go
+++ b/pkg/model/awsmodel/autoscalinggroup_test.go
@@ -51,7 +51,7 @@ func buildNodeInstanceGroup(subnets ...string) *kops.InstanceGroup {
 func TestRootVolumeOptimizationFlag(t *testing.T) {
  cluster := buildMinimalCluster()
  ig := buildNodeInstanceGroup("subnet-us-test-1a")
- ig.Spec.RootVolumeOptimization = fi.Bool(true)
+ ig.Spec.RootVolumeOptimization = fi.PtrTo(true)

  k := [][]byte{}
  k = append(k, []byte(sshPublicKeyEntry))
@@ -88,7 +88,7 @@ func TestRootVolumeOptimizationFlag(t *testing.T) {

  // We need the CA for the bootstrap script
  caTask := &fitasks.Keypair{
-   Name: fi.String(fi.CertificateIDCA),
+   Name: fi.PtrTo(fi.CertificateIDCA),
    Subject: "cn=kubernetes",
    Type: "ca",
  }
@@ -97,7 +97,7 @@ func TestRootVolumeOptimizationFlag(t *testing.T) {
    "etcd-clients-ca",
  } {
    task := &fitasks.Keypair{
-     Name: fi.String(keypair),
+     Name: fi.PtrTo(keypair),
      Subject: "cn=" + keypair,
      Type: "ca",
    }
@@ -119,7 +119,7 @@ func TestAPIServerAdditionalSecurityGroupsWithNLB(t *testing.T) {
  const sgIDAPIServer = "sg-01234567890abcdef"

  cluster := buildMinimalCluster()
- cluster.Spec.API = &kops.AccessSpec{
+ cluster.Spec.API = kops.APISpec{
    LoadBalancer: &kops.LoadBalancerAccessSpec{
      Class: kops.LoadBalancerClassNetwork,
      AdditionalSecurityGroups: []string{sgIDAPIServer},
@@ -192,7 +192,7 @@ func TestAPIServerAdditionalSecurityGroupsWithNLB(t *testing.T) {

  // We need the CA for the bootstrap script
  caTask := &fitasks.Keypair{
-   Name: fi.String(fi.CertificateIDCA),
+   Name: fi.PtrTo(fi.CertificateIDCA),
    Subject: "cn=kubernetes",
    Type: "ca",
  }
@@ -207,7 +207,7 @@ func TestAPIServerAdditionalSecurityGroupsWithNLB(t *testing.T) {
    "service-account",
  } {
    task := &fitasks.Keypair{
-     Name: fi.String(keypair),
+     Name: fi.PtrTo(keypair),
      Subject: "cn=" + keypair,
      Type: "ca",
    }
@@ -218,7 +218,7 @@ func TestAPIServerAdditionalSecurityGroupsWithNLB(t *testing.T) {
    "kube-proxy",
  } {
    task := &fitasks.Keypair{
-     Name: fi.String(keypair),
+     Name: fi.PtrTo(keypair),
      Subject: "cn=" + keypair,
      Signer: caTask,
      Type: "client",
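Beyond the pointer-helper rename, this test also reflects Cluster.Spec.API changing from a *kops.AccessSpec pointer to a kops.APISpec value (type names as they appear in this diff). With a value type there is no nil case, which is why checks such as b.Cluster.Spec.API != nil disappear elsewhere in the diff:

// Before: the field had to be allocated and nil-checked.
// cluster.Spec.API = &kops.AccessSpec{...}

// After: assigned as a plain value; LoadBalancer remains a pointer and
// is still nil-checked where it is optional.
cluster.Spec.API = kops.APISpec{
  LoadBalancer: &kops.LoadBalancerAccessSpec{
    Class: kops.LoadBalancerClassNetwork,
    AdditionalSecurityGroups: []string{sgIDAPIServer},
  },
}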
Name: fi.PtrTo(fmt.Sprintf("ssh-nlb-%s", cidr)), Lifecycle: b.SecurityLifecycle, SecurityGroup: bastionGroup.Task, - Protocol: fi.String("tcp"), - FromPort: fi.Int64(22), - ToPort: fi.Int64(22), + Protocol: fi.PtrTo("tcp"), + FromPort: fi.PtrTo(int64(22)), + ToPort: fi.PtrTo(int64(22)), } t.SetCidrOrPrefix(cidr) AddDirectionalGroupRule(c, t) @@ -213,23 +213,23 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error { } else if utils.IsIPv6CIDR(cidr) { // Allow ICMP traffic required for PMTU discovery t := &awstasks.SecurityGroupRule{ - Name: fi.String("icmpv6-pmtu-ssh-nlb-" + cidr), + Name: fi.PtrTo("icmpv6-pmtu-ssh-nlb-" + cidr), Lifecycle: b.SecurityLifecycle, - FromPort: fi.Int64(-1), - Protocol: fi.String("icmpv6"), + FromPort: fi.PtrTo(int64(-1)), + Protocol: fi.PtrTo("icmpv6"), SecurityGroup: bastionGroup.Task, - ToPort: fi.Int64(-1), + ToPort: fi.PtrTo(int64(-1)), } t.SetCidrOrPrefix(cidr) c.AddTask(t) } else { t := &awstasks.SecurityGroupRule{ - Name: fi.String("icmp-pmtu-ssh-nlb-" + cidr), + Name: fi.PtrTo("icmp-pmtu-ssh-nlb-" + cidr), Lifecycle: b.SecurityLifecycle, - FromPort: fi.Int64(3), - Protocol: fi.String("icmp"), + FromPort: fi.PtrTo(int64(3)), + Protocol: fi.PtrTo("icmp"), SecurityGroup: bastionGroup.Task, - ToPort: fi.Int64(4), + ToPort: fi.PtrTo(int64(4)), } t.SetCidrOrPrefix(cidr) c.AddTask(t) @@ -256,27 +256,27 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error { }, } nlb = &awstasks.NetworkLoadBalancer{ - Name: fi.String(b.NLBName("bastion")), + Name: fi.PtrTo(b.NLBName("bastion")), Lifecycle: b.Lifecycle, - LoadBalancerName: fi.String(loadBalancerName), - CLBName: fi.String("bastion." + b.ClusterName()), + LoadBalancerName: fi.PtrTo(loadBalancerName), + CLBName: fi.PtrTo("bastion." + b.ClusterName()), SubnetMappings: nlbSubnetMappings, Listeners: nlbListeners, TargetGroups: make([]*awstasks.TargetGroup, 0), Tags: tags, VPC: b.LinkToVPC(), - Type: fi.String("network"), - IpAddressType: fi.String("ipv4"), + Type: fi.PtrTo("network"), + IpAddressType: fi.PtrTo("ipv4"), } if useIPv6ForBastion(b) { - nlb.IpAddressType = fi.String("dualstack") + nlb.IpAddressType = fi.PtrTo("dualstack") } // Set the NLB Scheme according to load balancer Type switch bastionLoadBalancerType { case kops.LoadBalancerTypeInternal: - nlb.Scheme = fi.String("internal") + nlb.Scheme = fi.PtrTo("internal") case kops.LoadBalancerTypePublic: nlb.Scheme = nil default: @@ -290,16 +290,16 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error { sshGroupTags["Name"] = sshGroupName tg := &awstasks.TargetGroup{ - Name: fi.String(sshGroupName), + Name: fi.PtrTo(sshGroupName), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), Tags: sshGroupTags, - Protocol: fi.String("TCP"), - Port: fi.Int64(22), - Interval: fi.Int64(10), - HealthyThreshold: fi.Int64(2), - UnhealthyThreshold: fi.Int64(2), - Shared: fi.Bool(false), + Protocol: fi.PtrTo("TCP"), + Port: fi.PtrTo(int64(22)), + Interval: fi.PtrTo(int64(10)), + HealthyThreshold: fi.PtrTo(int64(2)), + UnhealthyThreshold: fi.PtrTo(int64(2)), + Shared: fi.PtrTo(false), } c.AddTask(tg) @@ -318,23 +318,23 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error { // Here we implement the bastion CNAME logic // By default bastions will create a CNAME that follows the `bastion-$clustername` formula t := &awstasks.DNSName{ - Name: fi.String(publicName), + Name: fi.PtrTo(publicName), Lifecycle: b.Lifecycle, Zone: b.LinkToDNSZone(), - ResourceName: fi.String(publicName), - ResourceType: fi.String("A"), + 
ResourceName: fi.PtrTo(publicName), + ResourceType: fi.PtrTo("A"), TargetLoadBalancer: b.LinkToNLB("bastion"), } c.AddTask(t) if *nlb.IpAddressType == "dualstack" { t := &awstasks.DNSName{ - Name: fi.String(publicName + "-AAAA"), + Name: fi.PtrTo(publicName + "-AAAA"), Lifecycle: b.Lifecycle, Zone: b.LinkToDNSZone(), - ResourceName: fi.String(publicName), - ResourceType: fi.String("AAAA"), + ResourceName: fi.PtrTo(publicName), + ResourceType: fi.PtrTo("AAAA"), TargetLoadBalancer: b.LinkToNLB("bastion"), } c.AddTask(t) diff --git a/pkg/model/awsmodel/dns.go b/pkg/model/awsmodel/dns.go index b7e377d54b50f..3cd4602aa2ff5 100644 --- a/pkg/model/awsmodel/dns.go +++ b/pkg/model/awsmodel/dns.go @@ -40,7 +40,7 @@ func (b *DNSModelBuilder) ensureDNSZone(c *fi.ModelBuilderContext) error { // Configuration for a DNS zone dnsZone := &awstasks.DNSZone{ - Name: fi.String(b.NameForDNSZone()), + Name: fi.PtrTo(b.NameForDNSZone()), Lifecycle: b.Lifecycle, } @@ -51,7 +51,7 @@ func (b *DNSModelBuilder) ensureDNSZone(c *fi.ModelBuilderContext) error { // Ignore case kops.DNSTypePrivate: - dnsZone.Private = fi.Bool(true) + dnsZone.Private = fi.PtrTo(true) dnsZone.PrivateVPC = b.LinkToVPC() default: @@ -61,10 +61,10 @@ func (b *DNSModelBuilder) ensureDNSZone(c *fi.ModelBuilderContext) error { if !strings.Contains(b.Cluster.Spec.DNSZone, ".") { // Looks like a hosted zone ID - dnsZone.ZoneID = fi.String(b.Cluster.Spec.DNSZone) + dnsZone.ZoneID = fi.PtrTo(b.Cluster.Spec.DNSZone) } else { // Looks like a normal DNS name - dnsZone.DNSName = fi.String(b.Cluster.Spec.DNSZone) + dnsZone.DNSName = fi.PtrTo(b.Cluster.Spec.DNSZone) } return c.EnsureTask(dnsZone) @@ -100,20 +100,20 @@ func (b *DNSModelBuilder) Build(c *fi.ModelBuilderContext) error { } c.AddTask(&awstasks.DNSName{ - Name: fi.String(b.Cluster.Spec.MasterPublicName), - ResourceName: fi.String(b.Cluster.Spec.MasterPublicName), + Name: fi.PtrTo(b.Cluster.Spec.API.PublicName), + ResourceName: fi.PtrTo(b.Cluster.Spec.API.PublicName), Lifecycle: b.Lifecycle, Zone: b.LinkToDNSZone(), - ResourceType: fi.String("A"), + ResourceType: fi.PtrTo("A"), TargetLoadBalancer: targetLoadBalancer, }) if b.UseIPv6ForAPI() { c.AddTask(&awstasks.DNSName{ - Name: fi.String(b.Cluster.Spec.MasterPublicName + "-AAAA"), - ResourceName: fi.String(b.Cluster.Spec.MasterPublicName), + Name: fi.PtrTo(b.Cluster.Spec.API.PublicName + "-AAAA"), + ResourceName: fi.PtrTo(b.Cluster.Spec.API.PublicName), Lifecycle: b.Lifecycle, Zone: b.LinkToDNSZone(), - ResourceType: fi.String("AAAA"), + ResourceType: fi.PtrTo("AAAA"), TargetLoadBalancer: targetLoadBalancer, }) } @@ -129,14 +129,14 @@ func (b *DNSModelBuilder) Build(c *fi.ModelBuilderContext) error { return err } - // Using EnsureTask as MasterInternalName and MasterPublicName could be the same + // Using EnsureTask as APIInternalName() and MasterPublicName could be the same { err := c.EnsureTask(&awstasks.DNSName{ - Name: fi.String(b.Cluster.Spec.MasterInternalName), - ResourceName: fi.String(b.Cluster.Spec.MasterInternalName), + Name: fi.PtrTo(b.Cluster.APIInternalName()), + ResourceName: fi.PtrTo(b.Cluster.APIInternalName()), Lifecycle: b.Lifecycle, Zone: b.LinkToDNSZone(), - ResourceType: fi.String("A"), + ResourceType: fi.PtrTo("A"), TargetLoadBalancer: targetLoadBalancer, }) if err != nil { @@ -145,11 +145,11 @@ func (b *DNSModelBuilder) Build(c *fi.ModelBuilderContext) error { } if b.UseIPv6ForAPI() { err := c.EnsureTask(&awstasks.DNSName{ - Name: fi.String(b.Cluster.Spec.MasterInternalName + "-AAAA"), - ResourceName: 
fi.String(b.Cluster.Spec.MasterInternalName), + Name: fi.PtrTo(b.Cluster.APIInternalName() + "-AAAA"), + ResourceName: fi.PtrTo(b.Cluster.APIInternalName()), Lifecycle: b.Lifecycle, Zone: b.LinkToDNSZone(), - ResourceType: fi.String("AAAA"), + ResourceType: fi.PtrTo("AAAA"), TargetLoadBalancer: targetLoadBalancer, }) if err != nil { diff --git a/pkg/model/awsmodel/external_access.go b/pkg/model/awsmodel/external_access.go index 5aa5a0467da7c..38d09e7258dbd 100644 --- a/pkg/model/awsmodel/external_access.go +++ b/pkg/model/awsmodel/external_access.go @@ -35,7 +35,7 @@ type ExternalAccessModelBuilder struct { var _ fi.ModelBuilder = &ExternalAccessModelBuilder{} func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { - if len(b.Cluster.Spec.KubernetesAPIAccess) == 0 { + if len(b.Cluster.Spec.API.Access) == 0 { klog.Warningf("KubernetesAPIAccess is empty") } @@ -63,12 +63,12 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { for _, masterGroup := range masterGroups { suffix := masterGroup.Suffix t := &awstasks.SecurityGroupRule{ - Name: fi.String(fmt.Sprintf("ssh-external-to-master-%s%s", sshAccess, suffix)), + Name: fi.PtrTo(fmt.Sprintf("ssh-external-to-master-%s%s", sshAccess, suffix)), Lifecycle: b.Lifecycle, SecurityGroup: masterGroup.Task, - Protocol: fi.String("tcp"), - FromPort: fi.Int64(22), - ToPort: fi.Int64(22), + Protocol: fi.PtrTo("tcp"), + FromPort: fi.PtrTo(int64(22)), + ToPort: fi.PtrTo(int64(22)), } t.SetCidrOrPrefix(sshAccess) AddDirectionalGroupRule(c, t) @@ -77,12 +77,12 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { for _, nodeGroup := range nodeGroups { suffix := nodeGroup.Suffix t := &awstasks.SecurityGroupRule{ - Name: fi.String(fmt.Sprintf("ssh-external-to-node-%s%s", sshAccess, suffix)), + Name: fi.PtrTo(fmt.Sprintf("ssh-external-to-node-%s%s", sshAccess, suffix)), Lifecycle: b.Lifecycle, SecurityGroup: nodeGroup.Task, - Protocol: fi.String("tcp"), - FromPort: fi.Int64(22), - ToPort: fi.Int64(22), + Protocol: fi.PtrTo("tcp"), + FromPort: fi.PtrTo(int64(22)), + ToPort: fi.PtrTo(int64(22)), } t.SetCidrOrPrefix(sshAccess) AddDirectionalGroupRule(c, t) @@ -100,24 +100,24 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { suffix := nodeGroup.Suffix { t := &awstasks.SecurityGroupRule{ - Name: fi.String(fmt.Sprintf("nodeport-tcp-external-to-node-%s%s", nodePortAccess, suffix)), + Name: fi.PtrTo(fmt.Sprintf("nodeport-tcp-external-to-node-%s%s", nodePortAccess, suffix)), Lifecycle: b.Lifecycle, SecurityGroup: nodeGroup.Task, - Protocol: fi.String("tcp"), - FromPort: fi.Int64(int64(nodePortRange.Base)), - ToPort: fi.Int64(int64(nodePortRange.Base + nodePortRange.Size - 1)), + Protocol: fi.PtrTo("tcp"), + FromPort: fi.PtrTo(int64(nodePortRange.Base)), + ToPort: fi.PtrTo(int64(nodePortRange.Base + nodePortRange.Size - 1)), } t.SetCidrOrPrefix(nodePortAccess) c.AddTask(t) } { t := &awstasks.SecurityGroupRule{ - Name: fi.String(fmt.Sprintf("nodeport-udp-external-to-node-%s%s", nodePortAccess, suffix)), + Name: fi.PtrTo(fmt.Sprintf("nodeport-udp-external-to-node-%s%s", nodePortAccess, suffix)), Lifecycle: b.Lifecycle, SecurityGroup: nodeGroup.Task, - Protocol: fi.String("udp"), - FromPort: fi.Int64(int64(nodePortRange.Base)), - ToPort: fi.Int64(int64(nodePortRange.Base + nodePortRange.Size - 1)), + Protocol: fi.PtrTo("udp"), + FromPort: fi.PtrTo(int64(nodePortRange.Base)), + ToPort: fi.PtrTo(int64(nodePortRange.Base + nodePortRange.Size - 1)), } 
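The dns.go changes swap stored spec fields for accessors: Spec.MasterPublicName becomes Spec.API.PublicName, and the removed Spec.MasterInternalName is replaced by a Cluster.APIInternalName() method. A sketch of the accessor pattern; the method body here is an assumption for illustration, not a copy of the kops implementation:

// APIInternalName derives the internal API DNS name from the cluster
// name instead of storing it in the spec (assumed naming scheme).
func (c *Cluster) APIInternalName() string {
  return "api.internal." + c.ObjectMeta.Name
}

Deriving the name keeps it consistent with the cluster name by construction, so it no longer needs to be defaulted or validated as a separate spec field.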
diff --git a/pkg/model/awsmodel/external_access.go b/pkg/model/awsmodel/external_access.go
index 5aa5a0467da7c..38d09e7258dbd 100644
--- a/pkg/model/awsmodel/external_access.go
+++ b/pkg/model/awsmodel/external_access.go
@@ -35,7 +35,7 @@ type ExternalAccessModelBuilder struct {
 var _ fi.ModelBuilder = &ExternalAccessModelBuilder{}

 func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error {
- if len(b.Cluster.Spec.KubernetesAPIAccess) == 0 {
+ if len(b.Cluster.Spec.API.Access) == 0 {
    klog.Warningf("KubernetesAPIAccess is empty")
  }
@@ -63,12 +63,12 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error {
    for _, masterGroup := range masterGroups {
      suffix := masterGroup.Suffix
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String(fmt.Sprintf("ssh-external-to-master-%s%s", sshAccess, suffix)),
+       Name: fi.PtrTo(fmt.Sprintf("ssh-external-to-master-%s%s", sshAccess, suffix)),
        Lifecycle: b.Lifecycle,
        SecurityGroup: masterGroup.Task,
-       Protocol: fi.String("tcp"),
-       FromPort: fi.Int64(22),
-       ToPort: fi.Int64(22),
+       Protocol: fi.PtrTo("tcp"),
+       FromPort: fi.PtrTo(int64(22)),
+       ToPort: fi.PtrTo(int64(22)),
      }
      t.SetCidrOrPrefix(sshAccess)
      AddDirectionalGroupRule(c, t)
@@ -77,12 +77,12 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error {
    for _, nodeGroup := range nodeGroups {
      suffix := nodeGroup.Suffix
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String(fmt.Sprintf("ssh-external-to-node-%s%s", sshAccess, suffix)),
+       Name: fi.PtrTo(fmt.Sprintf("ssh-external-to-node-%s%s", sshAccess, suffix)),
        Lifecycle: b.Lifecycle,
        SecurityGroup: nodeGroup.Task,
-       Protocol: fi.String("tcp"),
-       FromPort: fi.Int64(22),
-       ToPort: fi.Int64(22),
+       Protocol: fi.PtrTo("tcp"),
+       FromPort: fi.PtrTo(int64(22)),
+       ToPort: fi.PtrTo(int64(22)),
      }
      t.SetCidrOrPrefix(sshAccess)
      AddDirectionalGroupRule(c, t)
@@ -100,24 +100,24 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error {
      suffix := nodeGroup.Suffix
      {
        t := &awstasks.SecurityGroupRule{
-         Name: fi.String(fmt.Sprintf("nodeport-tcp-external-to-node-%s%s", nodePortAccess, suffix)),
+         Name: fi.PtrTo(fmt.Sprintf("nodeport-tcp-external-to-node-%s%s", nodePortAccess, suffix)),
          Lifecycle: b.Lifecycle,
          SecurityGroup: nodeGroup.Task,
-         Protocol: fi.String("tcp"),
-         FromPort: fi.Int64(int64(nodePortRange.Base)),
-         ToPort: fi.Int64(int64(nodePortRange.Base + nodePortRange.Size - 1)),
+         Protocol: fi.PtrTo("tcp"),
+         FromPort: fi.PtrTo(int64(nodePortRange.Base)),
+         ToPort: fi.PtrTo(int64(nodePortRange.Base + nodePortRange.Size - 1)),
        }
        t.SetCidrOrPrefix(nodePortAccess)
        c.AddTask(t)
      }
      {
        t := &awstasks.SecurityGroupRule{
-         Name: fi.String(fmt.Sprintf("nodeport-udp-external-to-node-%s%s", nodePortAccess, suffix)),
+         Name: fi.PtrTo(fmt.Sprintf("nodeport-udp-external-to-node-%s%s", nodePortAccess, suffix)),
          Lifecycle: b.Lifecycle,
          SecurityGroup: nodeGroup.Task,
-         Protocol: fi.String("udp"),
-         FromPort: fi.Int64(int64(nodePortRange.Base)),
-         ToPort: fi.Int64(int64(nodePortRange.Base + nodePortRange.Size - 1)),
+         Protocol: fi.PtrTo("udp"),
+         FromPort: fi.PtrTo(int64(nodePortRange.Base)),
+         ToPort: fi.PtrTo(int64(nodePortRange.Base + nodePortRange.Size - 1)),
        }
        t.SetCidrOrPrefix(nodePortAccess)
        c.AddTask(t)
@@ -131,16 +131,16 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error {

  // We need to open security groups directly to the master nodes (instead of via the ELB)

  // HTTPS to the master is allowed (for API access)
- for _, apiAccess := range b.Cluster.Spec.KubernetesAPIAccess {
+ for _, apiAccess := range b.Cluster.Spec.API.Access {
    for _, masterGroup := range masterGroups {
      suffix := masterGroup.Suffix
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String(fmt.Sprintf("https-external-to-master-%s%s", apiAccess, suffix)),
+       Name: fi.PtrTo(fmt.Sprintf("https-external-to-master-%s%s", apiAccess, suffix)),
        Lifecycle: b.Lifecycle,
        SecurityGroup: masterGroup.Task,
-       Protocol: fi.String("tcp"),
-       FromPort: fi.Int64(443),
-       ToPort: fi.Int64(443),
+       Protocol: fi.PtrTo("tcp"),
+       FromPort: fi.PtrTo(int64(443)),
+       ToPort: fi.PtrTo(int64(443)),
      }
      t.SetCidrOrPrefix(apiAccess)
      AddDirectionalGroupRule(c, t)
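The two nodeport rules above translate the configured port range into an inclusive AWS rule: FromPort is nodePortRange.Base and ToPort is Base + Size - 1. A worked example, assuming the default Kubernetes service node-port range of 30000-32767:

// Hypothetical values: the default kube-apiserver --service-node-port-range
// is 30000-32767, i.e. Base = 30000 and Size = 2768.
base, size := 30000, 2768
from := int64(base)          // 30000
to := int64(base + size - 1) // 32767: the -1 keeps the range inclusive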
diff --git a/pkg/model/awsmodel/firewall.go b/pkg/model/awsmodel/firewall.go
index cf2114f1e49f3..3a46a1f76d4ce 100644
--- a/pkg/model/awsmodel/firewall.go
+++ b/pkg/model/awsmodel/firewall.go
@@ -77,21 +77,21 @@ func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) ([]Secu
    // Allow full egress
    {
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String("ipv4-node-egress" + src.Suffix),
+       Name: fi.PtrTo("ipv4-node-egress" + src.Suffix),
        Lifecycle: b.Lifecycle,
        SecurityGroup: src.Task,
-       Egress: fi.Bool(true),
-       CIDR: fi.String("0.0.0.0/0"),
+       Egress: fi.PtrTo(true),
+       CIDR: fi.PtrTo("0.0.0.0/0"),
      }
      AddDirectionalGroupRule(c, t)
    }
    {
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String("ipv6-node-egress" + src.Suffix),
+       Name: fi.PtrTo("ipv6-node-egress" + src.Suffix),
        Lifecycle: b.Lifecycle,
        SecurityGroup: src.Task,
-       Egress: fi.Bool(true),
-       IPv6CIDR: fi.String("::/0"),
+       Egress: fi.PtrTo(true),
+       IPv6CIDR: fi.PtrTo("::/0"),
      }
      AddDirectionalGroupRule(c, t)
    }
@@ -101,7 +101,7 @@ func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) ([]Secu
    suffix := JoinSuffixes(src, dest)

    t := &awstasks.SecurityGroupRule{
-     Name: fi.String("all-node-to-node" + suffix),
+     Name: fi.PtrTo("all-node-to-node" + suffix),
      Lifecycle: b.Lifecycle,
      SecurityGroup: dest.Task,
      SourceGroup: src.Task,
@@ -168,25 +168,25 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu

  for _, r := range udpRanges {
    t := &awstasks.SecurityGroupRule{
-     Name: fi.String(fmt.Sprintf("node-to-master-udp-%d-%d%s", r.From, r.To, suffix)),
+     Name: fi.PtrTo(fmt.Sprintf("node-to-master-udp-%d-%d%s", r.From, r.To, suffix)),
      Lifecycle: b.Lifecycle,
      SecurityGroup: masterGroup.Task,
      SourceGroup: nodeGroup.Task,
-     FromPort: fi.Int64(int64(r.From)),
-     ToPort: fi.Int64(int64(r.To)),
-     Protocol: fi.String("udp"),
+     FromPort: fi.PtrTo(int64(r.From)),
+     ToPort: fi.PtrTo(int64(r.To)),
+     Protocol: fi.PtrTo("udp"),
    }
    AddDirectionalGroupRule(c, t)
  }
  for _, r := range tcpRanges {
    t := &awstasks.SecurityGroupRule{
-     Name: fi.String(fmt.Sprintf("node-to-master-tcp-%d-%d%s", r.From, r.To, suffix)),
+     Name: fi.PtrTo(fmt.Sprintf("node-to-master-tcp-%d-%d%s", r.From, r.To, suffix)),
      Lifecycle: b.Lifecycle,
      SecurityGroup: masterGroup.Task,
      SourceGroup: nodeGroup.Task,
-     FromPort: fi.Int64(int64(r.From)),
-     ToPort: fi.Int64(int64(r.To)),
-     Protocol: fi.String("tcp"),
+     FromPort: fi.PtrTo(int64(r.From)),
+     ToPort: fi.PtrTo(int64(r.To)),
+     Protocol: fi.PtrTo("tcp"),
    }
    AddDirectionalGroupRule(c, t)
  }
@@ -201,11 +201,11 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
    }

    t := &awstasks.SecurityGroupRule{
-     Name: fi.String(fmt.Sprintf("node-to-master-protocol-%s%s", name, suffix)),
+     Name: fi.PtrTo(fmt.Sprintf("node-to-master-protocol-%s%s", name, suffix)),
      Lifecycle: b.Lifecycle,
      SecurityGroup: masterGroup.Task,
      SourceGroup: nodeGroup.Task,
-     Protocol: fi.String(awsName),
+     Protocol: fi.PtrTo(awsName),
    }
    AddDirectionalGroupRule(c, t)
  }
@@ -220,7 +220,7 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
    suffix := JoinSuffixes(src, dest)

    t := &awstasks.SecurityGroupRule{
-     Name: fi.String("all-nodes-to-master" + suffix),
+     Name: fi.PtrTo("all-nodes-to-master" + suffix),
      Lifecycle: b.Lifecycle,
      SecurityGroup: dest.Task,
      SourceGroup: src.Task,
@@ -246,21 +246,21 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext, nodeG
    // Allow full egress
    {
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String("ipv4-master-egress" + src.Suffix),
+       Name: fi.PtrTo("ipv4-master-egress" + src.Suffix),
        Lifecycle: b.Lifecycle,
        SecurityGroup: src.Task,
-       Egress: fi.Bool(true),
-       CIDR: fi.String("0.0.0.0/0"),
+       Egress: fi.PtrTo(true),
+       CIDR: fi.PtrTo("0.0.0.0/0"),
      }
      AddDirectionalGroupRule(c, t)
    }
    {
      t := &awstasks.SecurityGroupRule{
-       Name: fi.String("ipv6-master-egress" + src.Suffix),
+       Name: fi.PtrTo("ipv6-master-egress" + src.Suffix),
        Lifecycle: b.Lifecycle,
        SecurityGroup: src.Task,
-       Egress: fi.Bool(true),
-       IPv6CIDR: fi.String("::/0"),
+       Egress: fi.PtrTo(true),
+       IPv6CIDR: fi.PtrTo("::/0"),
      }
      AddDirectionalGroupRule(c, t)
    }
@@ -270,7 +270,7 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext, nodeG
    suffix := JoinSuffixes(src, dest)

    t := &awstasks.SecurityGroupRule{
-     Name: fi.String("all-master-to-master" + suffix),
+     Name: fi.PtrTo("all-master-to-master" + suffix),
      Lifecycle: b.Lifecycle,
      SecurityGroup: dest.Task,
      SourceGroup: src.Task,
@@ -283,7 +283,7 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext, nodeG
    suffix := JoinSuffixes(src, dest)

    t := &awstasks.SecurityGroupRule{
-     Name: fi.String("all-master-to-node" + suffix),
+     Name: fi.PtrTo("all-master-to-node" + suffix),
      Lifecycle: b.Lifecycle,
      SecurityGroup: dest.Task,
      SourceGroup: src.Task,
@@ -306,9 +306,9 @@ func (b *AWSModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Secu
  if role == kops.InstanceGroupRoleMaster {
    name := b.SecurityGroupName(role)
    baseGroup = &awstasks.SecurityGroup{
-     Name: fi.String(name),
+     Name: fi.PtrTo(name),
      VPC: b.LinkToVPC(),
-     Description: fi.String("Security group for masters"),
+     Description: fi.PtrTo("Security group for masters"),
      RemoveExtraRules: []string{
        "port=22",  // SSH
        "port=443", // k8s api
@@ -328,18 +328,18 @@ func (b *AWSModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Secu
  } else if role == kops.InstanceGroupRoleNode {
    name := b.SecurityGroupName(role)
    baseGroup = &awstasks.SecurityGroup{
-     Name: fi.String(name),
+     Name: fi.PtrTo(name),
      VPC: b.LinkToVPC(),
-     Description: fi.String("Security group for nodes"),
+     Description: fi.PtrTo("Security group for nodes"),
      RemoveExtraRules: []string{"port=22"},
    }
    baseGroup.Tags = b.CloudTags(name, false)
  } else if role == kops.InstanceGroupRoleBastion {
    name := b.SecurityGroupName(role)
    baseGroup = &awstasks.SecurityGroup{
-     Name: fi.String(name),
+     Name: fi.PtrTo(name),
      VPC: b.LinkToVPC(),
-     Description: fi.String("Security group for bastion"),
+     Description: fi.PtrTo("Security group for bastion"),
      RemoveExtraRules: []string{"port=22"},
    }
    baseGroup.Tags = b.CloudTags(name, false)
@@ -363,7 +363,7 @@ func (b *AWSModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Secu
      continue
    }

-   name := fi.StringValue(ig.Spec.SecurityGroupOverride)
+   name := fi.ValueOf(ig.Spec.SecurityGroupOverride)

    // De-duplicate security groups
    if done[name] {
@@ -371,12 +371,12 @@ func (b *AWSModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Secu
    }
    done[name] = true

-   sgName := fmt.Sprintf("%v-%v", fi.StringValue(ig.Spec.SecurityGroupOverride), role)
+   sgName := fmt.Sprintf("%v-%v", fi.ValueOf(ig.Spec.SecurityGroupOverride), role)
    t := &awstasks.SecurityGroup{
      Name: &sgName,
      ID: ig.Spec.SecurityGroupOverride,
      VPC: b.LinkToVPC(),
-     Shared: fi.Bool(true),
+     Shared: fi.PtrTo(true),
      Description: baseGroup.Description,
    }
    // Because the SecurityGroup is shared, we don't set RemoveExtraRules
@@ -394,7 +394,7 @@ func (b *AWSModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Secu
  // Add the default SecurityGroup, if any InstanceGroups are using the default
  if !allOverrides {
    groups = append(groups, SecurityGroupInfo{
-     Name: fi.StringValue(baseGroup.Name),
+     Name: fi.ValueOf(baseGroup.Name),
      Task: baseGroup,
    })
  }
@@ -425,7 +425,7 @@ func JoinSuffixes(src SecurityGroupInfo, dest SecurityGroupInfo) string {

 func AddDirectionalGroupRule(c *fi.ModelBuilderContext, t *awstasks.SecurityGroupRule) {
  name := generateName(t)
- t.Name = fi.String(name)
+ t.Name = fi.PtrTo(name)

  tags := make(map[string]string)
  for key, value := range t.SecurityGroup.Tags {
    tags[key] = value
@@ -440,31 +440,31 @@ func AddDirectionalGroupRule(c *fi.ModelBuilderContext, t *awstasks.SecurityGrou
 func generateName(o *awstasks.SecurityGroupRule) string {
  var target, dst, src, direction, proto string
  if o.SourceGroup != nil {
-   target = fi.StringValue(o.SourceGroup.Name)
+   target = fi.ValueOf(o.SourceGroup.Name)
  } else if o.CIDR != nil {
-   target = fi.StringValue(o.CIDR)
+   target = fi.ValueOf(o.CIDR)
  } else if o.IPv6CIDR != nil {
-   target = fi.StringValue(o.IPv6CIDR)
+   target = fi.ValueOf(o.IPv6CIDR)
  } else {
    target = "0.0.0.0/0"
  }

- if o.Protocol == nil || fi.StringValue(o.Protocol) == "" {
+ if o.Protocol == nil || fi.ValueOf(o.Protocol) == "" {
    proto = "all"
  } else {
-   proto = fi.StringValue(o.Protocol)
+   proto = fi.ValueOf(o.Protocol)
  }

- if o.Egress == nil || !fi.BoolValue(o.Egress) {
+ if o.Egress == nil || !fi.ValueOf(o.Egress) {
    direction = "ingress"
    src = target
-   dst = fi.StringValue(o.SecurityGroup.Name)
+   dst = fi.ValueOf(o.SecurityGroup.Name)
  } else {
    direction = "egress"
    dst = target
-   src = fi.StringValue(o.SecurityGroup.Name)
+   src = fi.ValueOf(o.SecurityGroup.Name)
  }

  return fmt.Sprintf("from-%s-%s-%s-%dto%d-%s", src, direction,
-   proto, fi.Int64Value(o.FromPort), fi.Int64Value(o.ToPort), dst)
+   proto, fi.ValueOf(o.FromPort), fi.ValueOf(o.ToPort), dst)
 }
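generateName, shown above, builds deterministic rule names of the form from-<src>-<direction>-<proto>-<from>to<to>-<dst>. A worked example with hypothetical security group names, for an ingress rule allowing tcp/443 from an ELB group into a masters group:

// Inputs (hypothetical names):
//   o.SourceGroup.Name   = "api-elb.example.com"
//   o.SecurityGroup.Name = "masters.example.com"
//   o.Protocol = "tcp", o.FromPort = 443, o.ToPort = 443, o.Egress = nil
name := fmt.Sprintf("from-%s-%s-%s-%dto%d-%s",
  "api-elb.example.com", "ingress", "tcp", int64(443), int64(443),
  "masters.example.com")
// name == "from-api-elb.example.com-ingress-tcp-443to443-masters.example.com"

Because fi.ValueOf maps a nil FromPort or ToPort to 0, an all-protocols rule with no ports renders as ...-0to0-..., which keeps the generated names stable even for rules that omit port fields.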
and %v: %v", @@ -153,10 +153,10 @@ func (b *IAMModelBuilder) Build(c *fi.ModelBuilderContext) error { return fmt.Errorf("error building service account role tasks: %w", err) } if len(aws.PolicyARNs) > 0 { - name := "external-" + fi.StringValue(iamRole.Name) + name := "external-" + fi.ValueOf(iamRole.Name) externalPolicies := aws.PolicyARNs c.AddTask(&awstasks.IAMRolePolicy{ - Name: fi.String(name), + Name: fi.PtrTo(name), ExternalPolicies: &externalPolicies, Managed: true, Role: iamRole, @@ -197,7 +197,7 @@ func (b *IAMModelBuilder) buildIAMRole(role iam.Subject, iamName string, c *fi.M } iamRole := &awstasks.IAMRole{ - Name: fi.String(iamName), + Name: fi.PtrTo(iamName), Lifecycle: b.Lifecycle, RolePolicyDocument: rolePolicy, @@ -205,14 +205,14 @@ func (b *IAMModelBuilder) buildIAMRole(role iam.Subject, iamName string, c *fi.M if isServiceAccount { // e.g. kube-system-dns-controller - iamRole.ExportWithID = fi.String(roleKey) + iamRole.ExportWithID = fi.PtrTo(roleKey) sa, ok := role.ServiceAccount() if ok { iamRole.Tags = b.CloudTagsForServiceAccount(iamName, sa) } } else { // e.g. nodes - iamRole.ExportWithID = fi.String(roleKey + "s") + iamRole.ExportWithID = fi.PtrTo(roleKey + "s") iamRole.Tags = b.CloudTags(iamName, false) } @@ -241,12 +241,12 @@ func (b *IAMModelBuilder) buildIAMRolePolicy(role iam.Subject, iamName string, i // but we might be creating the hosted zone dynamically. // We create a stub-reference which will be combined by the execution engine. iamPolicy.DNSZone = &awstasks.DNSZone{ - Name: fi.String(b.NameForDNSZone()), + Name: fi.PtrTo(b.NameForDNSZone()), } } t := &awstasks.IAMRolePolicy{ - Name: fi.String(iamName), + Name: fi.PtrTo(iamName), Lifecycle: b.Lifecycle, Role: iamRole, @@ -290,9 +290,9 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi. var iamInstanceProfile *awstasks.IAMInstanceProfile { iamInstanceProfile = &awstasks.IAMInstanceProfile{ - Name: fi.String(iamName), + Name: fi.PtrTo(iamName), Lifecycle: b.Lifecycle, - Shared: fi.Bool(shared), + Shared: fi.PtrTo(shared), Tags: b.CloudTags(iamName, shared), } c.AddTask(iamInstanceProfile) @@ -312,7 +312,7 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi. } { iamInstanceProfileRole := &awstasks.IAMInstanceProfileRole{ - Name: fi.String(iamName), + Name: fi.PtrTo(iamName), Lifecycle: b.Lifecycle, InstanceProfile: iamInstanceProfile, @@ -331,7 +331,7 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi. name := fmt.Sprintf("%s-policyoverride", roleKey) t := &awstasks.IAMRolePolicy{ - Name: fi.String(name), + Name: fi.PtrTo(name), Lifecycle: b.Lifecycle, Role: iamRole, Managed: true, @@ -353,7 +353,7 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi. additionalPolicyName := "additional." 
+ iamName t := &awstasks.IAMRolePolicy{ - Name: fi.String(additionalPolicyName), + Name: fi.PtrTo(additionalPolicyName), Lifecycle: b.Lifecycle, Role: iamRole, @@ -474,18 +474,18 @@ func (b *IAMModelBuilder) FindDeletions(context *fi.ModelBuilderContext, cloud f var getRoleErr error err := iamapi.ListRolesPages(request, func(p *awsIam.ListRolesOutput, lastPage bool) bool { for _, role := range p.Roles { - if !strings.HasSuffix(fi.StringValue(role.RoleName), "."+b.Cluster.ObjectMeta.Name) { + if !strings.HasSuffix(fi.ValueOf(role.RoleName), "."+b.Cluster.ObjectMeta.Name) { continue } getRequest := &awsIam.GetRoleInput{RoleName: role.RoleName} roleOutput, err := iamapi.GetRole(getRequest) if err != nil { - getRoleErr = fmt.Errorf("calling IAM GetRole on %s: %w", fi.StringValue(role.RoleName), err) + getRoleErr = fmt.Errorf("calling IAM GetRole on %s: %w", fi.ValueOf(role.RoleName), err) return false } for _, tag := range roleOutput.Role.Tags { - if fi.StringValue(tag.Key) == ownershipTag && fi.StringValue(tag.Value) == "owned" { - if _, ok := context.Tasks["IAMRole/"+fi.StringValue(role.RoleName)]; !ok { + if fi.ValueOf(tag.Key) == ownershipTag && fi.ValueOf(tag.Value) == "owned" { + if _, ok := context.Tasks["IAMRole/"+fi.ValueOf(role.RoleName)]; !ok { context.AddTask(&awstasks.IAMRole{ ID: role.RoleId, Name: role.RoleName, diff --git a/pkg/model/awsmodel/network.go b/pkg/model/awsmodel/network.go index a3f7b515f2020..6d62d59653a7e 100644 --- a/pkg/model/awsmodel/network.go +++ b/pkg/model/awsmodel/network.go @@ -60,10 +60,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { vpcTags = nil } t := &awstasks.VPC{ - Name: fi.String(vpcName), + Name: fi.PtrTo(vpcName), Lifecycle: b.Lifecycle, - Shared: fi.Bool(sharedVPC), - EnableDNSSupport: fi.Bool(true), + Shared: fi.PtrTo(sharedVPC), + EnableDNSSupport: fi.PtrTo(true), Tags: vpcTags, } @@ -74,20 +74,20 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { } else { // In theory we don't need to enable it for >= 1.5, // but seems safer to stick with existing behaviour - t.EnableDNSHostnames = fi.Bool(true) + t.EnableDNSHostnames = fi.PtrTo(true) // Used only for Terraform rendering. 
// Direct and CloudFormation rendering is handled via the VPCAmazonIPv6CIDRBlock task - t.AmazonIPv6 = fi.Bool(true) + t.AmazonIPv6 = fi.PtrTo(true) t.AssociateExtraCIDRBlocks = b.Cluster.Spec.AdditionalNetworkCIDRs } if b.Cluster.Spec.NetworkID != "" { - t.ID = fi.String(b.Cluster.Spec.NetworkID) + t.ID = fi.PtrTo(b.Cluster.Spec.NetworkID) } if b.Cluster.Spec.NetworkCIDR != "" { - t.CIDR = fi.String(b.Cluster.Spec.NetworkCIDR) + t.CIDR = fi.PtrTo(b.Cluster.Spec.NetworkCIDR) } c.AddTask(t) @@ -96,20 +96,20 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { if !sharedVPC { // Associate an Amazon-provided IPv6 CIDR block with the VPC c.AddTask(&awstasks.VPCAmazonIPv6CIDRBlock{ - Name: fi.String("AmazonIPv6"), + Name: fi.PtrTo("AmazonIPv6"), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), - Shared: fi.Bool(false), + Shared: fi.PtrTo(false), }) // Associate additional CIDR blocks with the VPC for _, cidr := range b.Cluster.Spec.AdditionalNetworkCIDRs { c.AddTask(&awstasks.VPCCIDRBlock{ - Name: fi.String(cidr), + Name: fi.PtrTo(cidr), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), - Shared: fi.Bool(false), - CIDRBlock: fi.String(cidr), + Shared: fi.PtrTo(false), + CIDRBlock: fi.PtrTo(cidr), }) } } @@ -117,22 +117,22 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { // TODO: would be good to create these as shared, to verify them if !sharedVPC { dhcp := &awstasks.DHCPOptions{ - Name: fi.String(b.ClusterName()), + Name: fi.PtrTo(b.ClusterName()), Lifecycle: b.Lifecycle, - DomainNameServers: fi.String("AmazonProvidedDNS"), + DomainNameServers: fi.PtrTo("AmazonProvidedDNS"), Tags: tags, - Shared: fi.Bool(sharedVPC), + Shared: fi.PtrTo(sharedVPC), } if b.Region == "us-east-1" { - dhcp.DomainName = fi.String("ec2.internal") + dhcp.DomainName = fi.PtrTo("ec2.internal") } else { - dhcp.DomainName = fi.String(b.Region + ".compute.internal") + dhcp.DomainName = fi.PtrTo(b.Region + ".compute.internal") } c.AddTask(dhcp) c.AddTask(&awstasks.VPCDHCPOptionsAssociation{ - Name: fi.String(b.ClusterName()), + Name: fi.PtrTo(b.ClusterName()), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), DHCPOptions: dhcp, @@ -150,7 +150,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { for i := range b.Cluster.Spec.Subnets { subnetSpec := &b.Cluster.Spec.Subnets[i] - sharedSubnet := subnetSpec.ProviderID != "" + sharedSubnet := subnetSpec.ID != "" if !sharedSubnet { allSubnetsShared = false allSubnetsSharedInZone[subnetSpec.Zone] = false @@ -170,10 +170,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { if !allSubnetsUnmanaged { // The internet gateway is the main entry point to the cluster. 
igw = &awstasks.InternetGateway{ - Name: fi.String(b.ClusterName()), + Name: fi.PtrTo(b.ClusterName()), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), - Shared: fi.Bool(sharedVPC), + Shared: fi.PtrTo(sharedVPC), } igw.Tags = b.CloudTags(*igw.Name, *igw.Shared) c.AddTask(igw) @@ -186,28 +186,28 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { routeTableTags := b.CloudTags(vpcName, sharedRouteTable) routeTableTags[awsup.TagNameKopsRole] = "public" publicRouteTable = &awstasks.RouteTable{ - Name: fi.String(b.ClusterName()), + Name: fi.PtrTo(b.ClusterName()), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), Tags: routeTableTags, - Shared: fi.Bool(sharedRouteTable), + Shared: fi.PtrTo(sharedRouteTable), } c.AddTask(publicRouteTable) // TODO: Validate when allSubnetsShared c.AddTask(&awstasks.Route{ - Name: fi.String("0.0.0.0/0"), + Name: fi.PtrTo("0.0.0.0/0"), Lifecycle: b.Lifecycle, - CIDR: fi.String("0.0.0.0/0"), + CIDR: fi.PtrTo("0.0.0.0/0"), RouteTable: publicRouteTable, InternetGateway: igw, }) c.AddTask(&awstasks.Route{ - Name: fi.String("::/0"), + Name: fi.PtrTo("::/0"), Lifecycle: b.Lifecycle, - IPv6CIDR: fi.String("::/0"), + IPv6CIDR: fi.PtrTo("::/0"), RouteTable: publicRouteTable, InternetGateway: igw, }) @@ -225,7 +225,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { for i := range b.Cluster.Spec.Subnets { subnetSpec := &b.Cluster.Spec.Subnets[i] - sharedSubnet := subnetSpec.ProviderID != "" + sharedSubnet := subnetSpec.ID != "" subnetName := subnetSpec.Name + "." + b.ClusterName() tags := map[string]string{} @@ -270,31 +270,31 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { } subnet := &awstasks.Subnet{ - Name: fi.String(subnetName), - ShortName: fi.String(subnetSpec.Name), + Name: fi.PtrTo(subnetName), + ShortName: fi.PtrTo(subnetSpec.Name), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), - AvailabilityZone: fi.String(subnetSpec.Zone), - Shared: fi.Bool(sharedSubnet), + AvailabilityZone: fi.PtrTo(subnetSpec.Zone), + Shared: fi.PtrTo(sharedSubnet), Tags: tags, } if b.Cluster.Spec.ExternalCloudControllerManager != nil && b.Cluster.IsKubernetesGTE("1.22") { - subnet.ResourceBasedNaming = fi.Bool(true) + subnet.ResourceBasedNaming = fi.PtrTo(true) } if subnetSpec.CIDR != "" { - subnet.CIDR = fi.String(subnetSpec.CIDR) + subnet.CIDR = fi.PtrTo(subnetSpec.CIDR) } if subnetSpec.IPv6CIDR != "" { if !sharedVPC { subnet.AmazonIPv6CIDR = b.LinkToAmazonVPCIPv6CIDR() } - subnet.IPv6CIDR = fi.String(subnetSpec.IPv6CIDR) + subnet.IPv6CIDR = fi.PtrTo(subnetSpec.IPv6CIDR) } - if subnetSpec.ProviderID != "" { - subnet.ID = fi.String(subnetSpec.ProviderID) + if subnetSpec.ID != "" { + subnet.ID = fi.PtrTo(subnetSpec.ID) } c.AddTask(subnet) @@ -304,7 +304,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { if b.IsIPv6Only() && subnetSpec.Type == kops.SubnetTypePublic && subnetSpec.IPv6CIDR != "" { // Public IPv6-capable subnets route NAT64 to a NAT gateway c.AddTask(&awstasks.RouteTableAssociation{ - Name: fi.String("public-" + subnetSpec.Name + "." + b.ClusterName()), + Name: fi.PtrTo("public-" + subnetSpec.Name + "." + b.ClusterName()), Lifecycle: b.Lifecycle, RouteTable: b.LinkToPublicRouteTableInZone(subnetSpec.Zone), Subnet: subnet, @@ -317,7 +317,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { infoByZone[subnetSpec.Zone].HaveIPv6PublicSubnet = true } else { c.AddTask(&awstasks.RouteTableAssociation{ - Name: fi.String(subnetSpec.Name + "." 
+ b.ClusterName()), + Name: fi.PtrTo(subnetSpec.Name + "." + b.ClusterName()), Lifecycle: b.Lifecycle, RouteTable: publicRouteTable, Subnet: subnet, @@ -333,7 +333,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { // // Map the Private subnet to the Private route table c.AddTask(&awstasks.RouteTableAssociation{ - Name: fi.String("private-" + subnetSpec.Name + "." + b.ClusterName()), + Name: fi.PtrTo("private-" + subnetSpec.Name + "." + b.ClusterName()), Lifecycle: b.Lifecycle, RouteTable: b.LinkToPrivateRouteTableInZone(subnetSpec.Zone), Subnet: subnet, @@ -358,10 +358,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { var eigw *awstasks.EgressOnlyInternetGateway if !allPrivateSubnetsUnmanaged && b.IsIPv6Only() { eigw = &awstasks.EgressOnlyInternetGateway{ - Name: fi.String(b.ClusterName()), + Name: fi.PtrTo(b.ClusterName()), Lifecycle: b.Lifecycle, VPC: b.LinkToVPC(), - Shared: fi.Bool(sharedVPC), + Shared: fi.PtrTo(sharedVPC), } eigw.Tags = b.CloudTags(*eigw.Name, *eigw.Shared) c.AddTask(eigw) @@ -417,13 +417,13 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { if strings.HasPrefix(egress, "nat-") { ngw = &awstasks.NatGateway{ - Name: fi.String(zone + "." + b.ClusterName()), + Name: fi.PtrTo(zone + "." + b.ClusterName()), Lifecycle: b.Lifecycle, Subnet: egressSubnet, - ID: fi.String(egress), + ID: fi.PtrTo(egress), AssociatedRouteTable: egressRouteTable, // If we're here, it means this NatGateway was specified, so we are Shared - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), Tags: b.CloudTags(zone+"."+b.ClusterName(), true), } @@ -432,17 +432,17 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { } else if strings.HasPrefix(egress, "eipalloc-") { eip := &awstasks.ElasticIP{ - Name: fi.String(zone + "." + b.ClusterName()), - ID: fi.String(egress), + Name: fi.PtrTo(zone + "." + b.ClusterName()), + ID: fi.PtrTo(egress), Lifecycle: b.Lifecycle, AssociatedNatGatewayRouteTable: egressRouteTable, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), Tags: b.CloudTags(zone+"."+b.ClusterName(), true), } c.AddTask(eip) ngw = &awstasks.NatGateway{ - Name: fi.String(zone + "." + b.ClusterName()), + Name: fi.PtrTo(zone + "." + b.ClusterName()), Lifecycle: b.Lifecycle, Subnet: egressSubnet, ElasticIP: eip, @@ -454,10 +454,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { } else if strings.HasPrefix(egress, "i-") { in = &awstasks.Instance{ - Name: fi.String(egress), + Name: fi.PtrTo(egress), Lifecycle: b.Lifecycle, - ID: fi.String(egress), - Shared: fi.Bool(true), + ID: fi.PtrTo(egress), + Shared: fi.PtrTo(true), Tags: nil, // We don't need to add tags here } @@ -475,13 +475,13 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { // subnet needs a NGW, lets create it. We tie it to a subnet // so we can track it in AWS eip := &awstasks.ElasticIP{ - Name: fi.String(zone + "." + b.ClusterName()), + Name: fi.PtrTo(zone + "." + b.ClusterName()), Lifecycle: b.Lifecycle, AssociatedNatGatewayRouteTable: egressRouteTable, } if publicIP != "" { - eip.PublicIP = fi.String(publicIP) + eip.PublicIP = fi.PtrTo(publicIP) eip.Tags = b.CloudTags(*eip.Name, true) } else { eip.Tags = b.CloudTags(*eip.Name, false) @@ -498,7 +498,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { // var ngw = &awstasks.NatGateway{} ngw = &awstasks.NatGateway{ - Name: fi.String(zone + "." + b.ClusterName()), + Name: fi.PtrTo(zone + "." 
+ b.ClusterName()), Lifecycle: b.Lifecycle, Subnet: egressSubnet, ElasticIP: eip, @@ -517,11 +517,11 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { routeTableTags := b.CloudTags(b.NamePrivateRouteTableInZone(zone), routeTableShared) routeTableTags[awsup.TagNameKopsRole] = "private-" + zone rt := &awstasks.RouteTable{ - Name: fi.String(b.NamePrivateRouteTableInZone(zone)), + Name: fi.PtrTo(b.NamePrivateRouteTableInZone(zone)), VPC: b.LinkToVPC(), Lifecycle: b.Lifecycle, - Shared: fi.Bool(routeTableShared), + Shared: fi.PtrTo(routeTableShared), Tags: routeTableTags, } c.AddTask(rt) @@ -533,17 +533,17 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { var r *awstasks.Route if in != nil { r = &awstasks.Route{ - Name: fi.String("private-" + zone + "-0.0.0.0/0"), + Name: fi.PtrTo("private-" + zone + "-0.0.0.0/0"), Lifecycle: b.Lifecycle, - CIDR: fi.String("0.0.0.0/0"), + CIDR: fi.PtrTo("0.0.0.0/0"), RouteTable: rt, Instance: in, } } else { r = &awstasks.Route{ - Name: fi.String("private-" + zone + "-0.0.0.0/0"), + Name: fi.PtrTo("private-" + zone + "-0.0.0.0/0"), Lifecycle: b.Lifecycle, - CIDR: fi.String("0.0.0.0/0"), + CIDR: fi.PtrTo("0.0.0.0/0"), RouteTable: rt, // Only one of these will be not nil NatGateway: ngw, @@ -555,9 +555,9 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { if b.IsIPv6Only() { // Route NAT64 well-known prefix to the NAT gateway c.AddTask(&awstasks.Route{ - Name: fi.String("private-" + zone + "-64:ff9b::/96"), + Name: fi.PtrTo("private-" + zone + "-64:ff9b::/96"), Lifecycle: b.Lifecycle, - IPv6CIDR: fi.String("64:ff9b::/96"), + IPv6CIDR: fi.PtrTo("64:ff9b::/96"), RouteTable: rt, // Only one of these will be not nil NatGateway: ngw, @@ -566,9 +566,9 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { // Route IPv6 to the Egress-only Internet Gateway. c.AddTask(&awstasks.Route{ - Name: fi.String("private-" + zone + "-::/0"), + Name: fi.PtrTo("private-" + zone + "-::/0"), Lifecycle: b.Lifecycle, - IPv6CIDR: fi.String("::/0"), + IPv6CIDR: fi.PtrTo("::/0"), RouteTable: rt, EgressOnlyInternetGateway: eigw, }) @@ -600,36 +600,36 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { routeTableTags := b.CloudTags(b.NamePublicRouteTableInZone(zone), routeTableShared) routeTableTags[awsup.TagNameKopsRole] = "public-" + zone rt := &awstasks.RouteTable{ - Name: fi.String(b.NamePublicRouteTableInZone(zone)), + Name: fi.PtrTo(b.NamePublicRouteTableInZone(zone)), VPC: b.LinkToVPC(), Lifecycle: b.Lifecycle, - Shared: fi.Bool(routeTableShared), + Shared: fi.PtrTo(routeTableShared), Tags: routeTableTags, } c.AddTask(rt) // Routes for the public route table. 
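// [Editor's note: 64:ff9b::/96 in the routes below is the RFC 6052 well-known
// NAT64 prefix; routing it to the NAT gateway is what lets IPv6-only nodes
// reach IPv4-only destinations.]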
c.AddTask(&awstasks.Route{ - Name: fi.String("public-" + zone + "-0.0.0.0/0"), + Name: fi.PtrTo("public-" + zone + "-0.0.0.0/0"), Lifecycle: b.Lifecycle, - CIDR: fi.String("0.0.0.0/0"), + CIDR: fi.PtrTo("0.0.0.0/0"), RouteTable: rt, InternetGateway: igw, }) c.AddTask(&awstasks.Route{ - Name: fi.String("public-" + zone + "-::/0"), + Name: fi.PtrTo("public-" + zone + "-::/0"), Lifecycle: b.Lifecycle, - IPv6CIDR: fi.String("::/0"), + IPv6CIDR: fi.PtrTo("::/0"), RouteTable: rt, InternetGateway: igw, }) // Route NAT64 well-known prefix to the NAT gateway c.AddTask(&awstasks.Route{ - Name: fi.String("public-" + zone + "-64:ff9b::/96"), + Name: fi.PtrTo("public-" + zone + "-64:ff9b::/96"), Lifecycle: b.Lifecycle, - IPv6CIDR: fi.String("64:ff9b::/96"), + IPv6CIDR: fi.PtrTo("64:ff9b::/96"), RouteTable: rt, // Only one of these will be not nil NatGateway: ngw, @@ -644,20 +644,20 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { func addAdditionalRoutes(routes []kops.RouteSpec, sbName string, rt *awstasks.RouteTable, lf fi.Lifecycle, c *fi.ModelBuilderContext) error { for _, r := range routes { t := &awstasks.Route{ - Name: fi.String(sbName + "." + r.CIDR), + Name: fi.PtrTo(sbName + "." + r.CIDR), Lifecycle: lf, - CIDR: fi.String(r.CIDR), + CIDR: fi.PtrTo(r.CIDR), RouteTable: rt, } if strings.HasPrefix(r.Target, "pcx-") { - t.VPCPeeringConnectionID = fi.String(r.Target) + t.VPCPeeringConnectionID = fi.PtrTo(r.Target) c.AddTask(t) } else if strings.HasPrefix(r.Target, "i-") { inst := &awstasks.Instance{ - Name: fi.String(r.Target), + Name: fi.PtrTo(r.Target), Lifecycle: lf, - ID: fi.String(r.Target), - Shared: fi.Bool(true), + ID: fi.PtrTo(r.Target), + Shared: fi.PtrTo(true), } err := c.EnsureTask(inst) if err != nil { @@ -667,10 +667,10 @@ func addAdditionalRoutes(routes []kops.RouteSpec, sbName string, rt *awstasks.Ro c.AddTask(t) } else if strings.HasPrefix(r.Target, "nat-") { nat := &awstasks.NatGateway{ - Name: fi.String(r.Target), + Name: fi.PtrTo(r.Target), Lifecycle: lf, - ID: fi.String(r.Target), - Shared: fi.Bool(true), + ID: fi.PtrTo(r.Target), + Shared: fi.PtrTo(true), } err := c.EnsureTask(nat) if err != nil { @@ -679,14 +679,14 @@ func addAdditionalRoutes(routes []kops.RouteSpec, sbName string, rt *awstasks.Ro t.NatGateway = nat c.AddTask(t) } else if strings.HasPrefix(r.Target, "tgw-") { - t.TransitGatewayID = fi.String(r.Target) + t.TransitGatewayID = fi.PtrTo(r.Target) c.AddTask(t) } else if strings.HasPrefix(r.Target, "igw-") { internetGW := &awstasks.InternetGateway{ - Name: fi.String(r.Target), + Name: fi.PtrTo(r.Target), Lifecycle: lf, - ID: fi.String(r.Target), - Shared: fi.Bool(true), + ID: fi.PtrTo(r.Target), + Shared: fi.PtrTo(true), } err := c.EnsureTask(internetGW) if err != nil { @@ -696,10 +696,10 @@ func addAdditionalRoutes(routes []kops.RouteSpec, sbName string, rt *awstasks.Ro c.AddTask(t) } else if strings.HasPrefix(r.Target, "eigw-") { eigw := &awstasks.EgressOnlyInternetGateway{ - Name: fi.String(r.Target), + Name: fi.PtrTo(r.Target), Lifecycle: lf, - ID: fi.String(r.Target), - Shared: fi.Bool(true), + ID: fi.PtrTo(r.Target), + Shared: fi.PtrTo(true), } err := c.EnsureTask(eigw) if err != nil { diff --git a/pkg/model/awsmodel/oidc_provider.go b/pkg/model/awsmodel/oidc_provider.go index b0bc9d0714fab..5d52f12717113 100644 --- a/pkg/model/awsmodel/oidc_provider.go +++ b/pkg/model/awsmodel/oidc_provider.go @@ -45,7 +45,7 @@ func (b *OIDCProviderBuilder) Build(c *fi.ModelBuilderContext) error { thumbprints := []*string{} for _, fingerprint := 
range fingerprints { - thumbprints = append(thumbprints, fi.String(fingerprint)) + thumbprints = append(thumbprints, fi.PtrTo(fingerprint)) } audiences := []string{defaultAudience} @@ -54,7 +54,7 @@ func (b *OIDCProviderBuilder) Build(c *fi.ModelBuilderContext) error { } c.AddTask(&awstasks.IAMOIDCProvider{ - Name: fi.String(b.ClusterName()), + Name: fi.PtrTo(b.ClusterName()), Lifecycle: b.Lifecycle, URL: b.Cluster.Spec.KubeAPIServer.ServiceAccountIssuer, ClientIDs: fi.StringSlice(audiences), diff --git a/pkg/model/awsmodel/spotinst.go b/pkg/model/awsmodel/spotinst.go index a30ecf9d01aa1..7070c692967ab 100644 --- a/pkg/model/awsmodel/spotinst.go +++ b/pkg/model/awsmodel/spotinst.go @@ -191,10 +191,10 @@ func (b *SpotInstanceGroupModelBuilder) buildElastigroup(c *fi.ModelBuilderConte klog.V(4).Infof("Building instance group as Elastigroup: %q", b.AutoscalingGroupName(ig)) group := &spotinsttasks.Elastigroup{ Lifecycle: b.Lifecycle, - Name: fi.String(b.AutoscalingGroupName(ig)), - Region: fi.String(b.Region), - ImageID: fi.String(ig.Spec.Image), - OnDemandInstanceType: fi.String(strings.Split(ig.Spec.MachineType, ",")[0]), + Name: fi.PtrTo(b.AutoscalingGroupName(ig)), + Region: fi.PtrTo(b.Region), + ImageID: fi.PtrTo(ig.Spec.Image), + OnDemandInstanceType: fi.PtrTo(strings.Split(ig.Spec.MachineType, ",")[0]), SpotInstanceTypes: strings.Split(ig.Spec.MachineType, ","), } @@ -214,7 +214,7 @@ func (b *SpotInstanceGroupModelBuilder) buildElastigroup(c *fi.ModelBuilderConte } case SpotInstanceGroupLabelOrientation: - group.Orientation = fi.String(v) + group.Orientation = fi.PtrTo(v) case SpotInstanceGroupLabelUtilizeReservedInstances: group.UtilizeReservedInstances, err = parseBool(v) @@ -241,7 +241,7 @@ func (b *SpotInstanceGroupModelBuilder) buildElastigroup(c *fi.ModelBuilderConte } case SpotInstanceGroupLabelHealthCheckType: - group.HealthCheckType = fi.String(strings.ToUpper(v)) + group.HealthCheckType = fi.PtrTo(strings.ToUpper(v)) } } @@ -264,7 +264,7 @@ func (b *SpotInstanceGroupModelBuilder) buildElastigroup(c *fi.ModelBuilderConte // Tenancy. if ig.Spec.Tenancy != "" { - group.Tenancy = fi.String(ig.Spec.Tenancy) + group.Tenancy = fi.PtrTo(ig.Spec.Tenancy) } // Security groups. @@ -324,7 +324,7 @@ func (b *SpotInstanceGroupModelBuilder) buildElastigroup(c *fi.ModelBuilderConte group.AutoScalerOpts.Taints = nil } - klog.V(4).Infof("Adding task: Elastigroup/%s", fi.StringValue(group.Name)) + klog.V(4).Infof("Adding task: Elastigroup/%s", fi.ValueOf(group.Name)) c.AddTask(group) return nil @@ -334,11 +334,11 @@ func (b *SpotInstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, ig klog.V(4).Infof("Building instance group as Ocean: %q", "nodes."+b.ClusterName()) ocean := &spotinsttasks.Ocean{ Lifecycle: b.Lifecycle, - Name: fi.String("nodes." + b.ClusterName()), + Name: fi.PtrTo("nodes." + b.ClusterName()), } if featureflag.SpotinstOceanTemplate.Enabled() { - ocean.UseAsTemplateOnly = fi.Bool(true) + ocean.UseAsTemplateOnly = fi.PtrTo(true) } if len(igs) == 0 { @@ -352,7 +352,7 @@ func (b *SpotInstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, ig if err != nil { continue } - if fi.BoolValue(defaultLaunchSpec) { + if fi.ValueOf(defaultLaunchSpec) { if ig != nil { return fmt.Errorf("unable to detect default launch spec: "+ "multiple instance groups labeled with `%s: \"true\"`", @@ -371,7 +371,7 @@ func (b *SpotInstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, ig klog.V(4).Infof("Detected default launch spec: %q", b.AutoscalingGroupName(ig)) // Image. 
- ocean.ImageID = fi.String(ig.Spec.Image) + ocean.ImageID = fi.PtrTo(ig.Spec.Image) // Strategy and instance types. for k, v := range ig.ObjectMeta.Labels { @@ -452,10 +452,10 @@ func (b *SpotInstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, ig ocean.AutoScalerOpts.Headroom = nil } - if !fi.BoolValue(ocean.UseAsTemplateOnly) { + if !fi.ValueOf(ocean.UseAsTemplateOnly) { // Capacity. - ocean.MinSize = fi.Int64(0) - ocean.MaxSize = fi.Int64(0) + ocean.MinSize = fi.PtrTo(int64(0)) + ocean.MaxSize = fi.PtrTo(int64(0)) // User data. ocean.UserData, err = b.BootstrapScriptBuilder.ResourceNodeUp(c, ig) @@ -499,7 +499,7 @@ func (b *SpotInstanceGroupModelBuilder) buildOcean(c *fi.ModelBuilderContext, ig } } - klog.V(4).Infof("Adding task: Ocean/%s", fi.StringValue(ocean.Name)) + klog.V(4).Infof("Adding task: Ocean/%s", fi.ValueOf(ocean.Name)) c.AddTask(ocean) return nil @@ -509,9 +509,9 @@ func (b *SpotInstanceGroupModelBuilder) buildLaunchSpec(c *fi.ModelBuilderContex ig, igOcean *kops.InstanceGroup, ocean *spotinsttasks.Ocean) (err error) { klog.V(4).Infof("Building instance group as LaunchSpec: %q", b.AutoscalingGroupName(ig)) launchSpec := &spotinsttasks.LaunchSpec{ - Name: fi.String(b.AutoscalingGroupName(ig)), + Name: fi.PtrTo(b.AutoscalingGroupName(ig)), Lifecycle: b.Lifecycle, - ImageID: fi.String(ig.Spec.Image), + ImageID: fi.PtrTo(ig.Spec.Image), Ocean: ocean, // link to Ocean } @@ -545,12 +545,12 @@ func (b *SpotInstanceGroupModelBuilder) buildLaunchSpec(c *fi.ModelBuilderContex // Capacity. minSize, maxSize := b.buildCapacity(ig) - if fi.BoolValue(ocean.UseAsTemplateOnly) { + if fi.ValueOf(ocean.UseAsTemplateOnly) { launchSpec.MinSize = minSize launchSpec.MaxSize = maxSize } else { - ocean.MinSize = fi.Int64(fi.Int64Value(ocean.MinSize) + fi.Int64Value(minSize)) - ocean.MaxSize = fi.Int64(fi.Int64Value(ocean.MaxSize) + fi.Int64Value(maxSize)) + ocean.MinSize = fi.PtrTo(fi.ValueOf(ocean.MinSize) + fi.ValueOf(minSize)) + ocean.MaxSize = fi.PtrTo(fi.ValueOf(ocean.MaxSize) + fi.ValueOf(maxSize)) } // User data. @@ -621,7 +621,7 @@ func (b *SpotInstanceGroupModelBuilder) buildLaunchSpec(c *fi.ModelBuilderContex } } - klog.V(4).Infof("Adding task: LaunchSpec/%s", fi.StringValue(launchSpec.Name)) + klog.V(4).Infof("Adding task: LaunchSpec/%s", fi.ValueOf(launchSpec.Name)) c.AddTask(launchSpec) return nil @@ -636,9 +636,9 @@ func (b *SpotInstanceGroupModelBuilder) buildSecurityGroups(c *fi.ModelBuilderCo for _, id := range ig.Spec.AdditionalSecurityGroups { sg := &awstasks.SecurityGroup{ Lifecycle: b.SecurityLifecycle, - ID: fi.String(id), - Name: fi.String(id), - Shared: fi.Bool(true), + ID: fi.PtrTo(id), + Name: fi.PtrTo(id), + Shared: fi.PtrTo(true), } if err := c.EnsureTask(sg); err != nil { return nil, err @@ -703,7 +703,7 @@ func (b *SpotInstanceGroupModelBuilder) buildPublicIPOpts(ig *kops.InstanceGroup return nil, fmt.Errorf("unknown subnet type %q", subnetType) } - return fi.Bool(associatePublicIP), nil + return fi.PtrTo(associatePublicIP), nil } func (b *SpotInstanceGroupModelBuilder) buildRootVolumeOpts(ig *kops.InstanceGroup) (*spotinsttasks.RootVolumeOpts, error) { @@ -711,21 +711,21 @@ func (b *SpotInstanceGroupModelBuilder) buildRootVolumeOpts(ig *kops.InstanceGro // Optimization. { - if fi.BoolValue(ig.Spec.RootVolumeOptimization) { + if fi.ValueOf(ig.Spec.RootVolumeOptimization) { opts.Optimization = ig.Spec.RootVolumeOptimization } } // Encryption. 
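// [Editor's sketch, not part of this patch: fi.ValueOf is presumably the
// matching nil-safe dereference for these pointer fields, roughly:
//
//	func ValueOf[T any](v *T) T {
//		if v == nil {
//			var zero T
//			return zero
//		}
//		return *v
//	}
//
// which is what keeps an accumulation such as
// ocean.MinSize = fi.PtrTo(fi.ValueOf(ocean.MinSize) + fi.ValueOf(minSize))
// safe to evaluate while ocean.MinSize is still nil.]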
{ - if fi.BoolValue(ig.Spec.RootVolumeEncryption) { + if fi.ValueOf(ig.Spec.RootVolumeEncryption) { opts.Encryption = ig.Spec.RootVolumeEncryption } } // Size. { - size := fi.Int32Value(ig.Spec.RootVolumeSize) + size := fi.ValueOf(ig.Spec.RootVolumeSize) if size == 0 { var err error size, err = defaults.DefaultInstanceGroupVolumeSize(ig.Spec.Role) @@ -733,31 +733,31 @@ func (b *SpotInstanceGroupModelBuilder) buildRootVolumeOpts(ig *kops.InstanceGro return nil, err } } - opts.Size = fi.Int64(int64(size)) + opts.Size = fi.PtrTo(int64(size)) } // Type. { - typ := fi.StringValue(ig.Spec.RootVolumeType) + typ := fi.ValueOf(ig.Spec.RootVolumeType) if typ == "" { typ = "gp2" } - opts.Type = fi.String(typ) + opts.Type = fi.PtrTo(typ) } // IOPS. { - iops := fi.Int32Value(ig.Spec.RootVolumeIOPS) + iops := fi.ValueOf(ig.Spec.RootVolumeIOPS) if iops > 0 { - opts.IOPS = fi.Int64(int64(iops)) + opts.IOPS = fi.PtrTo(int64(iops)) } } // Throughput. { - throughput := fi.Int32Value(ig.Spec.RootVolumeThroughput) + throughput := fi.ValueOf(ig.Spec.RootVolumeThroughput) if throughput > 0 { - opts.Throughput = fi.Int64(int64(throughput)) + opts.Throughput = fi.PtrTo(int64(throughput)) } } @@ -767,7 +767,7 @@ func (b *SpotInstanceGroupModelBuilder) buildRootVolumeOpts(ig *kops.InstanceGro func (b *SpotInstanceGroupModelBuilder) buildCapacity(ig *kops.InstanceGroup) (*int64, *int64) { minSize := int32(1) if ig.Spec.MinSize != nil { - minSize = fi.Int32Value(ig.Spec.MinSize) + minSize = fi.ValueOf(ig.Spec.MinSize) } else if ig.Spec.Role == kops.InstanceGroupRoleNode { minSize = 2 } @@ -779,7 +779,7 @@ func (b *SpotInstanceGroupModelBuilder) buildCapacity(ig *kops.InstanceGroup) (* maxSize = 2 } - return fi.Int64(int64(minSize)), fi.Int64(int64(maxSize)) + return fi.PtrTo(int64(minSize)), fi.PtrTo(int64(maxSize)) } func (b *SpotInstanceGroupModelBuilder) buildLoadBalancers(c *fi.ModelBuilderContext, @@ -807,20 +807,20 @@ func (b *SpotInstanceGroupModelBuilder) buildLoadBalancers(c *fi.ModelBuilderCon lb := &awstasks.ClassicLoadBalancer{ Name: extLB.LoadBalancerName, LoadBalancerName: extLB.LoadBalancerName, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), } loadBalancers = append(loadBalancers, lb) c.EnsureTask(lb) } if extLB.TargetGroupARN != nil { - targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.StringValue(extLB.TargetGroupARN)) + targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.ValueOf(extLB.TargetGroupARN)) if err != nil { return nil, nil, err } tg := &awstasks.TargetGroup{ - Name: fi.String(ig.Name + "-" + targetGroupName), + Name: fi.PtrTo(ig.Name + "-" + targetGroupName), ARN: extLB.TargetGroupARN, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), } targetGroups = append(targetGroups, tg) c.AddTask(tg) @@ -840,7 +840,7 @@ func (b *SpotInstanceGroupModelBuilder) buildTags(ig *kops.InstanceGroup) (map[s func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig *kops.InstanceGroup) (*spotinsttasks.AutoScalerOpts, error) { opts := &spotinsttasks.AutoScalerOpts{ - ClusterID: fi.String(clusterID), + ClusterID: fi.PtrTo(clusterID), } switch ig.Spec.Role { @@ -852,8 +852,8 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig } // Enable the auto scaler for Node instance groups. - opts.Enabled = fi.Bool(true) - opts.AutoConfig = fi.Bool(true) + opts.Enabled = fi.PtrTo(true) + opts.AutoConfig = fi.PtrTo(true) // Parse instance group labels. 
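// [Editor's note: because PtrTo infers its type parameter from the argument,
// the widening that the old typed helpers did is now spelled out at each call
// site, e.g. in the surrounding hunks:
//
//	opts.Size = fi.PtrTo(int64(size))            // was fi.Int64(int64(size))
//	opts.Cooldown = fi.PtrTo(int(fi.ValueOf(v))) // was fi.Int(int(fi.Int64Value(v)))
//
// Untyped constants still infer cleanly: fi.PtrTo(300) yields an *int, as in
// the Elastigroup defaults below.]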
var defaultNodeLabels bool @@ -865,7 +865,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if err != nil { return nil, err } - opts.Enabled = fi.Bool(!fi.BoolValue(v)) + opts.Enabled = fi.PtrTo(!fi.ValueOf(v)) } case SpotInstanceGroupLabelAutoScalerDefaultNodeLabels: @@ -874,7 +874,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if err != nil { return nil, err } - defaultNodeLabels = fi.BoolValue(v) + defaultNodeLabels = fi.ValueOf(v) } case SpotInstanceGroupLabelAutoScalerCooldown: @@ -883,7 +883,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if err != nil { return nil, err } - opts.Cooldown = fi.Int(int(fi.Int64Value(v))) + opts.Cooldown = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerAutoConfig: @@ -901,7 +901,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if err != nil { return nil, err } - opts.AutoHeadroomPercentage = fi.Int(int(fi.Int64Value(v))) + opts.AutoHeadroomPercentage = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerHeadroomCPUPerUnit: @@ -913,7 +913,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if opts.Headroom == nil { opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts) } - opts.Headroom.CPUPerUnit = fi.Int(int(fi.Int64Value(v))) + opts.Headroom.CPUPerUnit = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerHeadroomGPUPerUnit: @@ -925,7 +925,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if opts.Headroom == nil { opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts) } - opts.Headroom.GPUPerUnit = fi.Int(int(fi.Int64Value(v))) + opts.Headroom.GPUPerUnit = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerHeadroomMemPerUnit: @@ -937,7 +937,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if opts.Headroom == nil { opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts) } - opts.Headroom.MemPerUnit = fi.Int(int(fi.Int64Value(v))) + opts.Headroom.MemPerUnit = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerHeadroomNumOfUnits: @@ -949,7 +949,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if opts.Headroom == nil { opts.Headroom = new(spotinsttasks.AutoScalerHeadroomOpts) } - opts.Headroom.NumOfUnits = fi.Int(int(fi.Int64Value(v))) + opts.Headroom.NumOfUnits = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerScaleDownMaxPercentage: @@ -973,7 +973,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if opts.Down == nil { opts.Down = new(spotinsttasks.AutoScalerDownOpts) } - opts.Down.EvaluationPeriods = fi.Int(int(fi.Int64Value(v))) + opts.Down.EvaluationPeriods = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerResourceLimitsMaxVCPU: @@ -985,7 +985,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if opts.ResourceLimits == nil { opts.ResourceLimits = new(spotinsttasks.AutoScalerResourceLimitsOpts) } - opts.ResourceLimits.MaxVCPU = fi.Int(int(fi.Int64Value(v))) + opts.ResourceLimits.MaxVCPU = fi.PtrTo(int(fi.ValueOf(v))) } case SpotInstanceGroupLabelAutoScalerResourceLimitsMaxMemory: @@ -997,7 +997,7 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig if opts.ResourceLimits == nil { opts.ResourceLimits = 
new(spotinsttasks.AutoScalerResourceLimitsOpts) } - opts.ResourceLimits.MaxMemory = fi.Int(int(fi.Int64Value(v))) + opts.ResourceLimits.MaxMemory = fi.PtrTo(int(fi.ValueOf(v))) } } } @@ -1005,10 +1005,10 @@ func (b *SpotInstanceGroupModelBuilder) buildAutoScalerOpts(clusterID string, ig // Configure Elastigroup defaults to avoid state drifts. if !featureflag.SpotinstOcean.Enabled() { if opts.Cooldown == nil { - opts.Cooldown = fi.Int(300) + opts.Cooldown = fi.PtrTo(300) } if opts.Down != nil && opts.Down.EvaluationPeriods == nil { - opts.Down.EvaluationPeriods = fi.Int(5) + opts.Down.EvaluationPeriods = fi.PtrTo(5) } } diff --git a/pkg/model/awsmodel/sshkey.go b/pkg/model/awsmodel/sshkey.go index af5ee01256443..2317e8af85592 100644 --- a/pkg/model/awsmodel/sshkey.go +++ b/pkg/model/awsmodel/sshkey.go @@ -39,10 +39,10 @@ func (b *SSHKeyModelBuilder) Build(c *fi.ModelBuilderContext) error { return err } t := &awstasks.SSHKey{ - Name: fi.String(name), + Name: fi.PtrTo(name), Lifecycle: b.Lifecycle, Tags: b.CloudTags(b.ClusterName(), false), - Shared: fi.StringValue(b.Cluster.Spec.SSHKeyName) != "", + Shared: fi.ValueOf(b.Cluster.Spec.SSHKeyName) != "", } if len(b.SSHPublicKeys) >= 1 { t.PublicKey = fi.NewStringResource(string(b.SSHPublicKeys[0])) diff --git a/pkg/model/azuremodel/api_loadbalancer.go b/pkg/model/azuremodel/api_loadbalancer.go index d66f1f1be3280..503f3333a9441 100644 --- a/pkg/model/azuremodel/api_loadbalancer.go +++ b/pkg/model/azuremodel/api_loadbalancer.go @@ -49,7 +49,7 @@ func (b *APILoadBalancerModelBuilder) Build(c *fi.ModelBuilderContext) error { // Create LoadBalancer for API ELB lb := &azuretasks.LoadBalancer{ - Name: fi.String(b.NameForLoadBalancer()), + Name: fi.PtrTo(b.NameForLoadBalancer()), Lifecycle: b.Lifecycle, ResourceGroup: b.LinkToResourceGroup(), Tags: map[string]*string{}, @@ -68,7 +68,7 @@ func (b *APILoadBalancerModelBuilder) Build(c *fi.ModelBuilderContext) error { // Create Public IP Address for Public Loadbalacer p := &azuretasks.PublicIPAddress{ - Name: fi.String(b.NameForLoadBalancer()), + Name: fi.PtrTo(b.NameForLoadBalancer()), Lifecycle: b.Lifecycle, ResourceGroup: b.LinkToResourceGroup(), Tags: map[string]*string{}, diff --git a/pkg/model/azuremodel/context.go b/pkg/model/azuremodel/context.go index 43589099393fd..f801e499dfaa4 100644 --- a/pkg/model/azuremodel/context.go +++ b/pkg/model/azuremodel/context.go @@ -35,7 +35,7 @@ type AzureModelContext struct { // LinkToVirtualNetwork returns the Azure Virtual Network object the cluster is located in. func (c *AzureModelContext) LinkToVirtualNetwork() *azuretasks.VirtualNetwork { - return &azuretasks.VirtualNetwork{Name: fi.String(c.NameForVirtualNetwork())} + return &azuretasks.VirtualNetwork{Name: fi.PtrTo(c.NameForVirtualNetwork())} } // NameForVirtualNetwork returns the name of the Azure Virtual Network object the cluster is located in. @@ -49,7 +49,7 @@ func (c *AzureModelContext) NameForVirtualNetwork() string { // LinkToResourceGroup returns the Resource Group object the cluster is located in. func (c *AzureModelContext) LinkToResourceGroup() *azuretasks.ResourceGroup { - return &azuretasks.ResourceGroup{Name: fi.String(c.NameForResourceGroup())} + return &azuretasks.ResourceGroup{Name: fi.PtrTo(c.NameForResourceGroup())} } // NameForResourceGroup returns the name of the Resource Group object the cluster is located in. @@ -59,7 +59,7 @@ func (c *AzureModelContext) NameForResourceGroup() string { // LinkToAzureSubnet returns the Azure Subnet object the cluster is located in. 
func (c *AzureModelContext) LinkToAzureSubnet(spec *kops.ClusterSubnetSpec) *azuretasks.Subnet { - return &azuretasks.Subnet{Name: fi.String(spec.Name)} + return &azuretasks.Subnet{Name: fi.PtrTo(spec.Name)} } // NameForRouteTable returns the name of the Route Table object for the cluster. @@ -69,7 +69,7 @@ func (c *AzureModelContext) NameForRouteTable() string { // LinkToLoadBalancer returns the Load Balancer object for the cluster. func (c *AzureModelContext) LinkToLoadBalancer() *azuretasks.LoadBalancer { - return &azuretasks.LoadBalancer{Name: fi.String(c.NameForLoadBalancer())} + return &azuretasks.LoadBalancer{Name: fi.PtrTo(c.NameForLoadBalancer())} } // NameForLoadBalancer returns the name of the Load Balancer object for the cluster. @@ -123,7 +123,7 @@ func (c *AzureModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) ma // Replace all "/" with "_" as "/" is not an allowed key character in Azure. m := make(map[string]*string) for k, v := range labels { - m[strings.ReplaceAll(k, "/", "_")] = fi.String(v) + m[strings.ReplaceAll(k, "/", "_")] = fi.PtrTo(v) } return m } diff --git a/pkg/model/azuremodel/context_test.go b/pkg/model/azuremodel/context_test.go index 7abeeebd9f5c6..57cddef24bb3d 100644 --- a/pkg/model/azuremodel/context_test.go +++ b/pkg/model/azuremodel/context_test.go @@ -43,13 +43,13 @@ func TestCloudTagsForInstanceGroup(t *testing.T) { actual := c.CloudTagsForInstanceGroup(c.InstanceGroups[0]) expected := map[string]*string{ - "cluster_label_key": fi.String("cluster_label_value"), - "ig_label_key": fi.String("ig_label_value"), - "test_label": fi.String("from_ig"), - "k8s.io_cluster_node-template_label_0": fi.String("node_label/key=node_label_value"), - "k8s.io_cluster_node-template_taint_taint_key": fi.String("taint_value"), - "k8s.io_role_node": fi.String("1"), - "kops.k8s.io_instancegroup": fi.String("nodes"), + "cluster_label_key": fi.PtrTo("cluster_label_value"), + "ig_label_key": fi.PtrTo("ig_label_value"), + "test_label": fi.PtrTo("from_ig"), + "k8s.io_cluster_node-template_label_0": fi.PtrTo("node_label/key=node_label_value"), + "k8s.io_cluster_node-template_taint_taint_key": fi.PtrTo("taint_value"), + "k8s.io_role_node": fi.PtrTo("1"), + "kops.k8s.io_instancegroup": fi.PtrTo("nodes"), } if !reflect.DeepEqual(actual, expected) { diff --git a/pkg/model/azuremodel/network.go b/pkg/model/azuremodel/network.go index 9ccc4f5747136..bb3312456e8f9 100644 --- a/pkg/model/azuremodel/network.go +++ b/pkg/model/azuremodel/network.go @@ -32,33 +32,33 @@ var _ fi.ModelBuilder = &NetworkModelBuilder{} // Build builds tasks for creating a virtual network and subnets. 
func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { networkTask := &azuretasks.VirtualNetwork{ - Name: fi.String(b.NameForVirtualNetwork()), + Name: fi.PtrTo(b.NameForVirtualNetwork()), Lifecycle: b.Lifecycle, ResourceGroup: b.LinkToResourceGroup(), - CIDR: fi.String(b.Cluster.Spec.NetworkCIDR), + CIDR: fi.PtrTo(b.Cluster.Spec.NetworkCIDR), Tags: map[string]*string{}, - Shared: fi.Bool(b.Cluster.SharedVPC()), + Shared: fi.PtrTo(b.Cluster.SharedVPC()), } c.AddTask(networkTask) for _, subnetSpec := range b.Cluster.Spec.Subnets { subnetTask := &azuretasks.Subnet{ - Name: fi.String(subnetSpec.Name), + Name: fi.PtrTo(subnetSpec.Name), Lifecycle: b.Lifecycle, ResourceGroup: b.LinkToResourceGroup(), VirtualNetwork: b.LinkToVirtualNetwork(), - CIDR: fi.String(subnetSpec.CIDR), - Shared: fi.Bool(b.Cluster.SharedVPC()), + CIDR: fi.PtrTo(subnetSpec.CIDR), + Shared: fi.PtrTo(b.Cluster.SharedVPC()), } c.AddTask(subnetTask) } rtTask := &azuretasks.RouteTable{ - Name: fi.String(b.NameForRouteTable()), + Name: fi.PtrTo(b.NameForRouteTable()), Lifecycle: b.Lifecycle, ResourceGroup: b.LinkToResourceGroup(), Tags: map[string]*string{}, - Shared: fi.Bool(b.Cluster.IsSharedAzureRouteTable()), + Shared: fi.PtrTo(b.Cluster.IsSharedAzureRouteTable()), } c.AddTask(rtTask) diff --git a/pkg/model/azuremodel/resourcegroup.go b/pkg/model/azuremodel/resourcegroup.go index 99c2137a06aa2..81aecfc51ac85 100644 --- a/pkg/model/azuremodel/resourcegroup.go +++ b/pkg/model/azuremodel/resourcegroup.go @@ -32,10 +32,10 @@ var _ fi.ModelBuilder = &ResourceGroupModelBuilder{} // Build builds a task for creating a Resource Group. func (b *ResourceGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { t := &azuretasks.ResourceGroup{ - Name: fi.String(b.NameForResourceGroup()), + Name: fi.PtrTo(b.NameForResourceGroup()), Lifecycle: b.Lifecycle, Tags: map[string]*string{}, - Shared: fi.Bool(b.Cluster.IsSharedAzureResourceGroup()), + Shared: fi.PtrTo(b.Cluster.IsSharedAzureResourceGroup()), } c.AddTask(t) return nil diff --git a/pkg/model/azuremodel/testing.go b/pkg/model/azuremodel/testing.go index 783a5195b8a19..d7edf412cb626 100644 --- a/pkg/model/azuremodel/testing.go +++ b/pkg/model/azuremodel/testing.go @@ -44,7 +44,7 @@ func newTestCluster() *kops.Cluster { Name: "testcluster.test.com", }, Spec: kops.ClusterSpec{ - API: &kops.AccessSpec{ + API: kops.APISpec{ LoadBalancer: &kops.LoadBalancerAccessSpec{ Type: kops.LoadBalancerTypeInternal, }, @@ -77,7 +77,7 @@ func newTestInstanceGroup() *kops.InstanceGroup { Spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleNode, Image: "Canonical:UbuntuServer:18.04-LTS:latest", - RootVolumeSize: fi.Int32(32), + RootVolumeSize: fi.PtrTo(int32(32)), Subnets: []string{"test-subnet"}, }, } diff --git a/pkg/model/azuremodel/vmscaleset.go b/pkg/model/azuremodel/vmscaleset.go index 3a31048950abf..0387abd93e476 100644 --- a/pkg/model/azuremodel/vmscaleset.go +++ b/pkg/model/azuremodel/vmscaleset.go @@ -80,13 +80,13 @@ func (b *VMScaleSetModelBuilder) buildVMScaleSetTask( azNumbers = append(azNumbers, az) } t := &azuretasks.VMScaleSet{ - Name: fi.String(name), + Name: fi.PtrTo(name), Lifecycle: b.Lifecycle, ResourceGroup: b.LinkToResourceGroup(), VirtualNetwork: b.LinkToVirtualNetwork(), - SKUName: fi.String(ig.Spec.MachineType), - ComputerNamePrefix: fi.String(ig.Name), - AdminUser: fi.String(b.Cluster.Spec.CloudProvider.Azure.AdminUser), + SKUName: fi.PtrTo(ig.Spec.MachineType), + ComputerNamePrefix: fi.PtrTo(ig.Name), + AdminUser: 
fi.PtrTo(b.Cluster.Spec.CloudProvider.Azure.AdminUser), Zones: azNumbers, } @@ -107,7 +107,7 @@ func (b *VMScaleSetModelBuilder) buildVMScaleSetTask( if n > 1 { return nil, fmt.Errorf("expected at most one SSH public key; found %d keys", n) } - t.SSHPublicKey = fi.String(string(b.SSHPublicKeys[0])) + t.SSHPublicKey = fi.PtrTo(string(b.SSHPublicKeys[0])) } if t.UserData, err = b.BootstrapScriptBuilder.ResourceNodeUp(c, ig); err != nil { @@ -126,12 +126,12 @@ func (b *VMScaleSetModelBuilder) buildVMScaleSetTask( switch subnet.Type { case kops.SubnetTypePublic, kops.SubnetTypeUtility: - t.RequirePublicIP = fi.Bool(true) + t.RequirePublicIP = fi.PtrTo(true) if ig.Spec.AssociatePublicIP != nil { t.RequirePublicIP = ig.Spec.AssociatePublicIP } case kops.SubnetTypeDualStack, kops.SubnetTypePrivate: - t.RequirePublicIP = fi.Bool(false) + t.RequirePublicIP = fi.PtrTo(false) default: return nil, fmt.Errorf("unexpected subnet type: for InstanceGroup %q; type was %s", ig.Name, subnet.Type) } @@ -152,7 +152,7 @@ func getCapacity(spec *kops.InstanceGroupSpec) (*int64, error) { minSize := int32(1) maxSize := int32(1) if spec.MinSize != nil { - minSize = fi.Int32Value(spec.MinSize) + minSize = fi.ValueOf(spec.MinSize) } else if spec.Role == kops.InstanceGroupRoleNode { minSize = 2 } @@ -164,7 +164,7 @@ func getCapacity(spec *kops.InstanceGroupSpec) (*int64, error) { if minSize != maxSize { return nil, fmt.Errorf("instance group must have the same min and max size in Azure, but got %d and %d", minSize, maxSize) } - return fi.Int64(int64(minSize)), nil + return fi.PtrTo(int64(minSize)), nil } func getStorageProfile(spec *kops.InstanceGroupSpec) (*compute.VirtualMachineScaleSetStorageProfile, error) { diff --git a/pkg/model/azuremodel/vmscaleset_test.go b/pkg/model/azuremodel/vmscaleset_test.go index e87ac026b57e6..7c273da82f405 100644 --- a/pkg/model/azuremodel/vmscaleset_test.go +++ b/pkg/model/azuremodel/vmscaleset_test.go @@ -47,13 +47,13 @@ func TestVMScaleSetModelBuilder_Build(t *testing.T) { } caTask := &fitasks.Keypair{ - Name: fi.String(fi.CertificateIDCA), + Name: fi.PtrTo(fi.CertificateIDCA), Subject: "cn=kubernetes", Type: "ca", } c.AddTask(caTask) etcdCaTask := &fitasks.Keypair{ - Name: fi.String("etcd-clients-ca"), + Name: fi.PtrTo("etcd-clients-ca"), Subject: "cn=etcd-clients-ca", Type: "ca", } @@ -63,7 +63,7 @@ func TestVMScaleSetModelBuilder_Build(t *testing.T) { "kube-proxy", } { c.AddTask(&fitasks.Keypair{ - Name: fi.String(cert), + Name: fi.PtrTo(cert), Subject: "cn=" + cert, Signer: caTask, Type: "client", @@ -85,8 +85,8 @@ func TestGetCapacity(t *testing.T) { { spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleMaster, - MinSize: fi.Int32(3), - MaxSize: fi.Int32(3), + MinSize: fi.PtrTo(int32(3)), + MaxSize: fi.PtrTo(int32(3)), }, success: true, capacity: 3, @@ -108,8 +108,8 @@ func TestGetCapacity(t *testing.T) { { spec: kops.InstanceGroupSpec{ Role: kops.InstanceGroupRoleMaster, - MinSize: fi.Int32(1), - MaxSize: fi.Int32(2), + MinSize: fi.PtrTo(int32(1)), + MaxSize: fi.PtrTo(int32(2)), }, success: false, }, @@ -143,8 +143,8 @@ func TestGetStorageProfile(t *testing.T) { { spec: kops.InstanceGroupSpec{ Image: "Canonical:UbuntuServer:18.04-LTS:latest", - RootVolumeType: fi.String(string(compute.StorageAccountTypesStandardLRS)), - RootVolumeSize: fi.Int32(128), + RootVolumeType: fi.PtrTo(string(compute.StorageAccountTypesStandardLRS)), + RootVolumeSize: fi.PtrTo(int32(128)), }, profile: &compute.VirtualMachineScaleSetStorageProfile{ ImageReference: &compute.ImageReference{ diff 
--git a/pkg/model/bootstrapscript.go b/pkg/model/bootstrapscript.go index b43a40fcf01d8..740f77b13a1c6 100644 --- a/pkg/model/bootstrapscript.go +++ b/pkg/model/bootstrapscript.go @@ -299,9 +299,9 @@ func (b *BootstrapScriptBuilder) ResourceNodeUp(c *fi.ModelBuilderContext, ig *k c.AddTask(task) c.AddTask(&fitasks.ManagedFile{ - Name: fi.String("nodeupconfig-" + ig.Name), + Name: fi.PtrTo("nodeupconfig-" + ig.Name), Lifecycle: b.Lifecycle, - Location: fi.String("igconfig/" + strings.ToLower(string(ig.Spec.Role)) + "/" + ig.Name + "/nodeupconfig.yaml"), + Location: fi.PtrTo("igconfig/" + strings.ToLower(string(ig.Spec.Role)) + "/" + ig.Name + "/nodeupconfig.yaml"), Contents: &task.nodeupConfig, }) return &task.resource, nil @@ -401,7 +401,7 @@ func (b *BootstrapScript) Run(c *fi.Context) error { MemoryRequest: etcdCluster.MemoryRequest, } for _, etcdMember := range etcdCluster.Members { - if fi.StringValue(etcdMember.InstanceGroup) == b.ig.Name && etcdMember.VolumeSize != nil { + if fi.ValueOf(etcdMember.InstanceGroup) == b.ig.Name && etcdMember.VolumeSize != nil { m := kops.EtcdMemberSpec{ Name: etcdMember.Name, VolumeSize: etcdMember.VolumeSize, @@ -421,7 +421,7 @@ func (b *BootstrapScript) Run(c *fi.Context) error { } } - nodeupScript.CompressUserData = fi.BoolValue(b.ig.Spec.CompressUserData) + nodeupScript.CompressUserData = fi.ValueOf(b.ig.Spec.CompressUserData) // By setting some sysctls early, we avoid broken configurations that prevent nodeup download. // See https://github.com/kubernetes/kops/issues/10206 for details. diff --git a/pkg/model/bootstrapscript_test.go b/pkg/model/bootstrapscript_test.go index cda9d54a34db9..7dfc843fb15db 100644 --- a/pkg/model/bootstrapscript_test.go +++ b/pkg/model/bootstrapscript_test.go @@ -142,7 +142,7 @@ func TestBootstrapUserData(t *testing.T) { } caTask := &fitasks.Keypair{ - Name: fi.String(fi.CertificateIDCA), + Name: fi.PtrTo(fi.CertificateIDCA), Subject: "cn=kubernetes", Type: "ca", } @@ -157,7 +157,7 @@ func TestBootstrapUserData(t *testing.T) { "service-account", } { task := &fitasks.Keypair{ - Name: fi.String(keypair), + Name: fi.PtrTo(keypair), Subject: "cn=" + keypair, Type: "ca", } @@ -229,7 +229,7 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles Members: []kops.EtcdMemberSpec{ { Name: "test", - InstanceGroup: fi.String("ig-1"), + InstanceGroup: fi.PtrTo("ig-1"), }, }, Version: "3.1.11", @@ -239,7 +239,7 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles Members: []kops.EtcdMemberSpec{ { Name: "test", - InstanceGroup: fi.String("ig-1"), + InstanceGroup: fi.PtrTo("ig-1"), }, }, Version: "3.1.11", @@ -248,14 +248,14 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles }, NetworkCIDR: "10.79.0.0/24", CloudConfig: &kops.CloudConfiguration{ - NodeTags: fi.String("something"), + NodeTags: fi.PtrTo("something"), }, ContainerRuntime: "docker", Containerd: &kops.ContainerdConfig{ - LogLevel: fi.String("info"), + LogLevel: fi.PtrTo("info"), }, Docker: &kops.DockerConfig{ - LogLevel: fi.String("INFO"), + LogLevel: fi.PtrTo("INFO"), }, KubeAPIServer: &kops.KubeAPIServerConfig{ Image: "CoreOS", diff --git a/pkg/model/components/addonmanifests/awsebscsidriver/iam.go b/pkg/model/components/addonmanifests/awsebscsidriver/iam.go index abb7a1fc27ed9..6ecf72461315a 100644 --- a/pkg/model/components/addonmanifests/awsebscsidriver/iam.go +++ b/pkg/model/components/addonmanifests/awsebscsidriver/iam.go @@ -33,7 +33,7 @@ func (r *ServiceAccount) BuildAWSPolicy(b 
*iam.PolicyBuilder) (*iam.Policy, erro clusterName := b.Cluster.ObjectMeta.Name p := iam.NewPolicy(clusterName, b.Partition) - addSnapshotControllerPermissions := b.Cluster.Spec.SnapshotController != nil && fi.BoolValue(b.Cluster.Spec.SnapshotController.Enabled) + addSnapshotControllerPermissions := b.Cluster.Spec.SnapshotController != nil && fi.ValueOf(b.Cluster.Spec.SnapshotController.Enabled) iam.AddAWSEBSCSIDriverPermissions(p, addSnapshotControllerPermissions) return p, nil diff --git a/pkg/model/components/addonmanifests/clusterautoscaler/iam.go b/pkg/model/components/addonmanifests/clusterautoscaler/iam.go index dafd88ed315cf..f802f0dd57d2c 100644 --- a/pkg/model/components/addonmanifests/clusterautoscaler/iam.go +++ b/pkg/model/components/addonmanifests/clusterautoscaler/iam.go @@ -34,7 +34,7 @@ func (r *ServiceAccount) BuildAWSPolicy(b *iam.PolicyBuilder) (*iam.Policy, erro p := iam.NewPolicy(clusterName, b.Partition) var useStaticInstanceList bool - if ca := b.Cluster.Spec.ClusterAutoscaler; ca != nil && fi.BoolValue(ca.AWSUseStaticInstanceList) { + if ca := b.Cluster.Spec.ClusterAutoscaler; ca != nil && fi.ValueOf(ca.AWSUseStaticInstanceList) { useStaticInstanceList = true } iam.AddClusterAutoscalerPermissions(p, useStaticInstanceList) diff --git a/pkg/model/components/addonmanifests/remap.go b/pkg/model/components/addonmanifests/remap.go index 6afd387d194b3..d047fcf36d495 100644 --- a/pkg/model/components/addonmanifests/remap.go +++ b/pkg/model/components/addonmanifests/remap.go @@ -32,7 +32,7 @@ import ( ) func RemapAddonManifest(addon *addonsapi.AddonSpec, context *model.KopsModelContext, assetBuilder *assets.AssetBuilder, manifest []byte, serviceAccounts map[string]iam.Subject) ([]byte, error) { - name := fi.StringValue(addon.Name) + name := fi.ValueOf(addon.Name) { objects, err := kubemanifest.LoadObjectsFrom(manifest) diff --git a/pkg/model/components/apiserver.go b/pkg/model/components/apiserver.go index 32c0ae58b7617..1f57406b6b06b 100644 --- a/pkg/model/components/apiserver.go +++ b/pkg/model/components/apiserver.go @@ -49,7 +49,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { if count == 0 { return fmt.Errorf("no instance groups found") } - c.APIServerCount = fi.Int32(int32(count)) + c.APIServerCount = fi.PtrTo(int32(count)) } // @question: should the question every be able to set this? 
@@ -60,7 +60,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { if err != nil { return err } - c.StorageBackend = fi.String(fmt.Sprintf("etcd%d", sem.Major)) + c.StorageBackend = fi.PtrTo(fmt.Sprintf("etcd%d", sem.Major)) } if c.KubeletPreferredAddressTypes == nil { @@ -74,7 +74,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { if clusterSpec.Authentication != nil { if clusterSpec.Authentication.Kopeio != nil { - c.AuthenticationTokenWebhookConfigFile = fi.String("/etc/kubernetes/authn.config") + c.AuthenticationTokenWebhookConfigFile = fi.PtrTo("/etc/kubernetes/authn.config") } } @@ -82,9 +82,9 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { // Do nothing - use the default as defined by the apiserver // (this won't happen anyway because of our default logic) } else if clusterSpec.Authorization.AlwaysAllow != nil { - clusterSpec.KubeAPIServer.AuthorizationMode = fi.String("AlwaysAllow") + clusterSpec.KubeAPIServer.AuthorizationMode = fi.PtrTo("AlwaysAllow") } else if clusterSpec.Authorization.RBAC != nil { - clusterSpec.KubeAPIServer.AuthorizationMode = fi.String("Node,RBAC") + clusterSpec.KubeAPIServer.AuthorizationMode = fi.PtrTo("Node,RBAC") } if err := b.configureAggregation(clusterSpec); err != nil { @@ -127,7 +127,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { c.BindAddress = "0.0.0.0" } - c.AllowPrivileged = fi.Bool(true) + c.AllowPrivileged = fi.PtrTo(true) c.ServiceClusterIPRange = clusterSpec.ServiceClusterIPRange c.EtcdServers = nil c.EtcdServersOverrides = nil @@ -161,7 +161,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { } // We make sure to disable AnonymousAuth - c.AnonymousAuth = fi.Bool(false) + c.AnonymousAuth = fi.PtrTo(false) // We query via the kube-apiserver-healthcheck proxy, which listens on port 3990 c.InsecureBindAddress = "" @@ -169,9 +169,9 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { // If metrics-server is enabled, we want aggregator routing enabled so that requests are load balanced. 
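// [Editor's note: the *bool fields in this options builder carry three states
// — nil (unset), true, and false — so defaulting guards on nil rather than on
// the value, as in the metrics-server hunk just below:
//
//	if c.EnableAggregatorRouting == nil {
//		c.EnableAggregatorRouting = fi.PtrTo(true)
//	}
//
// An explicit user-set false survives, which a plain boolean default would
// clobber.]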
metricsServer := clusterSpec.MetricsServer - if metricsServer != nil && fi.BoolValue(metricsServer.Enabled) { + if metricsServer != nil && fi.ValueOf(metricsServer.Enabled) { if c.EnableAggregatorRouting == nil { - c.EnableAggregatorRouting = fi.Bool(true) + c.EnableAggregatorRouting = fi.PtrTo(true) } } @@ -179,7 +179,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { c.FeatureGates = make(map[string]string) } - if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.BoolValue(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { + if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.ValueOf(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { if _, found := c.FeatureGates["InTreePluginAWSUnregister"]; !found { c.FeatureGates["InTreePluginAWSUnregister"] = "true" @@ -204,9 +204,9 @@ func (b *KubeAPIServerOptionsBuilder) buildAPIServerCount(clusterSpec *kops.Clus // if !ig.IsMaster() { // continue // } - // size := fi.IntValue(ig.Spec.MaxSize) + // size := fi.ValueOf(ig.Spec.MaxSize) // if size == 0 { - // size = fi.IntValue(ig.Spec.MinSize) + // size = fi.ValueOf(ig.Spec.MinSize) // } // count += size //} diff --git a/pkg/model/components/awscloudcontrollermanager.go b/pkg/model/components/awscloudcontrollermanager.go index 597bb2bcd3438..bf87bc22c48f4 100644 --- a/pkg/model/components/awscloudcontrollermanager.go +++ b/pkg/model/components/awscloudcontrollermanager.go @@ -51,35 +51,35 @@ func (b *AWSCloudControllerManagerOptionsBuilder) BuildOptions(o interface{}) er // No significant downside to always doing a leader election. // Also, having multiple control plane nodes requires leader election. - eccm.LeaderElection = &kops.LeaderElectionConfiguration{LeaderElect: fi.Bool(true)} + eccm.LeaderElection = &kops.LeaderElectionConfiguration{LeaderElect: fi.PtrTo(true)} eccm.ClusterName = b.ClusterName eccm.ClusterCIDR = clusterSpec.NonMasqueradeCIDR - eccm.AllocateNodeCIDRs = fi.Bool(true) - eccm.ConfigureCloudRoutes = fi.Bool(false) + eccm.AllocateNodeCIDRs = fi.PtrTo(true) + eccm.ConfigureCloudRoutes = fi.PtrTo(false) // TODO: we want to consolidate this with the logic from KCM networking := clusterSpec.Networking if networking == nil { - eccm.ConfigureCloudRoutes = fi.Bool(true) + eccm.ConfigureCloudRoutes = fi.PtrTo(true) } else if networking.Kubenet != nil { - eccm.ConfigureCloudRoutes = fi.Bool(true) + eccm.ConfigureCloudRoutes = fi.PtrTo(true) } else if networking.GCE != nil { - eccm.ConfigureCloudRoutes = fi.Bool(false) - eccm.CIDRAllocatorType = fi.String("CloudAllocator") + eccm.ConfigureCloudRoutes = fi.PtrTo(false) + eccm.CIDRAllocatorType = fi.PtrTo("CloudAllocator") if eccm.ClusterCIDR == "" { eccm.ClusterCIDR = clusterSpec.PodCIDR } } else if networking.External != nil { - eccm.ConfigureCloudRoutes = fi.Bool(false) + eccm.ConfigureCloudRoutes = fi.PtrTo(false) } else if UsesCNI(networking) { - eccm.ConfigureCloudRoutes = fi.Bool(false) + eccm.ConfigureCloudRoutes = fi.PtrTo(false) } else if networking.Kopeio != nil { // Kopeio is based on kubenet / external - eccm.ConfigureCloudRoutes = fi.Bool(false) + eccm.ConfigureCloudRoutes = fi.PtrTo(false) } else { return fmt.Errorf("no networking mode set") } @@ -103,7 +103,7 @@ func (b *AWSCloudControllerManagerOptionsBuilder) BuildOptions(o interface{}) er } if b.IsKubernetesGTE("1.24") && b.IsKubernetesLT("1.25") { - eccm.EnableLeaderMigration = fi.Bool(true) + eccm.EnableLeaderMigration = fi.PtrTo(true) } return nil diff --git 
a/pkg/model/components/awsebscsidriver.go b/pkg/model/components/awsebscsidriver.go index 6be3f83548764..7af3577eb7a65 100644 --- a/pkg/model/components/awsebscsidriver.go +++ b/pkg/model/components/awsebscsidriver.go @@ -38,18 +38,18 @@ func (b *AWSEBSCSIDriverOptionsBuilder) BuildOptions(o interface{}) error { cc := clusterSpec.CloudConfig if cc.AWSEBSCSIDriver == nil { cc.AWSEBSCSIDriver = &kops.AWSEBSCSIDriver{ - Enabled: fi.Bool(b.IsKubernetesGTE("1.22")), + Enabled: fi.PtrTo(b.IsKubernetesGTE("1.22")), } } c := cc.AWSEBSCSIDriver - if !fi.BoolValue(c.Enabled) { + if !fi.ValueOf(c.Enabled) { return nil } if c.Version == nil { version := "v1.12.0" - c.Version = fi.String(version) + c.Version = fi.PtrTo(version) } return nil diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index 990a9e63f8dbe..520a448cab4d4 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -44,7 +44,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.EnableEndpointHealthChecking == nil { - c.EnableEndpointHealthChecking = fi.Bool(true) + c.EnableEndpointHealthChecking = fi.PtrTo(true) } if c.IdentityAllocationMode == "" { @@ -112,7 +112,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.Masquerade == nil { - c.Masquerade = fi.Bool(!clusterSpec.IsIPv6Only() && c.IPAM != "eni") + c.Masquerade = fi.PtrTo(!clusterSpec.IsIPv6Only() && c.IPAM != "eni") } if c.Tunnel == "" { @@ -124,19 +124,19 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.EnableRemoteNodeIdentity == nil { - c.EnableRemoteNodeIdentity = fi.Bool(true) + c.EnableRemoteNodeIdentity = fi.PtrTo(true) } if c.EnableBPFMasquerade == nil { - c.EnableBPFMasquerade = fi.Bool(false) + c.EnableBPFMasquerade = fi.PtrTo(false) } if c.EnableL7Proxy == nil { - c.EnableL7Proxy = fi.Bool(true) + c.EnableL7Proxy = fi.PtrTo(true) } if c.DisableCNPStatusUpdates == nil { - c.DisableCNPStatusUpdates = fi.Bool(true) + c.DisableCNPStatusUpdates = fi.PtrTo(true) } if c.CPURequest == nil { @@ -156,11 +156,11 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { hubble := c.Hubble if hubble != nil { if hubble.Enabled == nil { - hubble.Enabled = fi.Bool(true) + hubble.Enabled = fi.PtrTo(true) } } else { c.Hubble = &kops.HubbleSpec{ - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), } } diff --git a/pkg/model/components/cloudconfiguration.go b/pkg/model/components/cloudconfiguration.go index c2c0899ab518a..deec4be329825 100644 --- a/pkg/model/components/cloudconfiguration.go +++ b/pkg/model/components/cloudconfiguration.go @@ -47,7 +47,7 @@ func (b *CloudConfigurationOptionsBuilder) BuildOptions(o interface{}) error { // adopting that more particular setting generally. 
manage = clusterSpec.CloudProvider.Openstack.BlockStorage.CreateStorageClass } else { - manage = fi.Bool(true) + manage = fi.PtrTo(true) } c.ManageStorageClasses = manage } diff --git a/pkg/model/components/cloudconfiguration_test.go b/pkg/model/components/cloudconfiguration_test.go index e9c3013414c16..4247a59c48ffe 100644 --- a/pkg/model/components/cloudconfiguration_test.go +++ b/pkg/model/components/cloudconfiguration_test.go @@ -27,8 +27,8 @@ func TestCloudConfigurationOptionsBuilder(t *testing.T) { ob := &CloudConfigurationOptionsBuilder{ Context: nil, } - disabled := fi.Bool(false) - enabled := fi.Bool(true) + disabled := fi.PtrTo(false) + enabled := fi.PtrTo(true) for _, test := range []struct { description string generalManageSCs *bool diff --git a/pkg/model/components/clusterautoscaler.go b/pkg/model/components/clusterautoscaler.go index 0051abfc3c12b..62a8ec903928e 100644 --- a/pkg/model/components/clusterautoscaler.go +++ b/pkg/model/components/clusterautoscaler.go @@ -33,7 +33,7 @@ var _ loader.OptionsBuilder = &ClusterAutoscalerOptionsBuilder{} func (b *ClusterAutoscalerOptionsBuilder) BuildOptions(o interface{}) error { clusterSpec := o.(*kops.ClusterSpec) cas := clusterSpec.ClusterAutoscaler - if cas == nil || !fi.BoolValue(cas.Enabled) { + if cas == nil || !fi.ValueOf(cas.Enabled) { return nil } @@ -57,38 +57,38 @@ func (b *ClusterAutoscalerOptionsBuilder) BuildOptions(o interface{}) error { image = "registry.k8s.io/autoscaling/cluster-autoscaler:v1.25.0" } } - cas.Image = fi.String(image) + cas.Image = fi.PtrTo(image) } if cas.Expander == nil { - cas.Expander = fi.String("random") + cas.Expander = fi.PtrTo("random") } if cas.ScaleDownUtilizationThreshold == nil { - cas.ScaleDownUtilizationThreshold = fi.String("0.5") + cas.ScaleDownUtilizationThreshold = fi.PtrTo("0.5") } if cas.SkipNodesWithLocalStorage == nil { - cas.SkipNodesWithLocalStorage = fi.Bool(true) + cas.SkipNodesWithLocalStorage = fi.PtrTo(true) } if cas.SkipNodesWithSystemPods == nil { - cas.SkipNodesWithSystemPods = fi.Bool(true) + cas.SkipNodesWithSystemPods = fi.PtrTo(true) } if cas.BalanceSimilarNodeGroups == nil { - cas.BalanceSimilarNodeGroups = fi.Bool(false) + cas.BalanceSimilarNodeGroups = fi.PtrTo(false) } if cas.AWSUseStaticInstanceList == nil { - cas.AWSUseStaticInstanceList = fi.Bool(false) + cas.AWSUseStaticInstanceList = fi.PtrTo(false) } if cas.NewPodScaleUpDelay == nil { - cas.NewPodScaleUpDelay = fi.String("0s") + cas.NewPodScaleUpDelay = fi.PtrTo("0s") } if cas.ScaleDownDelayAfterAdd == nil { - cas.ScaleDownDelayAfterAdd = fi.String("10m0s") + cas.ScaleDownDelayAfterAdd = fi.PtrTo("10m0s") } if cas.ScaleDownUnneededTime == nil { - cas.ScaleDownUnneededTime = fi.String("10m0s") + cas.ScaleDownUnneededTime = fi.PtrTo("10m0s") } if cas.ScaleDownUnreadyTime == nil { - cas.ScaleDownUnreadyTime = fi.String("20m0s") + cas.ScaleDownUnreadyTime = fi.PtrTo("20m0s") } if cas.MaxNodeProvisionTime == "" { cas.MaxNodeProvisionTime = "15m0s" diff --git a/pkg/model/components/containerd.go b/pkg/model/components/containerd.go index 8360da5a67038..9a5824cf899b4 100644 --- a/pkg/model/components/containerd.go +++ b/pkg/model/components/containerd.go @@ -45,22 +45,22 @@ func (b *ContainerdOptionsBuilder) BuildOptions(o interface{}) error { if clusterSpec.ContainerRuntime == "containerd" { // Set version based on Kubernetes version - if fi.StringValue(containerd.Version) == "" { + if fi.ValueOf(containerd.Version) == "" { if b.IsKubernetesGTE("1.23") { - containerd.Version = fi.String("1.6.10") + 
containerd.Version = fi.PtrTo("1.6.10") containerd.Runc = &kops.Runc{ - Version: fi.String("1.1.4"), + Version: fi.PtrTo("1.1.4"), } } else { - containerd.Version = fi.String("1.4.13") + containerd.Version = fi.PtrTo("1.4.13") } } // Set default log level to INFO - containerd.LogLevel = fi.String("info") + containerd.LogLevel = fi.PtrTo("info") } else if clusterSpec.ContainerRuntime == "docker" { // Docker version should always be available - dockerVersion := fi.StringValue(clusterSpec.Docker.Version) + dockerVersion := fi.ValueOf(clusterSpec.Docker.Version) if dockerVersion == "" { return fmt.Errorf("docker version is required") } else { @@ -75,18 +75,18 @@ func (b *ContainerdOptionsBuilder) BuildOptions(o interface{}) error { } } // Set default log level to INFO - containerd.LogLevel = fi.String("info") + containerd.LogLevel = fi.PtrTo("info") // Build config file for containerd running in Docker mode config, _ := toml.Load("") config.SetPath([]string{"disabled_plugins"}, []string{"cri"}) - containerd.ConfigOverride = fi.String(config.String()) + containerd.ConfigOverride = fi.PtrTo(config.String()) } else { // Unknown container runtime, should not install containerd containerd.SkipInstall = true } - if containerd.NvidiaGPU != nil && fi.BoolValue(containerd.NvidiaGPU.Enabled) && containerd.NvidiaGPU.DriverPackage == "" { + if containerd.NvidiaGPU != nil && fi.ValueOf(containerd.NvidiaGPU.Enabled) && containerd.NvidiaGPU.DriverPackage == "" { containerd.NvidiaGPU.DriverPackage = kops.NvidiaDefaultDriverPackage } diff --git a/pkg/model/components/context.go b/pkg/model/components/context.go index 4da2cffd921e4..acb889c5a442c 100644 --- a/pkg/model/components/context.go +++ b/pkg/model/components/context.go @@ -175,5 +175,5 @@ func GCETagForRole(clusterName string, role kops.InstanceGroupRole) string { // IsCertManagerEnabled returns true if the cluster has the capability to handle cert-manager PKI func IsCertManagerEnabled(cluster *kops.Cluster) bool { - return cluster.Spec.CertManager != nil && fi.BoolValue(cluster.Spec.CertManager.Enabled) + return cluster.Spec.CertManager != nil && fi.ValueOf(cluster.Spec.CertManager.Enabled) } diff --git a/pkg/model/components/discovery.go b/pkg/model/components/discovery.go index 1d0a6a2215e64..11f660b8778ba 100644 --- a/pkg/model/components/discovery.go +++ b/pkg/model/components/discovery.go @@ -71,25 +71,25 @@ func (b *DiscoveryOptionsBuilder) BuildOptions(o interface{}) error { return fmt.Errorf("locationStore=%q is of unexpected type %T", store, base) } } else { - if supportsPublicJWKS(clusterSpec) && clusterSpec.MasterPublicName != "" { - serviceAccountIssuer = "https://" + clusterSpec.MasterPublicName + if supportsPublicJWKS(clusterSpec) && clusterSpec.API.PublicName != "" { + serviceAccountIssuer = "https://" + clusterSpec.API.PublicName } else { - serviceAccountIssuer = "https://" + clusterSpec.MasterInternalName + serviceAccountIssuer = "https://api.internal." 
+ b.ClusterName } } kubeAPIServer.ServiceAccountIssuer = &serviceAccountIssuer } - kubeAPIServer.ServiceAccountJWKSURI = fi.String(*kubeAPIServer.ServiceAccountIssuer + "/openid/v1/jwks") + kubeAPIServer.ServiceAccountJWKSURI = fi.PtrTo(*kubeAPIServer.ServiceAccountIssuer + "/openid/v1/jwks") // We set apiserver ServiceAccountKey and ServiceAccountSigningKeyFile in nodeup return nil } func supportsPublicJWKS(clusterSpec *kops.ClusterSpec) bool { - if !fi.BoolValue(clusterSpec.KubeAPIServer.AnonymousAuth) { + if !fi.ValueOf(clusterSpec.KubeAPIServer.AnonymousAuth) { return false } - for _, cidr := range clusterSpec.KubernetesAPIAccess { + for _, cidr := range clusterSpec.API.Access { if cidr == "0.0.0.0/0" || cidr == "::/0" { return true } diff --git a/pkg/model/components/docker.go b/pkg/model/components/docker.go index 86a2ed4a88de0..640f597b57ab6 100644 --- a/pkg/model/components/docker.go +++ b/pkg/model/components/docker.go @@ -46,8 +46,8 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error { } // Set the Docker version for known Kubernetes versions - if fi.StringValue(clusterSpec.Docker.Version) == "" { - docker.Version = fi.String("20.10.17") + if fi.ValueOf(clusterSpec.Docker.Version) == "" { + docker.Version = fi.PtrTo("20.10.17") } if len(clusterSpec.Docker.LogOpt) == 0 && clusterSpec.Docker.LogDriver == nil { @@ -58,14 +58,14 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error { clusterSpec.Docker.LogOpt = append(clusterSpec.Docker.LogOpt, "max-file=5") } - docker.LogLevel = fi.String("info") - docker.IPTables = fi.Bool(false) - docker.IPMasq = fi.Bool(false) + docker.LogLevel = fi.PtrTo("info") + docker.IPTables = fi.PtrTo(false) + docker.IPMasq = fi.PtrTo(false) // Note the alternative syntax... with a comma nodeup will try each of the filesystems in turn // TODO(justinsb): The ContainerOS image now has docker configured to use overlay2 out-of-the-box // and it is an error to specify the flag twice. - docker.Storage = fi.String("overlay2,overlay,aufs") + docker.Storage = fi.PtrTo("overlay2,overlay,aufs") // Set systemd as the default cgroup driver in docker from k8s 1.20. 
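The substitutions running through all of these hunks — fi.String, fi.Bool, fi.Int32, fi.Int64 and fi.Int collapsing into fi.PtrTo, and fi.StringValue, fi.BoolValue, fi.Int32Value and friends collapsing into fi.ValueOf — point at two small generic helpers. A minimal, runnable sketch of what upup/pkg/fi presumably defines, shown only to make the mechanical changes easier to read:

package main

import "fmt"

// PtrTo replaces the per-type constructors fi.String, fi.Bool, fi.Int32,
// fi.Int64 and fi.Int: one generic helper returns a pointer to any value.
func PtrTo[T any](v T) *T { return &v }

// ValueOf replaces fi.StringValue, fi.BoolValue, fi.Int32Value etc.:
// a nil-safe dereference that yields T's zero value for a nil pointer.
func ValueOf[T any](v *T) T {
	if v == nil {
		var zero T
		return zero
	}
	return *v
}

func main() {
	enabled := PtrTo(true)                              // *bool, as fi.Bool(true) was
	fmt.Println(ValueOf(enabled), ValueOf[string](nil)) // true ""
}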
if getDockerCgroupDriver(docker.ExecOpt) == "" { diff --git a/pkg/model/components/etcdmanager/model.go b/pkg/model/components/etcdmanager/model.go index 38586181e2bf0..9e7e709a3d248 100644 --- a/pkg/model/components/etcdmanager/model.go +++ b/pkg/model/components/etcdmanager/model.go @@ -65,7 +65,7 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error { } for _, member := range etcdCluster.Members { - instanceGroupName := fi.StringValue(member.InstanceGroup) + instanceGroupName := fi.ValueOf(member.InstanceGroup) manifest, err := b.buildManifest(etcdCluster, instanceGroupName) if err != nil { return err @@ -80,8 +80,8 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(manifestYAML), Lifecycle: b.Lifecycle, - Location: fi.String("manifests/etcd/" + name + ".yaml"), - Name: fi.String("manifests-etcdmanager-" + name), + Location: fi.PtrTo("manifests/etcd/" + name + ".yaml"), + Name: fi.PtrTo("manifests-etcdmanager-" + name), }) } @@ -105,15 +105,15 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(d), Lifecycle: b.Lifecycle, - Base: fi.String(backupStore), + Base: fi.PtrTo(backupStore), // TODO: We need this to match the backup base (currently) - Location: fi.String(location + "/control/etcd-cluster-spec"), - Name: fi.String("etcd-cluster-spec-" + etcdCluster.Name), + Location: fi.PtrTo(location + "/control/etcd-cluster-spec"), + Name: fi.PtrTo("etcd-cluster-spec-" + etcdCluster.Name), }) // We create a CA keypair to enable secure communication c.AddTask(&fitasks.Keypair{ - Name: fi.String("etcd-manager-ca-" + etcdCluster.Name), + Name: fi.PtrTo("etcd-manager-ca-" + etcdCluster.Name), Lifecycle: b.Lifecycle, Subject: "cn=etcd-manager-ca-" + etcdCluster.Name, Type: "ca", @@ -121,7 +121,7 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error { // We create a CA for etcd peers and a separate one for clients c.AddTask(&fitasks.Keypair{ - Name: fi.String("etcd-peers-ca-" + etcdCluster.Name), + Name: fi.PtrTo("etcd-peers-ca-" + etcdCluster.Name), Lifecycle: b.Lifecycle, Subject: "cn=etcd-peers-ca-" + etcdCluster.Name, Type: "ca", @@ -129,7 +129,7 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error { // Because API server can only have a single client-cert, we need to share a client CA if err := c.EnsureTask(&fitasks.Keypair{ - Name: fi.String("etcd-clients-ca"), + Name: fi.PtrTo("etcd-clients-ca"), Lifecycle: b.Lifecycle, Subject: "cn=etcd-clients-ca", Type: "ca", @@ -139,7 +139,7 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error { if etcdCluster.Name == "cilium" { clientsCaCilium := &fitasks.Keypair{ - Name: fi.String("etcd-clients-ca-cilium"), + Name: fi.PtrTo("etcd-clients-ca-cilium"), Lifecycle: b.Lifecycle, Subject: "cn=etcd-clients-ca-cilium", Type: "ca", @@ -148,7 +148,7 @@ func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error { if !b.UseKopsControllerForNodeBootstrap() { c.AddTask(&fitasks.Keypair{ - Name: fi.String("etcd-client-cilium"), + Name: fi.PtrTo("etcd-client-cilium"), Lifecycle: b.Lifecycle, Subject: "cn=cilium", Type: "client", @@ -294,7 +294,7 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster kops.EtcdClusterSpec, instance dnsInternalSuffix := "" if b.Cluster.IsGossip() { // @TODO: This is hacky, but we want it so that we can have a different internal & external name - dnsInternalSuffix = b.Cluster.Spec.MasterInternalName 
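The removed line here reads the stored Spec.MasterInternalName field; the added line that follows derives the name instead. Judging by the discovery.go hunk above, which builds "https://api.internal." + b.ClusterName, the new accessor is presumably no more than a hypothetical one-liner like this:

package main

import "fmt"

// apiInternalName mirrors what Cluster.APIInternalName() presumably does:
// derive the internal API endpoint from the cluster name rather than
// reading the masterInternalName field that the test fixtures below drop.
func apiInternalName(clusterName string) string {
	return "api.internal." + clusterName
}

func main() {
	fmt.Println(apiInternalName("minimal.example.com")) // api.internal.minimal.example.com
}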
+ dnsInternalSuffix = b.Cluster.APIInternalName() dnsInternalSuffix = strings.TrimPrefix(dnsInternalSuffix, "api.") } @@ -316,7 +316,7 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster kops.EtcdClusterSpec, instance case "cilium": if !featureflag.APIServerNodes.Enabled() { - clientHost = b.Cluster.Spec.MasterInternalName + clientHost = b.Cluster.APIInternalName() } default: return nil, fmt.Errorf("unknown etcd cluster key %q", etcdCluster.Name) @@ -349,11 +349,11 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster kops.EtcdClusterSpec, instance } if etcdCluster.Manager != nil && etcdCluster.Manager.BackupInterval != nil { - config.BackupInterval = fi.String(etcdCluster.Manager.BackupInterval.Duration.String()) + config.BackupInterval = fi.PtrTo(etcdCluster.Manager.BackupInterval.Duration.String()) } if etcdCluster.Manager != nil && etcdCluster.Manager.DiscoveryPollInterval != nil { - config.DiscoveryPollInterval = fi.String(etcdCluster.Manager.DiscoveryPollInterval.Duration.String()) + config.DiscoveryPollInterval = fi.PtrTo(etcdCluster.Manager.DiscoveryPollInterval.Duration.String()) } { @@ -471,7 +471,7 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster kops.EtcdClusterSpec, instance kubemanifest.AddHostPathMapping(pod, container, "varlogetcd", "/var/log/etcd.log").WithReadWrite().WithType(v1.HostPathFileOrCreate).WithHostPath(logFile) - if fi.BoolValue(b.Cluster.Spec.UseHostCertificates) { + if fi.ValueOf(b.Cluster.Spec.UseHostCertificates) { kubemanifest.AddHostPathMapping(pod, container, "etc-ssl-certs", "/etc/ssl/certs").WithType(v1.HostPathDirectoryOrCreate) } } diff --git a/pkg/model/components/etcdmanager/tests/interval/cluster.yaml b/pkg/model/components/etcdmanager/tests/interval/cluster.yaml index 4c38ebe91de73..f8617d13d63eb 100644 --- a/pkg/model/components/etcdmanager/tests/interval/cluster.yaml +++ b/pkg/model/components/etcdmanager/tests/interval/cluster.yaml @@ -34,7 +34,6 @@ spec: backups: backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/pkg/model/components/etcdmanager/tests/minimal/cluster.yaml b/pkg/model/components/etcdmanager/tests/minimal/cluster.yaml index 0f581b8ed4bc3..10d4c92514a94 100644 --- a/pkg/model/components/etcdmanager/tests/minimal/cluster.yaml +++ b/pkg/model/components/etcdmanager/tests/minimal/cluster.yaml @@ -29,7 +29,6 @@ spec: backups: backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/pkg/model/components/etcdmanager/tests/overwrite_settings/cluster.yaml b/pkg/model/components/etcdmanager/tests/overwrite_settings/cluster.yaml index 8d944c38ce3f2..80b07f04254a4 100644 --- a/pkg/model/components/etcdmanager/tests/overwrite_settings/cluster.yaml +++ b/pkg/model/components/etcdmanager/tests/overwrite_settings/cluster.yaml @@ -39,7 +39,6 @@ spec: backups: backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/pkg/model/components/etcdmanager/tests/proxy/cluster.yaml b/pkg/model/components/etcdmanager/tests/proxy/cluster.yaml index 
aa9eb13d993db..c89515b9f53ea 100644 --- a/pkg/model/components/etcdmanager/tests/proxy/cluster.yaml +++ b/pkg/model/components/etcdmanager/tests/proxy/cluster.yaml @@ -33,7 +33,6 @@ spec: backups: backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/pkg/model/components/gcpcloudcontrollermanager.go b/pkg/model/components/gcpcloudcontrollermanager.go index 185a2b43d1f43..581b9583984b7 100644 --- a/pkg/model/components/gcpcloudcontrollermanager.go +++ b/pkg/model/components/gcpcloudcontrollermanager.go @@ -48,12 +48,12 @@ func (b *GCPCloudControllerManagerOptionsBuilder) BuildOptions(options interface // No significant downside to always doing a leader election. // Also, having multiple control plane nodes requires leader election. - ccmConfig.LeaderElection = &kops.LeaderElectionConfiguration{LeaderElect: fi.Bool(true)} + ccmConfig.LeaderElection = &kops.LeaderElectionConfiguration{LeaderElect: fi.PtrTo(true)} // CCM interacts directly with the GCP API, use the name safe for GCP ccmConfig.ClusterName = gce.SafeClusterName(b.ClusterName) - ccmConfig.AllocateNodeCIDRs = fi.Bool(true) - ccmConfig.CIDRAllocatorType = fi.String("CloudAllocator") + ccmConfig.AllocateNodeCIDRs = fi.PtrTo(true) + ccmConfig.CIDRAllocatorType = fi.PtrTo("CloudAllocator") if ccmConfig.ClusterCIDR == "" { ccmConfig.ClusterCIDR = clusterSpec.PodCIDR } @@ -68,7 +68,7 @@ func (b *GCPCloudControllerManagerOptionsBuilder) BuildOptions(options interface } if b.IsKubernetesGTE("1.24") && b.IsKubernetesLT("1.25") { - ccmConfig.EnableLeaderMigration = fi.Bool(true) + ccmConfig.EnableLeaderMigration = fi.PtrTo(true) } return nil diff --git a/pkg/model/components/gcppdcsidriver.go b/pkg/model/components/gcppdcsidriver.go index 58199c3292294..35007dc702f6d 100644 --- a/pkg/model/components/gcppdcsidriver.go +++ b/pkg/model/components/gcppdcsidriver.go @@ -38,7 +38,7 @@ func (b *GCPPDCSIDriverOptionsBuilder) BuildOptions(o interface{}) error { cc := clusterSpec.CloudConfig if cc.GCPPDCSIDriver == nil { cc.GCPPDCSIDriver = &kops.GCPPDCSIDriver{ - Enabled: fi.Bool(b.IsKubernetesGTE("1.23")), + Enabled: fi.PtrTo(b.IsKubernetesGTE("1.23")), } } diff --git a/pkg/model/components/hetznercloudcontrollermanager.go b/pkg/model/components/hetznercloudcontrollermanager.go index 7e28bbc0d36f7..4ee157c6e1267 100644 --- a/pkg/model/components/hetznercloudcontrollermanager.go +++ b/pkg/model/components/hetznercloudcontrollermanager.go @@ -43,14 +43,14 @@ func (b *HetznerCloudControllerManagerOptionsBuilder) BuildOptions(o interface{} eccm := clusterSpec.ExternalCloudControllerManager eccm.CloudProvider = "hcloud" - eccm.AllowUntaggedCloud = fi.Bool(true) + eccm.AllowUntaggedCloud = fi.PtrTo(true) eccm.LeaderElection = &kops.LeaderElectionConfiguration{ - LeaderElect: fi.Bool(false), + LeaderElect: fi.PtrTo(false), } eccm.ClusterCIDR = clusterSpec.NonMasqueradeCIDR - eccm.AllocateNodeCIDRs = fi.Bool(true) - eccm.ConfigureCloudRoutes = fi.Bool(false) + eccm.AllocateNodeCIDRs = fi.PtrTo(true) + eccm.ConfigureCloudRoutes = fi.PtrTo(false) if eccm.Image == "" { eccm.Image = "hetznercloud/hcloud-cloud-controller-manager:v1.13.2" diff --git a/pkg/model/components/kubeapiserver/model.go b/pkg/model/components/kubeapiserver/model.go index 6c2793c6f805c..c7df572689152 100644 --- a/pkg/model/components/kubeapiserver/model.go +++ 
b/pkg/model/components/kubeapiserver/model.go @@ -57,8 +57,8 @@ func (b *KubeApiserverBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(manifestYAML), Lifecycle: b.Lifecycle, - Location: fi.String(location), - Name: fi.String("manifests-static-" + key), + Location: fi.PtrTo(location), + Name: fi.PtrTo("manifests-static-" + key), }) b.AssetBuilder.StaticManifests = append(b.AssetBuilder.StaticManifests, &assets.StaticManifest{ diff --git a/pkg/model/components/kubeapiserver/tests/minimal/cluster.yaml b/pkg/model/components/kubeapiserver/tests/minimal/cluster.yaml index dd84042b1949b..66f2cf081bae9 100644 --- a/pkg/model/components/kubeapiserver/tests/minimal/cluster.yaml +++ b/pkg/model/components/kubeapiserver/tests/minimal/cluster.yaml @@ -10,7 +10,6 @@ spec: cloudProvider: aws configBase: memfs://clusters.example.com/minimal.example.com kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/pkg/model/components/kubecontrollermanager.go b/pkg/model/components/kubecontrollermanager.go index 6d20b5c2abf79..d7162258fed30 100644 --- a/pkg/model/components/kubecontrollermanager.go +++ b/pkg/model/components/kubecontrollermanager.go @@ -106,7 +106,7 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error if clusterSpec.ExternalCloudControllerManager == nil { if b.IsKubernetesGTE("1.23") && (kcm.CloudProvider == "aws" || kcm.CloudProvider == "gce") { - kcm.EnableLeaderMigration = fi.Bool(true) + kcm.EnableLeaderMigration = fi.PtrTo(true) } } else { kcm.CloudProvider = "external" @@ -123,9 +123,9 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error kcm.Image = image // Doesn't seem to be any real downside to always doing a leader election - kcm.LeaderElection = &kops.LeaderElectionConfiguration{LeaderElect: fi.Bool(true)} + kcm.LeaderElection = &kops.LeaderElectionConfiguration{LeaderElect: fi.PtrTo(true)} - kcm.AllocateNodeCIDRs = fi.Bool(!clusterSpec.IsKopsControllerIPAM()) + kcm.AllocateNodeCIDRs = fi.PtrTo(!clusterSpec.IsKopsControllerIPAM()) if kcm.ClusterCIDR == "" && !clusterSpec.IsKopsControllerIPAM() { kcm.ClusterCIDR = clusterSpec.PodCIDR @@ -139,37 +139,37 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error // Kubernetes limitation nodeSize = 16 } - kcm.NodeCIDRMaskSize = fi.Int32(int32(clusterSize + nodeSize)) + kcm.NodeCIDRMaskSize = fi.PtrTo(int32(clusterSize + nodeSize)) } networking := clusterSpec.Networking if networking == nil { - kcm.ConfigureCloudRoutes = fi.Bool(true) + kcm.ConfigureCloudRoutes = fi.PtrTo(true) } else if networking.Kubenet != nil { - kcm.ConfigureCloudRoutes = fi.Bool(true) + kcm.ConfigureCloudRoutes = fi.PtrTo(true) } else if networking.GCE != nil { - kcm.ConfigureCloudRoutes = fi.Bool(false) - kcm.CIDRAllocatorType = fi.String("CloudAllocator") + kcm.ConfigureCloudRoutes = fi.PtrTo(false) + kcm.CIDRAllocatorType = fi.PtrTo("CloudAllocator") } else if networking.External != nil { - kcm.ConfigureCloudRoutes = fi.Bool(false) + kcm.ConfigureCloudRoutes = fi.PtrTo(false) } else if UsesCNI(networking) { - kcm.ConfigureCloudRoutes = fi.Bool(false) + kcm.ConfigureCloudRoutes = fi.PtrTo(false) } else if networking.Kopeio != nil { // Kopeio is based on kubenet / external - kcm.ConfigureCloudRoutes = fi.Bool(false) + kcm.ConfigureCloudRoutes = fi.PtrTo(false) } else { return fmt.Errorf("no networking 
mode set") } if kcm.UseServiceAccountCredentials == nil { - kcm.UseServiceAccountCredentials = fi.Bool(true) + kcm.UseServiceAccountCredentials = fi.PtrTo(true) } if len(kcm.Controllers) == 0 { var changes []string // @check if the node authorization is enabled and if so enable the tokencleaner controller (disabled by default) // This is responsible for cleaning up bootstrap tokens which have expired - if fi.BoolValue(clusterSpec.KubeAPIServer.EnableBootstrapAuthToken) { + if fi.ValueOf(clusterSpec.KubeAPIServer.EnableBootstrapAuthToken) { changes = append(changes, "tokencleaner") } if clusterSpec.IsKopsControllerIPAM() { @@ -180,7 +180,7 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error } } - if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.BoolValue(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { + if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.ValueOf(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { if kcm.FeatureGates == nil { kcm.FeatureGates = make(map[string]string) diff --git a/pkg/model/components/kubecontrollermanager_test.go b/pkg/model/components/kubecontrollermanager_test.go index 9b4f92923e030..6b3d9f79f9761 100644 --- a/pkg/model/components/kubecontrollermanager_test.go +++ b/pkg/model/components/kubecontrollermanager_test.go @@ -103,40 +103,40 @@ func Test_Build_KCM_Builder_CIDR_Mask_Size(t *testing.T) { }, { PodCIDR: "2001:DB8::/32", - ExpectedMaskSize: fi.Int32(48), + ExpectedMaskSize: fi.PtrTo(int32(48)), }, { PodCIDR: "2001:DB8::/65", - ExpectedMaskSize: fi.Int32(81), + ExpectedMaskSize: fi.PtrTo(int32(81)), }, { PodCIDR: "2001:DB8::/32", ClusterCIDR: "2001:DB8::/65", - ExpectedMaskSize: fi.Int32(81), + ExpectedMaskSize: fi.PtrTo(int32(81)), }, { PodCIDR: "2001:DB8::/95", - ExpectedMaskSize: fi.Int32(111), + ExpectedMaskSize: fi.PtrTo(int32(111)), }, { PodCIDR: "2001:DB8::/96", - ExpectedMaskSize: fi.Int32(112), + ExpectedMaskSize: fi.PtrTo(int32(112)), }, { PodCIDR: "2001:DB8::/97", - ExpectedMaskSize: fi.Int32(112), + ExpectedMaskSize: fi.PtrTo(int32(112)), }, { PodCIDR: "2001:DB8::/98", - ExpectedMaskSize: fi.Int32(113), + ExpectedMaskSize: fi.PtrTo(int32(113)), }, { PodCIDR: "2001:DB8::/99", - ExpectedMaskSize: fi.Int32(113), + ExpectedMaskSize: fi.PtrTo(int32(113)), }, { PodCIDR: "2001:DB8::/100", - ExpectedMaskSize: fi.Int32(114), + ExpectedMaskSize: fi.PtrTo(int32(114)), }, } for _, tc := range grid { diff --git a/pkg/model/components/kubedns.go b/pkg/model/components/kubedns.go index f47ee8dad32c4..3e8714d5a51bd 100644 --- a/pkg/model/components/kubedns.go +++ b/pkg/model/components/kubedns.go @@ -86,17 +86,17 @@ func (b *KubeDnsOptionsBuilder) BuildOptions(o interface{}) error { clusterSpec.KubeDNS.NodeLocalDNS = nodeLocalDNS } if nodeLocalDNS.Enabled == nil { - nodeLocalDNS.Enabled = fi.Bool(false) + nodeLocalDNS.Enabled = fi.PtrTo(false) } - if fi.BoolValue(nodeLocalDNS.Enabled) && nodeLocalDNS.LocalIP == "" { + if fi.ValueOf(nodeLocalDNS.Enabled) && nodeLocalDNS.LocalIP == "" { if clusterSpec.IsIPv6Only() { nodeLocalDNS.LocalIP = "fd00:90de:d95::1" } else { nodeLocalDNS.LocalIP = "169.254.20.10" } } - if fi.BoolValue(nodeLocalDNS.Enabled) && nodeLocalDNS.ForwardToKubeDNS == nil { - nodeLocalDNS.ForwardToKubeDNS = fi.Bool(false) + if fi.ValueOf(nodeLocalDNS.Enabled) && nodeLocalDNS.ForwardToKubeDNS == nil { + nodeLocalDNS.ForwardToKubeDNS = fi.PtrTo(false) } if nodeLocalDNS.MemoryRequest == nil || nodeLocalDNS.MemoryRequest.IsZero() { @@ 
-110,7 +110,7 @@ func (b *KubeDnsOptionsBuilder) BuildOptions(o interface{}) error { } if nodeLocalDNS.Image == nil { - nodeLocalDNS.Image = fi.String("registry.k8s.io/dns/k8s-dns-node-cache:1.22.8") + nodeLocalDNS.Image = fi.PtrTo("registry.k8s.io/dns/k8s-dns-node-cache:1.22.8") } return nil diff --git a/pkg/model/components/kubelet.go b/pkg/model/components/kubelet.go index 7c58e493b01a1..1c8d82af72ff6 100644 --- a/pkg/model/components/kubelet.go +++ b/pkg/model/components/kubelet.go @@ -57,9 +57,9 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { } // Standard options - clusterSpec.Kubelet.EnableDebuggingHandlers = fi.Bool(true) + clusterSpec.Kubelet.EnableDebuggingHandlers = fi.PtrTo(true) clusterSpec.Kubelet.PodManifestPath = "/etc/kubernetes/manifests" - clusterSpec.Kubelet.LogLevel = fi.Int32(2) + clusterSpec.Kubelet.LogLevel = fi.PtrTo(int32(2)) clusterSpec.Kubelet.ClusterDomain = clusterSpec.ClusterDNSDomain // AllowPrivileged is deprecated and removed in v1.14. @@ -75,7 +75,7 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { } if clusterSpec.Kubelet.ClusterDNS == "" { - if clusterSpec.KubeDNS != nil && clusterSpec.KubeDNS.NodeLocalDNS != nil && fi.BoolValue(clusterSpec.KubeDNS.NodeLocalDNS.Enabled) { + if clusterSpec.KubeDNS != nil && clusterSpec.KubeDNS.NodeLocalDNS != nil && fi.ValueOf(clusterSpec.KubeDNS.NodeLocalDNS.Enabled) { clusterSpec.Kubelet.ClusterDNS = clusterSpec.KubeDNS.NodeLocalDNS.LocalIP } else { ip, err := WellKnownServiceIP(clusterSpec, 10) @@ -86,11 +86,11 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { } } - clusterSpec.MasterKubelet.RegisterSchedulable = fi.Bool(false) + clusterSpec.MasterKubelet.RegisterSchedulable = fi.PtrTo(false) // Replace the CIDR with a CIDR allocated by KCM (the default, but included for clarity) // We _do_ allow debugging handlers, so we can do logs // This does allow more access than we would like though - clusterSpec.MasterKubelet.EnableDebuggingHandlers = fi.Bool(true) + clusterSpec.MasterKubelet.EnableDebuggingHandlers = fi.PtrTo(true) { // For pod eviction in low memory or empty disk situations @@ -106,7 +106,7 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { "imagefs.available<10%", "imagefs.inodesFree<5%", } - clusterSpec.Kubelet.EvictionHard = fi.String(strings.Join(evictionHard, ",")) + clusterSpec.Kubelet.EvictionHard = fi.PtrTo(strings.Join(evictionHard, ",")) } } @@ -117,8 +117,8 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { // IsolateMasters enables the legacy behaviour, where master pods are on a separate network // In newer versions of Kubernetes, most of that functionality has been removed - if fi.BoolValue(clusterSpec.IsolateMasters) { - clusterSpec.MasterKubelet.EnableDebuggingHandlers = fi.Bool(false) + if fi.ValueOf(clusterSpec.IsolateMasters) { + clusterSpec.MasterKubelet.EnableDebuggingHandlers = fi.PtrTo(false) clusterSpec.MasterKubelet.HairpinMode = "none" } @@ -142,8 +142,8 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { if clusterSpec.CloudConfig == nil { clusterSpec.CloudConfig = &kops.CloudConfiguration{} } - clusterSpec.CloudConfig.Multizone = fi.Bool(true) - clusterSpec.CloudConfig.NodeTags = fi.String(gce.TagForRole(b.ClusterName, kops.InstanceGroupRoleNode)) + clusterSpec.CloudConfig.Multizone = fi.PtrTo(true) + clusterSpec.CloudConfig.NodeTags = fi.PtrTo(gce.TagForRole(b.ClusterName, kops.InstanceGroupRoleNode)) } @@ -169,9 +169,9 @@ func (b *KubeletOptionsBuilder)
BuildOptions(o interface{}) error { return fmt.Errorf("no networking mode set") } if UsesKubenet(networking) && b.IsKubernetesLT("1.24") { - clusterSpec.Kubelet.NetworkPluginName = fi.String("kubenet") - clusterSpec.Kubelet.NetworkPluginMTU = fi.Int32(9001) - clusterSpec.Kubelet.NonMasqueradeCIDR = fi.String(clusterSpec.NonMasqueradeCIDR) + clusterSpec.Kubelet.NetworkPluginName = fi.PtrTo("kubenet") + clusterSpec.Kubelet.NetworkPluginMTU = fi.PtrTo(int32(9001)) + clusterSpec.Kubelet.NonMasqueradeCIDR = fi.PtrTo(clusterSpec.NonMasqueradeCIDR) } } @@ -188,7 +188,7 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { clusterSpec.Kubelet.FeatureGates = make(map[string]string) } - if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.BoolValue(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { + if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.ValueOf(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { if _, found := clusterSpec.Kubelet.FeatureGates["CSIMigrationAWS"]; !found { clusterSpec.Kubelet.FeatureGates["CSIMigrationAWS"] = "true" } @@ -204,7 +204,7 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { } if b.IsKubernetesGTE("1.22") && clusterSpec.Kubelet.ProtectKernelDefaults == nil { - clusterSpec.Kubelet.ProtectKernelDefaults = fi.Bool(true) + clusterSpec.Kubelet.ProtectKernelDefaults = fi.PtrTo(true) } // We do not enable graceful shutdown when using amazonaws due to leaking ENIs. @@ -217,8 +217,8 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error { clusterSpec.Kubelet.ShutdownGracePeriodCriticalPods = &metav1.Duration{Duration: 0} } - clusterSpec.Kubelet.RegisterSchedulable = fi.Bool(true) - clusterSpec.MasterKubelet.RegisterSchedulable = fi.Bool(true) + clusterSpec.Kubelet.RegisterSchedulable = fi.PtrTo(true) + clusterSpec.MasterKubelet.RegisterSchedulable = fi.PtrTo(true) return nil } diff --git a/pkg/model/components/kubeproxy.go b/pkg/model/components/kubeproxy.go index 43bc635cd058c..b35035714cb94 100644 --- a/pkg/model/components/kubeproxy.go +++ b/pkg/model/components/kubeproxy.go @@ -64,7 +64,7 @@ func (b *KubeProxyOptionsBuilder) BuildOptions(o interface{}) error { if config.ClusterCIDR == nil { if b.needsClusterCIDR(clusterSpec) { - config.ClusterCIDR = fi.String(clusterSpec.KubeControllerManager.ClusterCIDR) + config.ClusterCIDR = fi.PtrTo(clusterSpec.KubeControllerManager.ClusterCIDR) } } diff --git a/pkg/model/components/kubescheduler.go b/pkg/model/components/kubescheduler.go index 713bb47474e3d..86de942ba4b71 100644 --- a/pkg/model/components/kubescheduler.go +++ b/pkg/model/components/kubescheduler.go @@ -53,11 +53,11 @@ func (b *KubeSchedulerOptionsBuilder) BuildOptions(o interface{}) error { if config.LeaderElection == nil { // Doesn't seem to be any real downside to always doing a leader election config.LeaderElection = &kops.LeaderElectionConfiguration{ - LeaderElect: fi.Bool(true), + LeaderElect: fi.PtrTo(true), } } - if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.BoolValue(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { + if clusterSpec.CloudConfig != nil && clusterSpec.CloudConfig.AWSEBSCSIDriver != nil && fi.ValueOf(clusterSpec.CloudConfig.AWSEBSCSIDriver.Enabled) { if config.FeatureGates == nil { config.FeatureGates = make(map[string]string) diff --git a/pkg/model/components/networking.go b/pkg/model/components/networking.go index 043e65c7db03e..dc430bf89422e 100644 --- 
a/pkg/model/components/networking.go +++ b/pkg/model/components/networking.go @@ -46,9 +46,9 @@ func (b *NetworkingOptionsBuilder) BuildOptions(o interface{}) error { if b.Context.IsKubernetesLT("1.24") { if UsesCNI(networking) { - options.Kubelet.NetworkPluginName = fi.String("cni") + options.Kubelet.NetworkPluginName = fi.PtrTo("cni") } else if networking.GCE != nil { - options.Kubelet.NetworkPluginName = fi.String("kubenet") + options.Kubelet.NetworkPluginName = fi.PtrTo("kubenet") } } diff --git a/pkg/model/components/nodeproblemdetector.go b/pkg/model/components/nodeproblemdetector.go index 4a11aaf0a6636..e6aa79126902d 100644 --- a/pkg/model/components/nodeproblemdetector.go +++ b/pkg/model/components/nodeproblemdetector.go @@ -38,7 +38,7 @@ func (b *NodeProblemDetectorOptionsBuilder) BuildOptions(o interface{}) error { npd := clusterSpec.NodeProblemDetector if npd.Enabled == nil { - npd.Enabled = fi.Bool(false) + npd.Enabled = fi.PtrTo(false) } if npd.CPURequest == nil { @@ -62,7 +62,7 @@ func (b *NodeProblemDetectorOptionsBuilder) BuildOptions(o interface{}) error { } if npd.Image == nil { - npd.Image = fi.String("registry.k8s.io/node-problem-detector/node-problem-detector:v0.8.12") + npd.Image = fi.PtrTo("registry.k8s.io/node-problem-detector/node-problem-detector:v0.8.12") } return nil diff --git a/pkg/model/components/nodeterminationhandler.go b/pkg/model/components/nodeterminationhandler.go index 7f3eee4f4d587..6eaee19d6e5ab 100644 --- a/pkg/model/components/nodeterminationhandler.go +++ b/pkg/model/components/nodeterminationhandler.go @@ -37,35 +37,35 @@ func (b *NodeTerminationHandlerOptionsBuilder) BuildOptions(o interface{}) error } nth := clusterSpec.NodeTerminationHandler if nth.Enabled == nil { - nth.Enabled = fi.Bool(true) + nth.Enabled = fi.PtrTo(true) } if nth.EnableSpotInterruptionDraining == nil { - nth.EnableSpotInterruptionDraining = fi.Bool(true) + nth.EnableSpotInterruptionDraining = fi.PtrTo(true) } if nth.EnableScheduledEventDraining == nil { - nth.EnableScheduledEventDraining = fi.Bool(false) + nth.EnableScheduledEventDraining = fi.PtrTo(false) } if nth.EnableRebalanceMonitoring == nil { - nth.EnableRebalanceMonitoring = fi.Bool(false) + nth.EnableRebalanceMonitoring = fi.PtrTo(false) } if nth.EnableRebalanceDraining == nil { - nth.EnableRebalanceDraining = fi.Bool(false) + nth.EnableRebalanceDraining = fi.PtrTo(false) } if nth.EnablePrometheusMetrics == nil { - nth.EnablePrometheusMetrics = fi.Bool(false) + nth.EnablePrometheusMetrics = fi.PtrTo(false) } if nth.EnableSQSTerminationDraining == nil { - nth.EnableSQSTerminationDraining = fi.Bool(false) + nth.EnableSQSTerminationDraining = fi.PtrTo(false) } if nth.ExcludeFromLoadBalancers == nil { - nth.ExcludeFromLoadBalancers = fi.Bool(true) + nth.ExcludeFromLoadBalancers = fi.PtrTo(true) } if nth.ManagedASGTag == nil { - nth.ManagedASGTag = fi.String("aws-node-termination-handler/managed") + nth.ManagedASGTag = fi.PtrTo("aws-node-termination-handler/managed") } if nth.CPURequest == nil { @@ -79,7 +79,7 @@ func (b *NodeTerminationHandlerOptionsBuilder) BuildOptions(o interface{}) error } if nth.Version == nil { - nth.Version = fi.String("v1.17.1") + nth.Version = fi.PtrTo("v1.17.1") } return nil diff --git a/pkg/model/components/openstack.go b/pkg/model/components/openstack.go index 2ccb6cc7e92c1..ab92034d4c8c2 100644 --- a/pkg/model/components/openstack.go +++ b/pkg/model/components/openstack.go @@ -46,21 +46,21 @@ func (b *OpenStackOptionsBuilder) BuildOptions(o interface{}) error { } if 
openstack.BlockStorage.CreateStorageClass == nil { - openstack.BlockStorage.CreateStorageClass = fi.Bool(true) + openstack.BlockStorage.CreateStorageClass = fi.PtrTo(true) } if openstack.Metadata == nil { openstack.Metadata = &kops.OpenstackMetadata{} } if openstack.Metadata.ConfigDrive == nil { - openstack.Metadata.ConfigDrive = fi.Bool(false) + openstack.Metadata.ConfigDrive = fi.PtrTo(false) } if clusterSpec.ExternalCloudControllerManager == nil { clusterSpec.ExternalCloudControllerManager = &kops.CloudControllerManagerConfig{ // No significant downside to always doing a leader election. // Also, having a replicated (HA) control plane requires leader election. - LeaderElection: &kops.LeaderElectionConfiguration{LeaderElect: fi.Bool(true)}, + LeaderElection: &kops.LeaderElectionConfiguration{LeaderElect: fi.PtrTo(true)}, } } diff --git a/pkg/model/config.go b/pkg/model/config.go index 59fb1b042ed42..957c2c3a63b2c 100644 --- a/pkg/model/config.go +++ b/pkg/model/config.go @@ -36,10 +36,10 @@ type ConfigBuilder struct { func (b *ConfigBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ - Name: fi.String(registry.PathKopsVersionUpdated), + Name: fi.PtrTo(registry.PathKopsVersionUpdated), Lifecycle: b.Lifecycle, - Base: fi.String(b.Cluster.Spec.ConfigBase), - Location: fi.String(registry.PathKopsVersionUpdated), + Base: fi.PtrTo(b.Cluster.Spec.ConfigBase), + Location: fi.PtrTo(registry.PathKopsVersionUpdated), Contents: fi.NewStringResource(kopsbase.Version), }) @@ -48,10 +48,10 @@ func (b *ConfigBuilder) Build(c *fi.ModelBuilderContext) error { return fmt.Errorf("serializing completed cluster spec: %w", err) } c.AddTask(&fitasks.ManagedFile{ - Name: fi.String(registry.PathClusterCompleted), + Name: fi.PtrTo(registry.PathClusterCompleted), Lifecycle: b.Lifecycle, - Base: fi.String(b.Cluster.Spec.ConfigBase), - Location: fi.String(registry.PathClusterCompleted), + Base: fi.PtrTo(b.Cluster.Spec.ConfigBase), + Location: fi.PtrTo(registry.PathClusterCompleted), Contents: fi.NewBytesResource(versionedYaml), }) diff --git a/pkg/model/context.go b/pkg/model/context.go index 0ae6ebec6596e..40892465f5abc 100644 --- a/pkg/model/context.go +++ b/pkg/model/context.go @@ -149,8 +149,8 @@ func (b *KopsModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) (ma // Apply NTH Labels nth := b.Cluster.Spec.NodeTerminationHandler - if nth != nil && fi.BoolValue(nth.Enabled) && fi.BoolValue(nth.EnableSQSTerminationDraining) { - labels[fi.StringValue(nth.ManagedASGTag)] = "" + if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) { + labels[fi.ValueOf(nth.ManagedASGTag)] = "" } // Apply labels for cluster autoscaler node labels @@ -246,7 +246,7 @@ func (b *KopsModelContext) UseBootstrapTokens() bool { return false } - return fi.BoolValue(b.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken) + return fi.ValueOf(b.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken) } // UsesBastionDns checks if we should use a specific name for the bastion dns @@ -270,9 +270,6 @@ func (b *KopsModelContext) UsesSSHBastion() bool { // UseLoadBalancerForAPI checks if we are using a load balancer for the kubeapi func (b *KopsModelContext) UseLoadBalancerForAPI() bool { - if b.Cluster.Spec.API == nil { - return false - } return b.Cluster.Spec.API.LoadBalancer != nil } @@ -286,7 +283,7 @@ func (b *KopsModelContext) UseLoadBalancerForInternalAPI() bool { // APILoadBalancerClass returns which type of load balancer to use for the api func (b *KopsModelContext) 
APILoadBalancerClass() kops.LoadBalancerClass { - if b.Cluster.Spec.API != nil && b.Cluster.Spec.API.LoadBalancer != nil { + if b.Cluster.Spec.API.LoadBalancer != nil { return b.Cluster.Spec.API.LoadBalancer.Class } return kops.LoadBalancerClassClassic @@ -386,7 +383,7 @@ func (b *KopsModelContext) NodePortRange() (utilnet.PortRange, error) { // UseServiceAccountExternalPermissions returns true if we are using service-account bound IAM roles. func (b *KopsModelContext) UseServiceAccountExternalPermissions() bool { return b.Cluster.Spec.IAM != nil && - fi.BoolValue(b.Cluster.Spec.IAM.UseServiceAccountExternalPermissions) + fi.ValueOf(b.Cluster.Spec.IAM.UseServiceAccountExternalPermissions) } // NetworkingIsCalico returns true if we are using calico networking diff --git a/pkg/model/domodel/api_loadbalancer.go b/pkg/model/domodel/api_loadbalancer.go index ec7c64a589de0..77ae9a19dd43d 100644 --- a/pkg/model/domodel/api_loadbalancer.go +++ b/pkg/model/domodel/api_loadbalancer.go @@ -60,18 +60,18 @@ func (b *APILoadBalancerModelBuilder) Build(c *fi.ModelBuilderContext) error { // Create LoadBalancer for API LB loadbalancer := &dotasks.LoadBalancer{ - Name: fi.String(loadbalancerName), - Region: fi.String(b.Cluster.Spec.Subnets[0].Region), - DropletTag: fi.String(clusterMasterTag), + Name: fi.PtrTo(loadbalancerName), + Region: fi.PtrTo(b.Cluster.Spec.Subnets[0].Region), + DropletTag: fi.PtrTo(clusterMasterTag), Lifecycle: b.Lifecycle, } if b.Cluster.Spec.NetworkID != "" { - loadbalancer.VPCUUID = fi.String(b.Cluster.Spec.NetworkID) + loadbalancer.VPCUUID = fi.PtrTo(b.Cluster.Spec.NetworkID) } else if b.Cluster.Spec.NetworkCIDR != "" { vpcName := "vpc-" + clusterName - loadbalancer.VPCName = fi.String(vpcName) - loadbalancer.NetworkCIDR = fi.String(b.Cluster.Spec.NetworkCIDR) + loadbalancer.VPCName = fi.PtrTo(vpcName) + loadbalancer.NetworkCIDR = fi.PtrTo(b.Cluster.Spec.NetworkCIDR) } c.AddTask(loadbalancer) diff --git a/pkg/model/domodel/droplets.go b/pkg/model/domodel/droplets.go index a340219f957e8..732cc9806bb3b 100644 --- a/pkg/model/domodel/droplets.go +++ b/pkg/model/domodel/droplets.go @@ -57,15 +57,15 @@ func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error { name := d.AutoscalingGroupName(ig) droplet := dotasks.Droplet{ - Count: int(fi.Int32Value(ig.Spec.MinSize)), - Name: fi.String(name), + Count: int(fi.ValueOf(ig.Spec.MinSize)), + Name: fi.PtrTo(name), Lifecycle: d.Lifecycle, // kops currently supports only one region - Region: fi.String(d.Cluster.Spec.Subnets[0].Region), - Size: fi.String(ig.Spec.MachineType), - Image: fi.String(ig.Spec.Image), - SSHKey: fi.String(sshKeyFingerPrint), + Region: fi.PtrTo(d.Cluster.Spec.Subnets[0].Region), + Size: fi.PtrTo(ig.Spec.MachineType), + Image: fi.PtrTo(ig.Spec.Image), + SSHKey: fi.PtrTo(sshKeyFingerPrint), Tags: []string{clusterTag}, } @@ -82,13 +82,13 @@ func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error { } if d.Cluster.Spec.NetworkID != "" { - droplet.VPCUUID = fi.String(d.Cluster.Spec.NetworkID) + droplet.VPCUUID = fi.PtrTo(d.Cluster.Spec.NetworkID) } else if d.Cluster.Spec.NetworkCIDR != "" { // since networkCIDR is specified as part of the request, a vpc with this cidr is guaranteed to exist before // the droplet is created, so the droplet can be associated with that vpc.
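One behavioural detail worth noting in the Droplet hunk above: like the old fi.Int32Value, fi.ValueOf returns the zero value for a nil pointer, so Count: int(fi.ValueOf(ig.Spec.MinSize)) still quietly becomes 0 when MinSize is unset. A runnable illustration (ValueOf as sketched earlier; the unset spec value is hypothetical):

package main

import "fmt"

func ValueOf[T any](v *T) T {
	if v == nil {
		var zero T
		return zero
	}
	return *v
}

func main() {
	var minSize *int32 // MinSize left unset in the InstanceGroup spec
	fmt.Println(int(ValueOf(minSize))) // 0 — no droplets requested

	minSize = new(int32)
	*minSize = 3
	fmt.Println(int(ValueOf(minSize))) // 3
}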
vpcName := "vpc-" + clusterName - droplet.VPCName = fi.String(vpcName) - droplet.NetworkCIDR = fi.String(d.Cluster.Spec.NetworkCIDR) + droplet.VPCName = fi.PtrTo(vpcName) + droplet.NetworkCIDR = fi.PtrTo(d.Cluster.Spec.NetworkCIDR) } userData, err := d.BootstrapScriptBuilder.ResourceNodeUp(c, ig) diff --git a/pkg/model/domodel/network.go b/pkg/model/domodel/network.go index d290a6d9cbff1..3bbeba2dfb6e3 100644 --- a/pkg/model/domodel/network.go +++ b/pkg/model/domodel/network.go @@ -44,10 +44,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { // Create a separate vpc for this cluster. vpc := &dotasks.VPC{ - Name: fi.String(vpcName), - Region: fi.String(b.Cluster.Spec.Subnets[0].Region), + Name: fi.PtrTo(vpcName), + Region: fi.PtrTo(b.Cluster.Spec.Subnets[0].Region), Lifecycle: b.Lifecycle, - IPRange: fi.String(ipRange), + IPRange: fi.PtrTo(ipRange), } c.AddTask(vpc) diff --git a/pkg/model/gcemodel/api_loadbalancer.go b/pkg/model/gcemodel/api_loadbalancer.go index 6b2a10533573f..7d1fe26b9a496 100644 --- a/pkg/model/gcemodel/api_loadbalancer.go +++ b/pkg/model/gcemodel/api_loadbalancer.go @@ -91,7 +91,7 @@ func createPublicLB(b *APILoadBalancerBuilder, c *fi.ModelBuilderContext) error b.AddFirewallRulesTasks(c, "https-api", &gcetasks.FirewallRule{ Lifecycle: b.Lifecycle, Network: network, - SourceRanges: b.Cluster.Spec.KubernetesAPIAccess, + SourceRanges: b.Cluster.Spec.API.Access, TargetTags: []string{b.GCETagForRole(kops.InstanceGroupRoleMaster)}, Allowed: []string{"tcp:443"}, }) @@ -194,7 +194,7 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error { // subnetNotSpecified returns true if the given LB subnet is not listed in the list of cluster subnets. func subnetNotSpecified(sn kops.LoadBalancerSubnetSpec, subnets []kops.ClusterSubnetSpec) bool { for _, csn := range subnets { - if csn.Name == sn.Name || csn.ProviderID == sn.Name { + if csn.Name == sn.Name || csn.ID == sn.Name { return false } } diff --git a/pkg/model/gcemodel/autoscalinggroup.go b/pkg/model/gcemodel/autoscalinggroup.go index 8092124c5778c..648246465f2e0 100644 --- a/pkg/model/gcemodel/autoscalinggroup.go +++ b/pkg/model/gcemodel/autoscalinggroup.go @@ -62,14 +62,14 @@ func (b *AutoscalingGroupModelBuilder) buildInstanceTemplate(c *fi.ModelBuilderC } { - volumeSize := fi.Int32Value(ig.Spec.RootVolumeSize) + volumeSize := fi.ValueOf(ig.Spec.RootVolumeSize) if volumeSize == 0 { volumeSize, err = defaults.DefaultInstanceGroupVolumeSize(ig.Spec.Role) if err != nil { return nil, err } } - volumeType := fi.StringValue(ig.Spec.RootVolumeType) + volumeType := fi.ValueOf(ig.Spec.RootVolumeType) if volumeType == "" { volumeType = DefaultVolumeType } @@ -89,10 +89,10 @@ func (b *AutoscalingGroupModelBuilder) buildInstanceTemplate(c *fi.ModelBuilderC BootDiskSizeGB: i64(int64(volumeSize)), BootDiskImage: s(ig.Spec.Image), - Preemptible: fi.Bool(fi.StringValue(ig.Spec.GCPProvisioningModel) == "SPOT"), + Preemptible: fi.PtrTo(fi.ValueOf(ig.Spec.GCPProvisioningModel) == "SPOT"), GCPProvisioningModel: ig.Spec.GCPProvisioningModel, - HasExternalIP: fi.Bool(b.Cluster.Spec.Topology.ControlPlane == kops.TopologyPublic), + HasExternalIP: fi.PtrTo(b.Cluster.Spec.Topology.ControlPlane == kops.TopologyPublic), Scopes: []string{ "compute-rw", @@ -150,13 +150,13 @@ func (b *AutoscalingGroupModelBuilder) buildInstanceTemplate(c *fi.ModelBuilderC } if gce.UsesIPAliases(b.Cluster) { - t.CanIPForward = fi.Bool(false) + t.CanIPForward = fi.PtrTo(false) t.AliasIPRanges = map[string]string{ 
b.NameForIPAliasRange("pods"): "/24", } } else { - t.CanIPForward = fi.Bool(true) + t.CanIPForward = fi.PtrTo(true) } t.Subnet = b.LinkToSubnet(subnet) @@ -194,7 +194,7 @@ func (b *AutoscalingGroupModelBuilder) splitToZones(ig *kops.InstanceGroup) (map // TODO: Duplicated from aws - move to defaults? minSize := 1 if ig.Spec.MinSize != nil { - minSize = int(fi.Int32Value(ig.Spec.MinSize)) + minSize = int(fi.ValueOf(ig.Spec.MinSize)) } else if ig.Spec.Role == kops.InstanceGroupRoleNode { minSize = 2 } @@ -266,7 +266,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { Name: s(name), Lifecycle: b.Lifecycle, Zone: s(zone), - TargetSize: fi.Int64(int64(targetSize)), + TargetSize: fi.PtrTo(int64(targetSize)), BaseInstanceName: s(ig.ObjectMeta.Name), InstanceTemplate: instanceTemplate, } diff --git a/pkg/model/gcemodel/context.go b/pkg/model/gcemodel/context.go index ff5e8d4dd0c61..d4ad86566c0e9 100644 --- a/pkg/model/gcemodel/context.go +++ b/pkg/model/gcemodel/context.go @@ -59,7 +59,7 @@ func (c *GCEModelContext) NameForIPAliasRange(key string) string { // LinkToSubnet returns a link to the GCE subnet object func (c *GCEModelContext) LinkToSubnet(subnet *kops.ClusterSubnetSpec) *gcetasks.Subnet { - name := subnet.ProviderID + name := subnet.ID if name == "" { name = gce.ClusterSuffixedName(subnet.Name, c.Cluster.ObjectMeta.Name, 63) } @@ -148,7 +148,7 @@ func (c *GCEModelContext) LinkToServiceAccount(ig *kops.InstanceGroup) *gcetasks return &gcetasks.ServiceAccount{ Name: s("shared"), Email: &c.Cluster.Spec.CloudConfig.GCEServiceAccount, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), } } diff --git a/pkg/model/gcemodel/convenience.go b/pkg/model/gcemodel/convenience.go index 2fb6f6d53775c..f68d0e7058835 100644 --- a/pkg/model/gcemodel/convenience.go +++ b/pkg/model/gcemodel/convenience.go @@ -20,10 +20,10 @@ import "k8s.io/kops/upup/pkg/fi" // s is a helper that builds a *string from a string value func s(v string) *string { - return fi.String(v) + return fi.PtrTo(v) } // i64 is a helper that builds a *int64 from an int64 value func i64(v int64) *int64 { - return fi.Int64(v) + return fi.PtrTo(v) } diff --git a/pkg/model/gcemodel/external_access.go b/pkg/model/gcemodel/external_access.go index 8f3297a3bdee7..b247ea0a885a9 100644 --- a/pkg/model/gcemodel/external_access.go +++ b/pkg/model/gcemodel/external_access.go @@ -34,7 +34,7 @@ var _ fi.ModelBuilder = &ExternalAccessModelBuilder{} func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { klog.Warningf("TODO: Harmonize gcemodel ExternalAccessModelBuilder with awsmodel") - if len(b.Cluster.Spec.KubernetesAPIAccess) == 0 { + if len(b.Cluster.Spec.API.Access) == 0 { klog.Warningf("KubernetesAPIAccess is empty") } @@ -109,7 +109,7 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { Lifecycle: b.Lifecycle, TargetTags: []string{b.GCETagForRole(kops.InstanceGroupRoleMaster)}, Allowed: []string{"tcp:443"}, - SourceRanges: b.Cluster.Spec.KubernetesAPIAccess, + SourceRanges: b.Cluster.Spec.API.Access, Network: network, }) } diff --git a/pkg/model/gcemodel/network.go b/pkg/model/gcemodel/network.go index b47640034ded0..0da6050672817 100644 --- a/pkg/model/gcemodel/network.go +++ b/pkg/model/gcemodel/network.go @@ -41,7 +41,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { return nil } network.Lifecycle = b.Lifecycle - network.Shared = fi.Bool(sharedNetwork) + network.Shared = fi.PtrTo(sharedNetwork) if !sharedNetwork { // As we're creating 
the network, we're also creating the subnets. // We therefore use custom mode, for a few reasons: @@ -54,7 +54,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { for i := range b.Cluster.Spec.Subnets { subnet := &b.Cluster.Spec.Subnets[i] - sharedSubnet := subnet.ProviderID != "" + sharedSubnet := subnet.ID != "" network, err := b.LinkToNetwork() if err != nil { @@ -65,7 +65,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { Network: network, Lifecycle: b.Lifecycle, Region: s(b.Region), - Shared: fi.Bool(sharedSubnet), + Shared: fi.PtrTo(sharedSubnet), SecondaryIpRanges: make(map[string]string), } @@ -104,7 +104,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { } // If we're in an existing subnet, we assume egress is already configured. - if subnet.ProviderID != "" { + if subnet.ID != "" { continue } diff --git a/pkg/model/gcemodel/service_accounts.go b/pkg/model/gcemodel/service_accounts.go index 3fa63e5d95b6d..96f457ff99d3d 100644 --- a/pkg/model/gcemodel/service_accounts.go +++ b/pkg/model/gcemodel/service_accounts.go @@ -37,7 +37,7 @@ func (b *ServiceAccountsBuilder) Build(c *fi.ModelBuilderContext) error { serviceAccount := &gcetasks.ServiceAccount{ Name: s("shared"), Email: &b.Cluster.Spec.CloudConfig.GCEServiceAccount, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), Lifecycle: b.Lifecycle, } c.AddTask(serviceAccount) @@ -48,7 +48,7 @@ func (b *ServiceAccountsBuilder) Build(c *fi.ModelBuilderContext) error { doneEmails := make(map[string]bool) for _, ig := range b.InstanceGroups { link := b.LinkToServiceAccount(ig) - if fi.BoolValue(link.Shared) { + if fi.ValueOf(link.Shared) { c.EnsureTask(link) continue } @@ -66,11 +66,11 @@ func (b *ServiceAccountsBuilder) Build(c *fi.ModelBuilderContext) error { } switch ig.Spec.Role { case kops.InstanceGroupRoleAPIServer, kops.InstanceGroupRoleMaster: - serviceAccount.Description = fi.String("kubernetes control-plane instances") + serviceAccount.Description = fi.PtrTo("kubernetes control-plane instances") case kops.InstanceGroupRoleNode: - serviceAccount.Description = fi.String("kubernetes worker nodes") + serviceAccount.Description = fi.PtrTo("kubernetes worker nodes") case kops.InstanceGroupRoleBastion: - serviceAccount.Description = fi.String("bastion nodes") + serviceAccount.Description = fi.PtrTo("bastion nodes") default: klog.Warningf("unknown instance role %q", ig.Spec.Role) } diff --git a/pkg/model/hetznermodel/firewall.go b/pkg/model/hetznermodel/firewall.go index 5c1e1a584c166..b3c46742743c9 100644 --- a/pkg/model/hetznermodel/firewall.go +++ b/pkg/model/hetznermodel/firewall.go @@ -50,7 +50,7 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { fmt.Sprintf("%s=%s", hetzner.TagKubernetesInstanceRole, string(kops.InstanceGroupRoleMaster)), } controlPlaneFirewall := &hetznertasks.Firewall{ - Name: fi.String("control-plane." + b.ClusterName()), + Name: fi.PtrTo("control-plane." 
+ b.ClusterName()), Lifecycle: b.Lifecycle, Selector: strings.Join(controlPlaneLabelSelector, ","), Rules: []*hetznertasks.FirewallRule{ @@ -58,7 +58,7 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { Direction: string(hcloud.FirewallRuleDirectionIn), SourceIPs: sshAccess, Protocol: string(hcloud.FirewallRuleProtocolTCP), - Port: fi.String("22"), + Port: fi.PtrTo("22"), }, }, Labels: map[string]string{ @@ -71,7 +71,7 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { fmt.Sprintf("%s=%s", hetzner.TagKubernetesInstanceRole, string(kops.InstanceGroupRoleNode)), } nodesFirewall := &hetznertasks.Firewall{ - Name: fi.String("nodes." + b.ClusterName()), + Name: fi.PtrTo("nodes." + b.ClusterName()), Lifecycle: b.Lifecycle, Selector: strings.Join(nodesLabelSelector, ","), Rules: []*hetznertasks.FirewallRule{ @@ -79,7 +79,7 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { Direction: string(hcloud.FirewallRuleDirectionIn), SourceIPs: sshAccess, Protocol: string(hcloud.FirewallRuleProtocolTCP), - Port: fi.String("22"), + Port: fi.PtrTo("22"), }, }, Labels: map[string]string{ @@ -90,7 +90,7 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { if !b.UseLoadBalancerForAPI() { var apiAccess []net.IPNet - for _, cidr := range b.Cluster.Spec.KubernetesAPIAccess { + for _, cidr := range b.Cluster.Spec.API.Access { _, ipNet, err := net.ParseCIDR(cidr) if err != nil { return err @@ -101,7 +101,7 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { Direction: string(hcloud.FirewallRuleDirectionIn), SourceIPs: apiAccess, Protocol: string(hcloud.FirewallRuleProtocolTCP), - Port: fi.String("443"), + Port: fi.PtrTo("443"), }) } @@ -122,13 +122,13 @@ func (b *ExternalAccessModelBuilder) Build(c *fi.ModelBuilderContext) error { Direction: string(hcloud.FirewallRuleDirectionIn), SourceIPs: nodePortAccess, Protocol: string(hcloud.FirewallRuleProtocolTCP), - Port: fi.String(fmt.Sprintf("%d-%d", nodePortRange.Base, nodePortRange.Base+nodePortRange.Size-1)), + Port: fi.PtrTo(fmt.Sprintf("%d-%d", nodePortRange.Base, nodePortRange.Base+nodePortRange.Size-1)), }) nodesFirewall.Rules = append(nodesFirewall.Rules, &hetznertasks.FirewallRule{ Direction: string(hcloud.FirewallRuleDirectionIn), SourceIPs: nodePortAccess, Protocol: string(hcloud.FirewallRuleProtocolTCP), - Port: fi.String(fmt.Sprintf("%d-%d", nodePortRange.Base, nodePortRange.Base+nodePortRange.Size-1)), + Port: fi.PtrTo(fmt.Sprintf("%d-%d", nodePortRange.Base, nodePortRange.Base+nodePortRange.Size-1)), }) } diff --git a/pkg/model/hetznermodel/loadbalancer.go b/pkg/model/hetznermodel/loadbalancer.go index dfc699d9ed598..f9bdb71fd5c1f 100644 --- a/pkg/model/hetznermodel/loadbalancer.go +++ b/pkg/model/hetznermodel/loadbalancer.go @@ -42,7 +42,7 @@ func (b *LoadBalancerModelBuilder) Build(c *fi.ModelBuilderContext) error { fmt.Sprintf("%s=%s", hetzner.TagKubernetesInstanceRole, string(kops.InstanceGroupRoleMaster)), } loadbalancer := hetznertasks.LoadBalancer{ - Name: fi.String("api." + b.ClusterName()), + Name: fi.PtrTo("api." 
+ b.ClusterName()), Lifecycle: b.Lifecycle, Network: b.LinkToNetwork(), Location: b.InstanceGroups[0].Spec.Subnets[0], @@ -50,8 +50,8 @@ func (b *LoadBalancerModelBuilder) Build(c *fi.ModelBuilderContext) error { Services: []*hetznertasks.LoadBalancerService{ { Protocol: string(hcloud.LoadBalancerServiceProtocolTCP), - ListenerPort: fi.Int(wellknownports.KubeAPIServer), - DestinationPort: fi.Int(wellknownports.KubeAPIServer), + ListenerPort: fi.PtrTo(wellknownports.KubeAPIServer), + DestinationPort: fi.PtrTo(wellknownports.KubeAPIServer), }, }, Target: strings.Join(controlPlaneLabelSelector, ","), @@ -63,8 +63,8 @@ func (b *LoadBalancerModelBuilder) Build(c *fi.ModelBuilderContext) error { if b.Cluster.UsesNoneDNS() { loadbalancer.Services = append(loadbalancer.Services, &hetznertasks.LoadBalancerService{ Protocol: string(hcloud.LoadBalancerServiceProtocolTCP), - ListenerPort: fi.Int(wellknownports.KopsControllerPort), - DestinationPort: fi.Int(wellknownports.KopsControllerPort), + ListenerPort: fi.PtrTo(wellknownports.KopsControllerPort), + DestinationPort: fi.PtrTo(wellknownports.KopsControllerPort), }) } diff --git a/pkg/model/hetznermodel/network.go b/pkg/model/hetznermodel/network.go index a149815ebd3fc..df7b44fd8b287 100644 --- a/pkg/model/hetznermodel/network.go +++ b/pkg/model/hetznermodel/network.go @@ -32,7 +32,7 @@ var _ fi.ModelBuilder = &NetworkModelBuilder{} func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { network := &hetznertasks.Network{ - Name: fi.String(b.ClusterName()), + Name: fi.PtrTo(b.ClusterName()), Lifecycle: b.Lifecycle, } @@ -46,7 +46,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { hetzner.TagKubernetesClusterName: b.ClusterName(), } } else { - network.ID = fi.String(b.Cluster.Spec.NetworkID) + network.ID = fi.PtrTo(b.Cluster.Spec.NetworkID) } c.AddTask(network) diff --git a/pkg/model/hetznermodel/servers.go b/pkg/model/hetznermodel/servers.go index 02e1b7ee65d2b..1afe97aa3a9df 100644 --- a/pkg/model/hetznermodel/servers.go +++ b/pkg/model/hetznermodel/servers.go @@ -41,7 +41,7 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { return err } t := &hetznertasks.SSHKey{ - Name: fi.String(b.ClusterName() + "-" + fingerprint), + Name: fi.PtrTo(b.ClusterName() + "-" + fingerprint), Lifecycle: b.Lifecycle, PublicKey: string(sshkey), Labels: map[string]string{ @@ -53,7 +53,7 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { } for _, ig := range b.InstanceGroups { - igSize := fi.Int32Value(ig.Spec.MinSize) + igSize := fi.ValueOf(ig.Spec.MinSize) labels := make(map[string]string) labels[hetzner.TagKubernetesClusterName] = b.ClusterName() @@ -66,7 +66,7 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { } serverGroup := hetznertasks.ServerGroup{ - Name: fi.String(ig.Name), + Name: fi.PtrTo(ig.Name), Lifecycle: b.Lifecycle, SSHKeys: sshkeyTasks, Network: b.LinkToNetwork(), diff --git a/pkg/model/iam/iam_builder.go b/pkg/model/iam/iam_builder.go index 24135870a852c..4c7a19c3dd74d 100644 --- a/pkg/model/iam/iam_builder.go +++ b/pkg/model/iam/iam_builder.go @@ -413,7 +413,7 @@ func (r *NodeRoleMaster) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) { if !b.UseServiceAccountExternalPermisssions { esc := b.Cluster.Spec.SnapshotController != nil && - fi.BoolValue(b.Cluster.Spec.SnapshotController.Enabled) + fi.ValueOf(b.Cluster.Spec.SnapshotController.Enabled) AddAWSEBSCSIDriverPermissions(p, esc) if b.Cluster.Spec.ExternalCloudControllerManager != 
nil { @@ -424,18 +424,18 @@ func (r *NodeRoleMaster) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) { } } - if c := b.Cluster.Spec.AWSLoadBalancerController; c != nil && fi.BoolValue(b.Cluster.Spec.AWSLoadBalancerController.Enabled) { + if c := b.Cluster.Spec.AWSLoadBalancerController; c != nil && fi.ValueOf(b.Cluster.Spec.AWSLoadBalancerController.Enabled) { AddAWSLoadbalancerControllerPermissions(p, c.EnableWAF, c.EnableWAFv2, c.EnableShield) } var useStaticInstanceList bool - if ca := b.Cluster.Spec.ClusterAutoscaler; ca != nil && fi.BoolValue(ca.AWSUseStaticInstanceList) { + if ca := b.Cluster.Spec.ClusterAutoscaler; ca != nil && fi.ValueOf(ca.AWSUseStaticInstanceList) { useStaticInstanceList = true } AddClusterAutoscalerPermissions(p, useStaticInstanceList) nth := b.Cluster.Spec.NodeTerminationHandler - if nth != nil && fi.BoolValue(nth.Enabled) && fi.BoolValue(nth.EnableSQSTerminationDraining) { + if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) { AddNodeTerminationHandlerSQSPermissions(p) } } @@ -761,7 +761,7 @@ func (b *PolicyResource) Open() (io.Reader, error) { pb := *b.Builder if b.DNSZone != nil { - hostedZoneID := fi.StringValue(b.DNSZone.ZoneID) + hostedZoneID := fi.ValueOf(b.DNSZone.ZoneID) if hostedZoneID == "" { // Dependency analysis failure? return nil, fmt.Errorf("DNS ZoneID not set") @@ -790,7 +790,7 @@ func useBootstrapTokens(cluster *kops.Cluster) bool { return false } - return fi.BoolValue(cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken) + return fi.ValueOf(cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken) } func addECRPermissions(p *Policy) { diff --git a/pkg/model/iam/iam_builder_test.go b/pkg/model/iam/iam_builder_test.go index a96bb28113f92..391835b609748 100644 --- a/pkg/model/iam/iam_builder_test.go +++ b/pkg/model/iam/iam_builder_test.go @@ -168,7 +168,7 @@ func TestPolicyGeneration(t *testing.T) { }, CloudConfig: &kops.CloudConfiguration{ AWSEBSCSIDriver: &kops.AWSEBSCSIDriver{ - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), }, }, Networking: &kops.NetworkingSpec{ diff --git a/pkg/model/issuerdiscovery.go b/pkg/model/issuerdiscovery.go index d46288a4d1ac0..386dc8417ce6f 100644 --- a/pkg/model/issuerdiscovery.go +++ b/pkg/model/issuerdiscovery.go @@ -74,20 +74,20 @@ func (b *IssuerDiscoveryModelBuilder) Build(c *fi.ModelBuilderContext) error { keysFile := &fitasks.ManagedFile{ Contents: keys, Lifecycle: b.Lifecycle, - Location: fi.String("openid/v1/jwks"), - Name: fi.String("keys.json"), - Base: fi.String(b.Cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryStore), - Public: fi.Bool(true), + Location: fi.PtrTo("openid/v1/jwks"), + Name: fi.PtrTo("keys.json"), + Base: fi.PtrTo(b.Cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryStore), + Public: fi.PtrTo(true), } c.AddTask(keysFile) discoveryFile := &fitasks.ManagedFile{ Contents: fi.NewBytesResource(discovery), Lifecycle: b.Lifecycle, - Location: fi.String(".well-known/openid-configuration"), - Name: fi.String("discovery.json"), - Base: fi.String(b.Cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryStore), - Public: fi.Bool(true), + Location: fi.PtrTo(".well-known/openid-configuration"), + Name: fi.PtrTo("discovery.json"), + Base: fi.PtrTo(b.Cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryStore), + Public: fi.PtrTo(true), } c.AddTask(discoveryFile) diff --git a/pkg/model/master_volumes.go b/pkg/model/master_volumes.go index 9a86185946b79..b7bc6ec9abc74 100644 --- a/pkg/model/master_volumes.go +++ b/pkg/model/master_volumes.go @@ -64,7 +64,7 @@ 
func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error { prefix := m.Name + ".etcd-" + etcd.Name name := prefix + "." + b.ClusterName() - igName := fi.StringValue(m.InstanceGroup) + igName := fi.ValueOf(m.InstanceGroup) if igName == "" { return fmt.Errorf("InstanceGroup not set on etcd %s/%s", m.Name, etcd.Name) } @@ -85,7 +85,7 @@ func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error { } zone := zones[0] - volumeSize := fi.Int32Value(m.VolumeSize) + volumeSize := fi.ValueOf(m.VolumeSize) if volumeSize == 0 { volumeSize = DefaultEtcdVolumeSize } @@ -128,12 +128,12 @@ func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error { func (b *MasterVolumeBuilder) addAWSVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd kops.EtcdClusterSpec, m kops.EtcdMemberSpec, allMembers []string) error { // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html - volumeType := fi.StringValue(m.VolumeType) + volumeType := fi.ValueOf(m.VolumeType) if volumeType == "" { volumeType = DefaultAWSEtcdVolumeType } - volumeIops := fi.Int32Value(m.VolumeIOPS) - volumeThroughput := fi.Int32Value(m.VolumeThroughput) + volumeIops := fi.ValueOf(m.VolumeIOPS) + volumeThroughput := fi.ValueOf(m.VolumeThroughput) switch volumeType { case ec2.VolumeTypeIo1, ec2.VolumeTypeIo2: if volumeIops < 100 { @@ -169,25 +169,25 @@ func (b *MasterVolumeBuilder) addAWSVolume(c *fi.ModelBuilderContext, name strin // We always add an owned tags (these can't be shared) tags["kubernetes.io/cluster/"+b.Cluster.ObjectMeta.Name] = "owned" - encrypted := fi.BoolValue(m.EncryptedVolume) + encrypted := fi.ValueOf(m.EncryptedVolume) t := &awstasks.EBSVolume{ - Name: fi.String(name), + Name: fi.PtrTo(name), Lifecycle: b.Lifecycle, - AvailabilityZone: fi.String(zone), - SizeGB: fi.Int64(int64(volumeSize)), - VolumeType: fi.String(volumeType), + AvailabilityZone: fi.PtrTo(zone), + SizeGB: fi.PtrTo(int64(volumeSize)), + VolumeType: fi.PtrTo(volumeType), KmsKeyId: m.KmsKeyID, - Encrypted: fi.Bool(encrypted), + Encrypted: fi.PtrTo(encrypted), Tags: tags, } switch volumeType { case ec2.VolumeTypeGp3: - t.VolumeThroughput = fi.Int64(int64(volumeThroughput)) + t.VolumeThroughput = fi.PtrTo(int64(volumeThroughput)) fallthrough case ec2.VolumeTypeIo1, ec2.VolumeTypeIo2: - t.VolumeIops = fi.Int64(int64(volumeIops)) + t.VolumeIops = fi.PtrTo(int64(volumeIops)) } c.AddTask(t) @@ -235,10 +235,10 @@ func (b *MasterVolumeBuilder) addDOVolume(c *fi.ModelBuilderContext, name string tags[do.TagKubernetesClusterNamePrefix] = do.SafeClusterName(b.Cluster.ObjectMeta.Name) t := &dotasks.Volume{ - Name: fi.String(name), + Name: fi.PtrTo(name), Lifecycle: b.Lifecycle, - SizeGB: fi.Int64(int64(volumeSize)), - Region: fi.String(zone), + SizeGB: fi.PtrTo(int64(volumeSize)), + Region: fi.PtrTo(zone), Tags: tags, } @@ -246,7 +246,7 @@ func (b *MasterVolumeBuilder) addDOVolume(c *fi.ModelBuilderContext, name string } func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, prefix string, volumeSize int32, zone string, etcd kops.EtcdClusterSpec, m kops.EtcdMemberSpec, allMembers []string) { - volumeType := fi.StringValue(m.VolumeType) + volumeType := fi.ValueOf(m.VolumeType) if volumeType == "" { volumeType = DefaultGCEEtcdVolumeType } @@ -281,12 +281,12 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, prefix str name := gce.ClusterSuffixedName(prefix, b.Cluster.ObjectMeta.Name, 63) t := &gcetasks.Disk{ - Name: fi.String(name), + Name: fi.PtrTo(name), 
Lifecycle: b.Lifecycle, - Zone: fi.String(zone), - SizeGB: fi.Int64(int64(volumeSize)), - VolumeType: fi.String(volumeType), + Zone: fi.PtrTo(zone), + SizeGB: fi.PtrTo(int64(volumeSize)), + VolumeType: fi.PtrTo(volumeType), Labels: tags, } @@ -296,11 +296,11 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, prefix str func (b *MasterVolumeBuilder) addHetznerVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd kops.EtcdClusterSpec, m kops.EtcdMemberSpec, allMembers []string) { tags := make(map[string]string) tags[hetzner.TagKubernetesClusterName] = b.Cluster.ObjectMeta.Name - tags[hetzner.TagKubernetesInstanceGroup] = fi.StringValue(m.InstanceGroup) + tags[hetzner.TagKubernetesInstanceGroup] = fi.ValueOf(m.InstanceGroup) tags[hetzner.TagKubernetesVolumeRole] = etcd.Name t := &hetznertasks.Volume{ - Name: fi.String(name), + Name: fi.PtrTo(name), Lifecycle: b.Lifecycle, Size: int(volumeSize), Location: zone, @@ -312,7 +312,7 @@ func (b *MasterVolumeBuilder) addHetznerVolume(c *fi.ModelBuilderContext, name s } func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd kops.EtcdClusterSpec, m kops.EtcdMemberSpec, allMembers []string) error { - volumeType := fi.StringValue(m.VolumeType) + volumeType := fi.ValueOf(m.VolumeType) // The tags are how protokube knows to mount the volume and use it for etcd tags := make(map[string]string) @@ -327,13 +327,13 @@ func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.ModelBuilderContext, name // override zone if b.Cluster.Spec.CloudProvider.Openstack.BlockStorage != nil && b.Cluster.Spec.CloudProvider.Openstack.BlockStorage.OverrideAZ != nil { - zone = fi.StringValue(b.Cluster.Spec.CloudProvider.Openstack.BlockStorage.OverrideAZ) + zone = fi.ValueOf(b.Cluster.Spec.CloudProvider.Openstack.BlockStorage.OverrideAZ) } t := &openstacktasks.Volume{ - Name: fi.String(name), - AvailabilityZone: fi.String(zone), - VolumeType: fi.String(volumeType), - SizeGB: fi.Int64(int64(volumeSize)), + Name: fi.PtrTo(name), + AvailabilityZone: fi.PtrTo(zone), + VolumeType: fi.PtrTo(volumeType), + SizeGB: fi.PtrTo(int64(volumeSize)), Tags: tags, Lifecycle: b.Lifecycle, } @@ -354,18 +354,18 @@ func (b *MasterVolumeBuilder) addAzureVolume( // The tags are use by Protokube to mount the volume and use it for etcd. tags := map[string]*string{ // This is the configuration of the etcd cluster. - azure.TagNameEtcdClusterPrefix + etcd.Name: fi.String(m.Name + "/" + strings.Join(allMembers, ",")), + azure.TagNameEtcdClusterPrefix + etcd.Name: fi.PtrTo(m.Name + "/" + strings.Join(allMembers, ",")), // This says "only mount on a master". - azure.TagNameRolePrefix + azure.TagRoleMaster: fi.String("1"), + azure.TagNameRolePrefix + azure.TagRoleMaster: fi.PtrTo("1"), // We always add an owned tags (these can't be shared). // Use dash (_) as a splitter. Other CSPs use slash (/), but slash is not // allowed as a tag key in Azure. - "kubernetes.io_cluster_" + b.Cluster.ObjectMeta.Name: fi.String("owned"), + "kubernetes.io_cluster_" + b.Cluster.ObjectMeta.Name: fi.PtrTo("owned"), } // Apply all user defined labels on the volumes. for k, v := range b.Cluster.Spec.CloudLabels { - tags[k] = fi.String(v) + tags[k] = fi.PtrTo(v) } zoneNumber, err := azure.ZoneToAvailabilityZoneNumber(zone) @@ -375,13 +375,13 @@ func (b *MasterVolumeBuilder) addAzureVolume( // TODO(kenji): Respect m.EncryptedVolume. 
t := &azuretasks.Disk{ - Name: fi.String(name), + Name: fi.PtrTo(name), Lifecycle: b.Lifecycle, // We cannot use AzureModelContext.LinkToResourceGroup() here because of cyclic dependency. ResourceGroup: &azuretasks.ResourceGroup{ - Name: fi.String(b.Cluster.AzureResourceGroupName()), + Name: fi.PtrTo(b.Cluster.AzureResourceGroupName()), }, - SizeGB: fi.Int32(volumeSize), + SizeGB: fi.PtrTo(volumeSize), Tags: tags, Zones: &[]string{zoneNumber}, } diff --git a/pkg/model/names.go b/pkg/model/names.go index 48930f9501b27..5f8fe2a162bbe 100644 --- a/pkg/model/names.go +++ b/pkg/model/names.go @@ -126,7 +126,7 @@ func (b *KopsModelContext) LinkToVPC() *awstasks.VPC { } func (b *KopsModelContext) LinkToAmazonVPCIPv6CIDR() *awstasks.VPCAmazonIPv6CIDRBlock { - return &awstasks.VPCAmazonIPv6CIDRBlock{Name: fi.String("AmazonIPv6")} + return &awstasks.VPCAmazonIPv6CIDRBlock{Name: fi.PtrTo("AmazonIPv6")} } func (b *KopsModelContext) LinkToDNSZone() *awstasks.DNSZone { @@ -175,7 +175,7 @@ func FindCustomAuthNameFromArn(arn string) (string, error) { func (b *KopsModelContext) LinkToIAMInstanceProfile(ig *kops.InstanceGroup) (*awstasks.IAMInstanceProfile, error) { if ig.Spec.IAM != nil && ig.Spec.IAM.Profile != nil { - name, err := FindCustomAuthNameFromArn(fi.StringValue(ig.Spec.IAM.Profile)) + name, err := FindCustomAuthNameFromArn(fi.ValueOf(ig.Spec.IAM.Profile)) return &awstasks.IAMInstanceProfile{Name: &name}, err } name := b.IAMName(ig.Spec.Role) @@ -214,7 +214,7 @@ func (b *KopsModelContext) NamePublicRouteTableInZone(zoneName string) string { } func (b *KopsModelContext) LinkToPublicRouteTableInZone(zoneName string) *awstasks.RouteTable { - return &awstasks.RouteTable{Name: fi.String(b.NamePublicRouteTableInZone(zoneName))} + return &awstasks.RouteTable{Name: fi.PtrTo(b.NamePublicRouteTableInZone(zoneName))} } func (b *KopsModelContext) NamePrivateRouteTableInZone(zoneName string) string { @@ -222,7 +222,7 @@ func (b *KopsModelContext) NamePrivateRouteTableInZone(zoneName string) string { } func (b *KopsModelContext) LinkToPrivateRouteTableInZone(zoneName string) *awstasks.RouteTable { - return &awstasks.RouteTable{Name: fi.String(b.NamePrivateRouteTableInZone(zoneName))} + return &awstasks.RouteTable{Name: fi.PtrTo(b.NamePrivateRouteTableInZone(zoneName))} } func (b *KopsModelContext) InstanceName(ig *kops.InstanceGroup, suffix string) string { diff --git a/pkg/model/openstackmodel/context.go b/pkg/model/openstackmodel/context.go index 1ea4d00a39cab..34f971372f493 100644 --- a/pkg/model/openstackmodel/context.go +++ b/pkg/model/openstackmodel/context.go @@ -76,7 +76,7 @@ func (c *OpenstackModelContext) GetNetworkName() (string, error) { func (c *OpenstackModelContext) findSubnetClusterSpec(subnet string) (string, error) { for _, sp := range c.Cluster.Spec.Subnets { if sp.Name == subnet { - name, err := c.findSubnetNameByID(sp.ProviderID, sp.Name) + name, err := c.findSubnetNameByID(sp.ID, sp.Name) if err != nil { return "", err } @@ -125,5 +125,5 @@ func (c *OpenstackModelContext) LinkToPort(name *string) *openstacktasks.Port { } func (c *OpenstackModelContext) LinkToSecurityGroup(name string) *openstacktasks.SecurityGroup { - return &openstacktasks.SecurityGroup{Name: fi.String(name)} + return &openstacktasks.SecurityGroup{Name: fi.PtrTo(name)} } diff --git a/pkg/model/openstackmodel/convenience.go b/pkg/model/openstackmodel/convenience.go index 1376d772f7a2d..7d5de09a1e43c 100644 --- a/pkg/model/openstackmodel/convenience.go +++ b/pkg/model/openstackmodel/convenience.go @@ -20,15 +20,15 @@ 
import "k8s.io/kops/upup/pkg/fi" // s is a helper that builds a *string from a string value func s(v string) *string { - return fi.String(v) + return fi.PtrTo(v) } // i32 is a helper that builds a *int32 from an int32 value func i32(v int32) *int32 { - return fi.Int32(v) + return fi.PtrTo(v) } // i is a helper that builds a *int from an int value func i(v int) *int { - return fi.Int(v) + return fi.PtrTo(v) } diff --git a/pkg/model/openstackmodel/firewall.go b/pkg/model/openstackmodel/firewall.go index 70550cd86e3d5..419f2344f6fff 100644 --- a/pkg/model/openstackmodel/firewall.go +++ b/pkg/model/openstackmodel/firewall.go @@ -50,14 +50,14 @@ var _ fi.ModelBuilder = &FirewallModelBuilder{} func (b *FirewallModelBuilder) usesOctavia() bool { if b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer != nil { - return fi.BoolValue(b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer.UseOctavia) + return fi.ValueOf(b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer.UseOctavia) } return false } func (b *FirewallModelBuilder) getOctaviaProvider() string { if b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer != nil { - return fi.StringValue(b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer.Provider) + return fi.ValueOf(b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer.Provider) } return "" } @@ -78,11 +78,11 @@ func (b *FirewallModelBuilder) addDirectionalGroupRule(c *fi.ModelBuilderContext RemoteGroup: dest, RemoteIPPrefix: sgr.RemoteIPPrefix, SecGroup: source, - Delete: fi.Bool(false), + Delete: fi.PtrTo(false), } - klog.V(8).Infof("Adding rule %v", fi.StringValue(t.GetName())) - b.Rules[fi.StringValue(t.GetName())] = t + klog.V(8).Infof("Adding rule %v", fi.ValueOf(t.GetName())) + b.Rules[fi.ValueOf(t.GetName())] = t } // addSSHRules - sets the ssh rules based on the presence of a bastion @@ -233,7 +233,7 @@ func (b *FirewallModelBuilder) addNodePortRules(c *fi.ModelBuilderContext, sgMap func (b *FirewallModelBuilder) addHTTPSRules(c *fi.ModelBuilderContext, sgMap map[string]*openstacktasks.SecurityGroup, useVIPACL bool) error { masterName := b.SecurityGroupName(kops.InstanceGroupRoleMaster) nodeName := b.SecurityGroupName(kops.InstanceGroupRoleNode) - lbSGName := b.Cluster.Spec.MasterPublicName + lbSGName := b.Cluster.Spec.API.PublicName lbSG := sgMap[lbSGName] masterSG := sgMap[masterName] nodeSG := sgMap[nodeName] @@ -254,7 +254,7 @@ func (b *FirewallModelBuilder) addHTTPSRules(c *fi.ModelBuilderContext, sgMap ma if b.UseLoadBalancerForAPI() { if !useVIPACL { // Allow API Access to the lb sg - for _, apiAccess := range b.Cluster.Spec.KubernetesAPIAccess { + for _, apiAccess := range b.Cluster.Spec.API.Access { etherType := IPV4 if !net.IsIPv4CIDRString(apiAccess) { etherType = IPV6 @@ -276,7 +276,7 @@ func (b *FirewallModelBuilder) addHTTPSRules(c *fi.ModelBuilderContext, sgMap ma // FIXME: Octavia port traffic appears to be denied though its port is in lbSG if b.usesOctavia() { if b.getOctaviaProvider() == "ovn" { - for _, apiAccess := range b.Cluster.Spec.KubernetesAPIAccess { + for _, apiAccess := range b.Cluster.Spec.API.Access { etherType := IPV4 if !net.IsIPv4CIDRString(apiAccess) { etherType = IPV6 @@ -306,7 +306,7 @@ func (b *FirewallModelBuilder) addHTTPSRules(c *fi.ModelBuilderContext, sgMap ma } else { // Allow the masters to receive connections from KubernetesAPIAccess - for _, apiAccess := range b.Cluster.Spec.KubernetesAPIAccess { + for _, apiAccess := range b.Cluster.Spec.API.Access { etherType := IPV4 if !net.IsIPv4CIDRString(apiAccess) { etherType = IPV6 @@ -545,7 +545,7 
@@ func (b *FirewallModelBuilder) getExistingRules(sgMap map[string]*openstacktasks return fmt.Errorf("Found multiple security groups with the same name: %v", sgName) } sg := sgs[0] - sgt.Name = fi.String(sg.Name) + sgt.Name = fi.PtrTo(sg.Name) sgIdMap[sg.ID] = sgt } @@ -560,20 +560,20 @@ func (b *FirewallModelBuilder) getExistingRules(sgMap map[string]*openstacktasks for _, rule := range sgRules { t := &openstacktasks.SecurityGroupRule{ - ID: fi.String(rule.ID), - Direction: fi.String(rule.Direction), - EtherType: fi.String(rule.EtherType), - PortRangeMax: fi.Int(rule.PortRangeMax), - PortRangeMin: fi.Int(rule.PortRangeMin), - Protocol: fi.String(rule.Protocol), - RemoteIPPrefix: fi.String(rule.RemoteIPPrefix), + ID: fi.PtrTo(rule.ID), + Direction: fi.PtrTo(rule.Direction), + EtherType: fi.PtrTo(rule.EtherType), + PortRangeMax: fi.PtrTo(rule.PortRangeMax), + PortRangeMin: fi.PtrTo(rule.PortRangeMin), + Protocol: fi.PtrTo(rule.Protocol), + RemoteIPPrefix: fi.PtrTo(rule.RemoteIPPrefix), RemoteGroup: sgIdMap[rule.RemoteGroupID], Lifecycle: b.Lifecycle, SecGroup: sgIdMap[rule.SecGroupID], - Delete: fi.Bool(true), + Delete: fi.PtrTo(true), } klog.V(8).Infof("Adding existing rule %v", t) - b.Rules[fi.StringValue(t.GetName())] = t + b.Rules[fi.ValueOf(t.GetName())] = t } } return nil @@ -581,7 +581,7 @@ func (b *FirewallModelBuilder) getExistingRules(sgMap map[string]*openstacktasks func (b *FirewallModelBuilder) addDefaultEgress(c *fi.ModelBuilderContext, sgMap map[string]*openstacktasks.SecurityGroup, useVIPACL bool) { for name, sg := range sgMap { - if useVIPACL && name == b.Cluster.Spec.MasterPublicName { + if useVIPACL && name == b.Cluster.Spec.API.PublicName { continue } t := &openstacktasks.SecurityGroupRule{ @@ -618,7 +618,7 @@ func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error { useVIPACL = true } sg := &openstacktasks.SecurityGroup{ - Name: s(b.Cluster.Spec.MasterPublicName), + Name: s(b.Cluster.Spec.API.PublicName), Lifecycle: b.Lifecycle, RemoveExtraRules: []string{"port=443"}, } @@ -626,7 +626,7 @@ func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error { sg.RemoveGroup = true } c.AddTask(sg) - sgMap[b.Cluster.Spec.MasterPublicName] = sg + sgMap[b.Cluster.Spec.API.PublicName] = sg for _, role := range roles { // Create Security Group for Role diff --git a/pkg/model/openstackmodel/network.go b/pkg/model/openstackmodel/network.go index 68b44850500f6..d856b7a9e9890 100644 --- a/pkg/model/openstackmodel/network.go +++ b/pkg/model/openstackmodel/network.go @@ -61,10 +61,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { routerName := strings.Replace(clusterName, ".", "-", -1) for _, sp := range b.Cluster.Spec.Subnets { // assumes that we do not need to create routers if we use existing subnets - if sp.ProviderID != "" { + if sp.ID != "" { needRouter = false } - subnetName, err := b.findSubnetNameByID(sp.ProviderID, sp.Name) + subnetName, err := b.findSubnetNameByID(sp.ID, sp.Name) if err != nil { return err } @@ -77,10 +77,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error { Tag: s(clusterName), } if osSpec.Router != nil && osSpec.Router.DNSServers != nil { - dnsSplitted := strings.Split(fi.StringValue(osSpec.Router.DNSServers), ",") + dnsSplitted := strings.Split(fi.ValueOf(osSpec.Router.DNSServers), ",") dnsNameSrv := make([]*string, len(dnsSplitted)) for i, ns := range dnsSplitted { - dnsNameSrv[i] = fi.String(ns) + dnsNameSrv[i] = fi.PtrTo(ns) } t.DNSServers = dnsNameSrv } diff --git 
a/pkg/model/openstackmodel/servergroup.go b/pkg/model/openstackmodel/servergroup.go index 38e631a801f52..42cab2d89dee0 100644 --- a/pkg/model/openstackmodel/servergroup.go +++ b/pkg/model/openstackmodel/servergroup.go @@ -106,7 +106,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * securityGroups = append(securityGroups, b.LinkToSecurityGroup(securityGroupName)) if b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer == nil && ig.Spec.Role == kops.InstanceGroupRoleMaster { - securityGroups = append(securityGroups, b.LinkToSecurityGroup(b.Cluster.Spec.MasterPublicName)) + securityGroups = append(securityGroups, b.LinkToSecurityGroup(b.Cluster.Spec.API.PublicName)) } r := strings.NewReplacer("_", "-", ".", "-") @@ -117,7 +117,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * // FIXME: Must ensure 63 or less characters // replace all dots and _ with -, this is needed to get external cloudprovider working iName := strings.Replace(strings.ToLower(fmt.Sprintf("%s-%d.%s", ig.Name, i+1, b.ClusterName())), "_", "-", -1) - instanceName := fi.String(strings.Replace(iName, ".", "-", -1)) + instanceName := fi.PtrTo(strings.Replace(iName, ".", "-", -1)) var az *string var subnets []*openstacktasks.Subnet @@ -125,9 +125,9 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * subnet := ig.Spec.Subnets[int(i)%len(ig.Spec.Subnets)] // bastion subnet name might contain a "utility-" prefix if ig.Spec.Role == kops.InstanceGroupRoleBastion { - az = fi.String(strings.Replace(subnet, "utility-", "", 1)) + az = fi.PtrTo(strings.Replace(subnet, "utility-", "", 1)) } else { - az = fi.String(subnet) + az = fi.PtrTo(subnet) } subnetName, err := b.findSubnetClusterSpec(subnet) @@ -138,7 +138,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * } if len(ig.Spec.Zones) > 0 { zone := ig.Spec.Zones[int(i)%len(ig.Spec.Zones)] - az = fi.String(zone) + az = fi.PtrTo(zone) } // Create instance port task portName := fmt.Sprintf("%s-%s", "port", *instanceName) @@ -151,7 +151,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * ), ".", "-", -1, ) portTask := &openstacktasks.Port{ - Name: fi.String(portName), + Name: fi.PtrTo(portName), InstanceGroupName: &groupName, Network: b.LinkToNetwork(), Tags: []string{ @@ -170,17 +170,17 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * for k, v := range igMeta { metaWithName[k] = v } - metaWithName[openstack.TagKopsName] = fi.StringValue(instanceName) + metaWithName[openstack.TagKopsName] = fi.ValueOf(instanceName) instanceTask := &openstacktasks.Instance{ Name: instanceName, Lifecycle: b.Lifecycle, GroupName: s(groupName), - Region: fi.String(b.Cluster.Spec.Subnets[0].Region), - Flavor: fi.String(ig.Spec.MachineType), - Image: fi.String(ig.Spec.Image), - SSHKey: fi.String(sshKeyName), + Region: fi.PtrTo(b.Cluster.Spec.Subnets[0].Region), + Flavor: fi.PtrTo(ig.Spec.MachineType), + Image: fi.PtrTo(ig.Spec.Image), + SSHKey: fi.PtrTo(sshKeyName), ServerGroup: sg, - Role: fi.String(string(ig.Spec.Role)), + Role: fi.PtrTo(string(ig.Spec.Role)), Port: portTask, UserData: startupScript, Metadata: metaWithName, @@ -193,13 +193,13 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * // Associate a floating IP to the instances if we have external network in router // and respective topology is "public" if b.Cluster.Spec.CloudProvider.Openstack.Router != nil { - if 
ig.Spec.AssociatePublicIP != nil && !fi.BoolValue(ig.Spec.AssociatePublicIP) { + if ig.Spec.AssociatePublicIP != nil && !fi.ValueOf(ig.Spec.AssociatePublicIP) { continue } switch ig.Spec.Role { case kops.InstanceGroupRoleBastion: t := &openstacktasks.FloatingIP{ - Name: fi.String(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)), + Name: fi.PtrTo(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)), Lifecycle: b.Lifecycle, } c.AddTask(t) @@ -208,7 +208,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * if b.Cluster.Spec.Topology == nil || b.Cluster.Spec.Topology.ControlPlane != kops.TopologyPrivate { t := &openstacktasks.FloatingIP{ - Name: fi.String(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)), + Name: fi.PtrTo(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)), Lifecycle: b.Lifecycle, } c.AddTask(t) @@ -218,7 +218,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg * default: if b.Cluster.Spec.Topology == nil || b.Cluster.Spec.Topology.Nodes != kops.TopologyPrivate { t := &openstacktasks.FloatingIP{ - Name: fi.String(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)), + Name: fi.PtrTo(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)), Lifecycle: b.Lifecycle, } c.AddTask(t) @@ -274,7 +274,7 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { var err error for _, sp := range b.Cluster.Spec.Subnets { if sp.Type == kops.SubnetTypeDualStack || sp.Type == kops.SubnetTypePrivate { - lbSubnetName, err = b.findSubnetNameByID(sp.ProviderID, sp.Name) + lbSubnetName, err = b.findSubnetNameByID(sp.ID, sp.Name) if err != nil { return err } @@ -285,20 +285,20 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { return fmt.Errorf("could not find subnet for master loadbalancer") } lbTask := &openstacktasks.LB{ - Name: fi.String(b.Cluster.Spec.MasterPublicName), - Subnet: fi.String(lbSubnetName), + Name: fi.PtrTo(b.Cluster.Spec.API.PublicName), + Subnet: fi.PtrTo(lbSubnetName), Lifecycle: b.Lifecycle, } useVIPACL := b.UseVIPACL() if !useVIPACL { - lbTask.SecurityGroup = b.LinkToSecurityGroup(b.Cluster.Spec.MasterPublicName) + lbTask.SecurityGroup = b.LinkToSecurityGroup(b.Cluster.Spec.API.PublicName) } c.AddTask(lbTask) lbfipTask := &openstacktasks.FloatingIP{ - Name: fi.String(fmt.Sprintf("%s-%s", "fip", *lbTask.Name)), + Name: fi.PtrTo(fmt.Sprintf("%s-%s", "fip", *lbTask.Name)), LB: lbTask, Lifecycle: b.Lifecycle, } @@ -309,7 +309,7 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { } poolTask := &openstacktasks.LBPool{ - Name: fi.String(fmt.Sprintf("%s-https", fi.StringValue(lbTask.Name))), + Name: fi.PtrTo(fmt.Sprintf("%s-https", fi.ValueOf(lbTask.Name))), Loadbalancer: lbTask, Lifecycle: b.Lifecycle, } @@ -323,7 +323,7 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { if useVIPACL { var AllowedCIDRs []string // currently kOps openstack supports only ipv4 addresses - for _, CIDR := range b.Cluster.Spec.KubernetesAPIAccess { + for _, CIDR := range b.Cluster.Spec.API.Access { if net.IsIPv4CIDRString(CIDR) { AllowedCIDRs = append(AllowedCIDRs, CIDR) } @@ -349,10 +349,10 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error { Name: mastersg.Name, Pool: poolTask, ServerGroup: mastersg, - InterfaceName: fi.String(ifName), - ProtocolPort: fi.Int(443), + InterfaceName: fi.PtrTo(ifName), + ProtocolPort: fi.PtrTo(443), Lifecycle: b.Lifecycle, - Weight: fi.Int(1), + Weight: fi.PtrTo(1), } c.AddTask(associateTask) diff --git 
a/pkg/model/openstackmodel/servergroup_test.go b/pkg/model/openstackmodel/servergroup_test.go index fcf68669f6bdb..e986f1db2ff74 100644 --- a/pkg/model/openstackmodel/servergroup_test.go +++ b/pkg/model/openstackmodel/servergroup_test.go @@ -49,14 +49,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -107,14 +109,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -193,14 +197,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -315,14 +321,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "tom-software-dev-playground-real33-k8s-local", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -373,15 +381,17 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Loadbalancer: &kops.OpenstackLoadbalancerConfig{}, Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -502,11 +512,13 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -621,14 +633,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: 
"cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -703,14 +717,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -736,7 +752,7 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { MachineType: "blc.1-2", Subnets: []string{"subnet"}, Zones: []string{"zone-1"}, - AssociatePublicIP: fi.Bool(false), + AssociatePublicIP: fi.PtrTo(false), }, }, { @@ -751,7 +767,7 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { MachineType: "blc.2-4", Subnets: []string{"subnet"}, Zones: []string{"zone-1"}, - AssociatePublicIP: fi.Bool(false), + AssociatePublicIP: fi.PtrTo(false), }, }, }, @@ -763,14 +779,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Router: &kops.OpenstackRouter{ - ExternalNetwork: fi.String("test"), + ExternalNetwork: fi.PtrTo("test"), }, Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -800,7 +818,7 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { MachineType: "blc.1-2", Subnets: []string{"subnet"}, Zones: []string{"zone-1"}, - AssociatePublicIP: fi.Bool(false), + AssociatePublicIP: fi.PtrTo(false), }, }, { @@ -815,7 +833,7 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { MachineType: "blc.1-2", Subnets: []string{"subnet"}, Zones: []string{"zone-1"}, - AssociatePublicIP: fi.Bool(false), + AssociatePublicIP: fi.PtrTo(false), }, }, { @@ -837,7 +855,7 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { MachineType: "blc.1-2", Subnets: []string{"utility-subnet"}, Zones: []string{"zone-1"}, - AssociatePublicIP: fi.Bool(false), + AssociatePublicIP: fi.PtrTo(false), }, }, }, @@ -849,11 +867,13 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -893,11 +913,13 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ 
Openstack: &kops.OpenstackSpec{ Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -939,11 +961,13 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -983,11 +1007,13 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -1027,11 +1053,13 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -1071,11 +1099,13 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput { Name: "cluster", }, Spec: kops.ClusterSpec{ - MasterPublicName: "master-public-name", + API: kops.APISpec{ + PublicName: "master-public-name", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Metadata: &kops.OpenstackMetadata{ - ConfigDrive: fi.Bool(false), + ConfigDrive: fi.PtrTo(false), }, }, }, @@ -1182,7 +1212,7 @@ func RunGoldenTest(t *testing.T, basedir string, testCase serverGroupModelBuilde // We need the CA and service-account for the bootstrap script caTask := &fitasks.Keypair{ - Name: fi.String(fi.CertificateIDCA), + Name: fi.PtrTo(fi.CertificateIDCA), Subject: "cn=kubernetes", Type: "ca", } @@ -1197,7 +1227,7 @@ func RunGoldenTest(t *testing.T, basedir string, testCase serverGroupModelBuilde "service-account", } { task := &fitasks.Keypair{ - Name: fi.String(keypair), + Name: fi.PtrTo(keypair), Subject: "cn=" + keypair, Type: "ca", } @@ -1208,7 +1238,7 @@ func RunGoldenTest(t *testing.T, basedir string, testCase serverGroupModelBuilde "kube-proxy", } { task := &fitasks.Keypair{ - Name: fi.String(keypair), + Name: fi.PtrTo(keypair), Subject: "cn=" + keypair, Signer: caTask, Type: "client", diff --git a/pkg/model/pki.go b/pkg/model/pki.go index 241a16d51ca39..9371e9acb25d0 100644 --- a/pkg/model/pki.go +++ b/pkg/model/pki.go @@ -38,7 +38,7 @@ var _ fi.ModelBuilder = &PKIModelBuilder{} func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { // TODO: Only create the CA via this task defaultCA := &fitasks.Keypair{ - Name: fi.String(fi.CertificateIDCA), + Name: fi.PtrTo(fi.CertificateIDCA), Lifecycle: b.Lifecycle, Subject: "cn=kubernetes-ca", Type: "ca", @@ -50,7 +50,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { // block at the IAM level for AWS cluster for pre-existing clusters. 
if !b.UseKopsControllerForNodeBootstrap() && !b.UseBootstrapTokens() { c.AddTask(&fitasks.Keypair{ - Name: fi.String("kubelet"), + Name: fi.PtrTo("kubelet"), Lifecycle: b.Lifecycle, Subject: "o=" + rbac.NodesGroup + ",cn=kubelet", Type: "client", @@ -61,7 +61,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { if !b.UseKopsControllerForNodeBootstrap() { t := &fitasks.Keypair{ - Name: fi.String("kube-proxy"), + Name: fi.PtrTo("kube-proxy"), Lifecycle: b.Lifecycle, Subject: "cn=" + rbac.KubeProxy, Type: "client", @@ -72,7 +72,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { if b.KopsModelContext.Cluster.Spec.Networking.Kuberouter != nil && !b.UseKopsControllerForNodeBootstrap() { t := &fitasks.Keypair{ - Name: fi.String("kube-router"), + Name: fi.PtrTo("kube-router"), Lifecycle: b.Lifecycle, Subject: "cn=" + rbac.KubeRouter, Type: "client", @@ -83,7 +83,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { { aggregatorCA := &fitasks.Keypair{ - Name: fi.String("apiserver-aggregator-ca"), + Name: fi.PtrTo("apiserver-aggregator-ca"), Lifecycle: b.Lifecycle, Subject: "cn=apiserver-aggregator-ca", Type: "ca", @@ -94,7 +94,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { { serviceAccount := &fitasks.Keypair{ // We only need the private key, but it's easier to create a certificate as well. - Name: fi.String("service-account"), + Name: fi.PtrTo("service-account"), Lifecycle: b.Lifecycle, Subject: "cn=service-account", Type: "ca", @@ -120,7 +120,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { // @note: the certificate used by the node authorizers c.AddTask(&fitasks.Keypair{ - Name: fi.String("node-authorizer"), + Name: fi.PtrTo("node-authorizer"), Lifecycle: b.Lifecycle, Subject: "cn=node-authorizaer", Type: "server", @@ -130,7 +130,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { // @note: we use this for mutual tls between node and authorizer c.AddTask(&fitasks.Keypair{ - Name: fi.String("node-authorizer-client"), + Name: fi.PtrTo("node-authorizer-client"), Lifecycle: b.Lifecycle, Subject: "cn=node-authorizer-client", Type: "client", @@ -140,7 +140,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { // Create auth tokens (though this is deprecated) for _, x := range tokens.GetKubernetesAuthTokens_Deprecated() { - c.AddTask(&fitasks.Secret{Name: fi.String(x), Lifecycle: b.Lifecycle}) + c.AddTask(&fitasks.Secret{Name: fi.PtrTo(x), Lifecycle: b.Lifecycle}) } { @@ -150,7 +150,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { } t := &fitasks.MirrorSecrets{ - Name: fi.String("mirror-secrets"), + Name: fi.PtrTo("mirror-secrets"), Lifecycle: b.Lifecycle, MirrorPath: mirrorPath, } @@ -165,7 +165,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { // Keypair used by the kubelet t := &fitasks.MirrorKeystore{ - Name: fi.String("mirror-keystore"), + Name: fi.PtrTo("mirror-keystore"), Lifecycle: b.Lifecycle, MirrorPath: mirrorPath, } diff --git a/pkg/resources/aws/aws.go b/pkg/resources/aws/aws.go index 86f1ec38b2fcd..0f006b32077a5 100644 --- a/pkg/resources/aws/aws.go +++ b/pkg/resources/aws/aws.go @@ -608,7 +608,7 @@ func ListVolumes(cloud fi.Cloud, clusterName string) ([]*resources.Resource, err } var blocks []string - // blocks = append(blocks, "vpc:" + aws.StringValue(rt.VpcId)) + // blocks = append(blocks, "vpc:" + aws.ValueOf(rt.VpcId)) resourceTracker.Blocks = blocks @@ -1398,7 +1398,7 @@ func 
DeleteAutoScalingGroupLaunchTemplate(cloud fi.Cloud, r *resources.Resource) klog.V(2).Infof("Deleting EC2 LaunchTemplate %q", r.ID) if _, err := c.EC2().DeleteLaunchTemplate(&ec2.DeleteLaunchTemplateInput{ - LaunchTemplateId: fi.String(r.ID), + LaunchTemplateId: fi.PtrTo(r.ID), }); err != nil { return fmt.Errorf("error deleting ec2 LaunchTemplate %q: %v", r.ID, err) } @@ -2022,7 +2022,7 @@ func ListIAMRoles(cloud fi.Cloud, clusterName string) ([]*resources.Resource, er return false } for _, tag := range roleOutput.Role.Tags { - if fi.StringValue(tag.Key) == ownershipTag && fi.StringValue(tag.Value) == "owned" { + if fi.ValueOf(tag.Key) == ownershipTag && fi.ValueOf(tag.Value) == "owned" { resourceTracker := &resources.Resource{ Name: name, ID: name, @@ -2107,7 +2107,7 @@ func ListIAMInstanceProfiles(cloud fi.Cloud, clusterName string) ([]*resources.R return false } for _, tag := range profileOutput.InstanceProfile.Tags { - if fi.StringValue(tag.Key) == ownershipTag && fi.StringValue(tag.Value) == "owned" { + if fi.ValueOf(tag.Key) == ownershipTag && fi.ValueOf(tag.Value) == "owned" { profiles = append(profiles, p) } } @@ -2186,7 +2186,7 @@ func ListIAMOIDCProviders(cloud fi.Cloud, clusterName string) ([]*resources.Reso func DeleteIAMOIDCProvider(cloud fi.Cloud, r *resources.Resource) error { c := cloud.(awsup.AWSCloud) - arn := fi.String(r.ID) + arn := fi.PtrTo(r.ID) { klog.V(2).Infof("Deleting IAM OIDC Provider %v", arn) request := &iam.DeleteOpenIDConnectProviderInput{ diff --git a/pkg/resources/aws/aws_test.go b/pkg/resources/aws/aws_test.go index 799db316522ac..57306deb23b79 100644 --- a/pkg/resources/aws/aws_test.go +++ b/pkg/resources/aws/aws_test.go @@ -108,7 +108,7 @@ func TestListIAMInstanceProfiles(t *testing.T) { tags := []*iam.Tag{ { Key: &ownershipTagKey, - Value: fi.String("owned"), + Value: fi.PtrTo("owned"), }, } @@ -137,7 +137,7 @@ func TestListIAMInstanceProfiles(t *testing.T) { Tags: []*iam.Tag{ { Key: &owner, - Value: fi.String("owned"), + Value: fi.PtrTo("owned"), }, }, } @@ -182,7 +182,7 @@ func TestListIAMRoles(t *testing.T) { tags := []*iam.Tag{ { Key: &ownershipTagKey, - Value: fi.String("owned"), + Value: fi.PtrTo("owned"), }, } @@ -211,7 +211,7 @@ func TestListIAMRoles(t *testing.T) { Tags: []*iam.Tag{ { Key: &owner, - Value: fi.String("owned"), + Value: fi.PtrTo("owned"), }, }, } @@ -352,12 +352,12 @@ func TestMatchesElbTags(t *testing.T) { tags: map[string]string{"tagkey1": "tagvalue1"}, actual: []*elb.Tag{ { - Key: fi.String("tagkey1"), - Value: fi.String("tagvalue1"), + Key: fi.PtrTo("tagkey1"), + Value: fi.PtrTo("tagvalue1"), }, { - Key: fi.String("tagkey2"), - Value: fi.String("tagvalue2"), + Key: fi.PtrTo("tagkey2"), + Value: fi.PtrTo("tagvalue2"), }, }, expected: true, @@ -366,12 +366,12 @@ func TestMatchesElbTags(t *testing.T) { tags: map[string]string{"tagkey2": "tagvalue2"}, actual: []*elb.Tag{ { - Key: fi.String("tagkey1"), - Value: fi.String("tagvalue1"), + Key: fi.PtrTo("tagkey1"), + Value: fi.PtrTo("tagvalue1"), }, { - Key: fi.String("tagkey2"), - Value: fi.String("tagvalue2"), + Key: fi.PtrTo("tagkey2"), + Value: fi.PtrTo("tagvalue2"), }, }, expected: true, @@ -380,12 +380,12 @@ func TestMatchesElbTags(t *testing.T) { tags: map[string]string{"tagkey3": "tagvalue3"}, actual: []*elb.Tag{ { - Key: fi.String("tagkey1"), - Value: fi.String("tagvalue1"), + Key: fi.PtrTo("tagkey1"), + Value: fi.PtrTo("tagvalue1"), }, { - Key: fi.String("tagkey2"), - Value: fi.String("tagvalue2"), + Key: fi.PtrTo("tagkey2"), + Value: fi.PtrTo("tagvalue2"), }, }, expected: 
false, diff --git a/pkg/resources/spotinst/aws.go b/pkg/resources/spotinst/aws.go index 36e4ef07251df..9c0fd03d1a08e 100644 --- a/pkg/resources/spotinst/aws.go +++ b/pkg/resources/spotinst/aws.go @@ -66,13 +66,13 @@ func (x *awsElastigroupService) Create(ctx context.Context, group InstanceGroup) return "", err } - return fi.StringValue(output.Group.ID), nil + return fi.ValueOf(output.Group.ID), nil } // Read returns an existing InstanceGroup by ID. func (x *awsElastigroupService) Read(ctx context.Context, groupID string) (InstanceGroup, error) { input := &awseg.ReadGroupInput{ - GroupID: fi.String(groupID), + GroupID: fi.PtrTo(groupID), } output, err := x.svc.Read(ctx, input) @@ -96,7 +96,7 @@ func (x *awsElastigroupService) Update(ctx context.Context, group InstanceGroup) // Delete deletes an existing InstanceGroup by ID. func (x *awsElastigroupService) Delete(ctx context.Context, groupID string) error { input := &awseg.DeleteGroupInput{ - GroupID: fi.String(groupID), + GroupID: fi.PtrTo(groupID), } _, err := x.svc.Delete(ctx, input) @@ -106,10 +106,10 @@ func (x *awsElastigroupService) Delete(ctx context.Context, groupID string) erro // Detach removes one or more instances from the specified InstanceGroup. func (x *awsElastigroupService) Detach(ctx context.Context, groupID string, instanceIDs []string) error { input := &awseg.DetachGroupInput{ - GroupID: fi.String(groupID), + GroupID: fi.PtrTo(groupID), InstanceIDs: instanceIDs, - ShouldDecrementTargetCapacity: fi.Bool(false), - ShouldTerminateInstances: fi.Bool(true), + ShouldDecrementTargetCapacity: fi.PtrTo(false), + ShouldTerminateInstances: fi.PtrTo(true), } _, err := x.svc.Detach(ctx, input) @@ -119,7 +119,7 @@ func (x *awsElastigroupService) Detach(ctx context.Context, groupID string, inst // Instances returns a list of all instances that belong to specified InstanceGroup. func (x *awsElastigroupService) Instances(ctx context.Context, groupID string) ([]Instance, error) { input := &awseg.StatusGroupInput{ - GroupID: fi.String(groupID), + GroupID: fi.PtrTo(groupID), } output, err := x.svc.Status(ctx, input) @@ -165,13 +165,13 @@ func (x *awsOceanService) Create(ctx context.Context, group InstanceGroup) (stri return "", err } - return fi.StringValue(output.Cluster.ID), nil + return fi.ValueOf(output.Cluster.ID), nil } // Read returns an existing InstanceGroup by ID. func (x *awsOceanService) Read(ctx context.Context, clusterID string) (InstanceGroup, error) { input := &awsoc.ReadClusterInput{ - ClusterID: fi.String(clusterID), + ClusterID: fi.PtrTo(clusterID), } output, err := x.svc.ReadCluster(ctx, input) @@ -195,7 +195,7 @@ func (x *awsOceanService) Update(ctx context.Context, group InstanceGroup) error // Delete deletes an existing InstanceGroup by ID. func (x *awsOceanService) Delete(ctx context.Context, clusterID string) error { input := &awsoc.DeleteClusterInput{ - ClusterID: fi.String(clusterID), + ClusterID: fi.PtrTo(clusterID), } _, err := x.svc.DeleteCluster(ctx, input) @@ -205,10 +205,10 @@ func (x *awsOceanService) Delete(ctx context.Context, clusterID string) error { // Detach removes one or more instances from the specified InstanceGroup. 
func (x *awsOceanService) Detach(ctx context.Context, clusterID string, instanceIDs []string) error { input := &awsoc.DetachClusterInstancesInput{ - ClusterID: fi.String(clusterID), + ClusterID: fi.PtrTo(clusterID), InstanceIDs: instanceIDs, - ShouldDecrementTargetCapacity: fi.Bool(false), - ShouldTerminateInstances: fi.Bool(true), + ShouldDecrementTargetCapacity: fi.PtrTo(false), + ShouldTerminateInstances: fi.PtrTo(true), } _, err := x.svc.DetachClusterInstances(ctx, input) @@ -218,7 +218,7 @@ func (x *awsOceanService) Detach(ctx context.Context, clusterID string, instance // Instances returns a list of all instances that belong to specified InstanceGroup. func (x *awsOceanService) Instances(ctx context.Context, clusterID string) ([]Instance, error) { input := &awsoc.ListClusterInstancesInput{ - ClusterID: fi.String(clusterID), + ClusterID: fi.PtrTo(clusterID), } output, err := x.svc.ListClusterInstances(ctx, input) @@ -241,7 +241,7 @@ type awsOceanLaunchSpecService struct { // List returns a list of LaunchSpecs. func (x *awsOceanLaunchSpecService) List(ctx context.Context, oceanID string) ([]LaunchSpec, error) { input := &awsoc.ListLaunchSpecsInput{ - OceanID: fi.String(oceanID), + OceanID: fi.PtrTo(oceanID), } output, err := x.svc.ListLaunchSpecs(ctx, input) @@ -268,13 +268,13 @@ func (x *awsOceanLaunchSpecService) Create(ctx context.Context, spec LaunchSpec) return "", err } - return fi.StringValue(output.LaunchSpec.ID), nil + return fi.ValueOf(output.LaunchSpec.ID), nil } // Read returns an existing LaunchSpec by ID. func (x *awsOceanLaunchSpecService) Read(ctx context.Context, specID string) (LaunchSpec, error) { input := &awsoc.ReadLaunchSpecInput{ - LaunchSpecID: fi.String(specID), + LaunchSpecID: fi.PtrTo(specID), } output, err := x.svc.ReadLaunchSpec(ctx, input) @@ -298,7 +298,7 @@ func (x *awsOceanLaunchSpecService) Update(ctx context.Context, spec LaunchSpec) // Delete deletes an existing LaunchSpec by ID. func (x *awsOceanLaunchSpecService) Delete(ctx context.Context, specID string) error { input := &awsoc.DeleteLaunchSpecInput{ - LaunchSpecID: fi.String(specID), + LaunchSpecID: fi.PtrTo(specID), } _, err := x.svc.DeleteLaunchSpec(ctx, input) @@ -311,7 +311,7 @@ type awsElastigroupInstanceGroup struct { // Id returns the ID of the InstanceGroup. func (x *awsElastigroupInstanceGroup) Id() string { - return fi.StringValue(x.obj.ID) + return fi.ValueOf(x.obj.ID) } // Type returns the type of the InstanceGroup. @@ -321,17 +321,17 @@ func (x *awsElastigroupInstanceGroup) Type() InstanceGroupType { // Name returns the name of the InstanceGroup. func (x *awsElastigroupInstanceGroup) Name() string { - return fi.StringValue(x.obj.Name) + return fi.ValueOf(x.obj.Name) } // MinSize returns the minimum size of the InstanceGroup. func (x *awsElastigroupInstanceGroup) MinSize() int { - return fi.IntValue(x.obj.Capacity.Minimum) + return fi.ValueOf(x.obj.Capacity.Minimum) } // MaxSize returns the maximum size of the InstanceGroup. func (x *awsElastigroupInstanceGroup) MaxSize() int { - return fi.IntValue(x.obj.Capacity.Maximum) + return fi.ValueOf(x.obj.Capacity.Maximum) } // CreatedAt returns the timestamp when the InstanceGroup has been created. @@ -355,7 +355,7 @@ type awsElastigroupInstance struct { // Id returns the ID of the instance. func (x *awsElastigroupInstance) Id() string { - return fi.StringValue(x.obj.ID) + return fi.ValueOf(x.obj.ID) } // CreatedAt returns the timestamp when the Instance has been created. 
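Note on the rename running through the hunks above and below: kops is collapsing its per-type pointer helpers (`fi.String`, `fi.Bool`, `fi.Int32`, `fi.Int64`, and the matching `*Value` getters) into one generic pair, `fi.PtrTo` and `fi.ValueOf`. The real definitions live in `upup/pkg/fi`; the following is only a minimal, self-contained sketch of the semantics these call sites rely on, assuming the obvious generic implementation:

```go
package main

import "fmt"

// PtrTo returns a pointer to (a copy of) v. One generic helper replaces
// the whole fi.String / fi.Bool / fi.Int / fi.Int32 / fi.Int64 family.
func PtrTo[T any](v T) *T {
	return &v
}

// ValueOf dereferences p, returning T's zero value when p is nil, which
// matches the nil-safe behavior of fi.StringValue, fi.BoolValue,
// fi.IntValue, fi.Int32Value, and friends.
func ValueOf[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

func main() {
	groupID := PtrTo("sig-123")   // was fi.String("sig-123")
	var minSize *int32            // e.g. an unset Capacity.Minimum
	fmt.Println(ValueOf(groupID)) // "sig-123"
	fmt.Println(ValueOf(minSize)) // 0 (nil-safe, as fi.Int32Value was)
}
```

Because the type parameter is inferred from the argument, rewrites such as `fi.Int32Value(ig.Spec.MinSize)` to `fi.ValueOf(ig.Spec.MinSize)` are one-for-one and behavior-preserving, which is why this patch can apply them mechanically across every cloud provider model.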
@@ -374,7 +374,7 @@ type awsOceanInstanceGroup struct { // Id returns the ID of the InstanceGroup. func (x *awsOceanInstanceGroup) Id() string { - return fi.StringValue(x.obj.ID) + return fi.ValueOf(x.obj.ID) } // Type returns the type of the InstanceGroup. @@ -384,17 +384,17 @@ func (x *awsOceanInstanceGroup) Type() InstanceGroupType { // Name returns the name of the InstanceGroup. func (x *awsOceanInstanceGroup) Name() string { - return fi.StringValue(x.obj.Name) + return fi.ValueOf(x.obj.Name) } // MinSize returns the minimum size of the InstanceGroup. func (x *awsOceanInstanceGroup) MinSize() int { - return fi.IntValue(x.obj.Capacity.Minimum) + return fi.ValueOf(x.obj.Capacity.Minimum) } // MaxSize returns the maximum size of the InstanceGroup. func (x *awsOceanInstanceGroup) MaxSize() int { - return fi.IntValue(x.obj.Capacity.Maximum) + return fi.ValueOf(x.obj.Capacity.Maximum) } // CreatedAt returns the timestamp when the InstanceGroup has been created. @@ -418,7 +418,7 @@ type awsOceanInstance struct { // Id returns the ID of the instance. func (x *awsOceanInstance) Id() string { - return fi.StringValue(x.obj.ID) + return fi.ValueOf(x.obj.ID) } // CreatedAt returns the timestamp when the Instance has been created. @@ -437,17 +437,17 @@ type awsOceanLaunchSpec struct { // Id returns the ID of the LaunchSpec. func (x *awsOceanLaunchSpec) Id() string { - return fi.StringValue(x.obj.ID) + return fi.ValueOf(x.obj.ID) } // Name returns the name of the LaunchSpec. func (x *awsOceanLaunchSpec) Name() string { - return fi.StringValue(x.obj.Name) + return fi.ValueOf(x.obj.Name) } // OceanId returns the ID of the Ocean instance group. func (x *awsOceanLaunchSpec) OceanId() string { - return fi.StringValue(x.obj.OceanID) + return fi.ValueOf(x.obj.OceanID) } // CreatedAt returns the timestamp when the LaunchSpec has been created. 
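The test fixtures from here on also switch from the removed top-level `MasterPublicName` / `KubernetesAPIAccess` fields to the consolidated `Spec.API` block. A trimmed-down illustration of the shape these tests assume (the real `kops.APISpec` carries more fields, e.g. `DNS` and `LoadBalancer`):

```go
package main

import "fmt"

// Stand-ins for the kops cluster spec, reduced to the fields this
// patch touches; names follow what the tests below actually set.
type APISpec struct {
	PublicName string   // was ClusterSpec.MasterPublicName
	Access     []string // was ClusterSpec.KubernetesAPIAccess
}

type ClusterSpec struct {
	API APISpec
	// MasterInternalName has no replacement field: the templates and
	// the test harness simply drop it, consistent with kops deriving
	// the internal API name from the cluster name.
}

func main() {
	clusterName := "minimal.example.com"
	spec := ClusterSpec{API: APISpec{
		PublicName: fmt.Sprintf("api.%v", clusterName),
		Access:     []string{"0.0.0.0/0"},
	}}
	fmt.Printf("%+v\n", spec)
}
```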
diff --git a/pkg/testutils/cluster.go b/pkg/testutils/cluster.go index 351ae0552a526..e64d775545bcc 100644 --- a/pkg/testutils/cluster.go +++ b/pkg/testutils/cluster.go @@ -36,9 +36,8 @@ func BuildMinimalCluster(clusterName string) *kops.Cluster { c.Spec.ContainerRuntime = "containerd" c.Spec.Containerd = &kops.ContainerdConfig{} - c.Spec.MasterPublicName = fmt.Sprintf("api.%v", clusterName) - c.Spec.MasterInternalName = fmt.Sprintf("internal.api.%v", clusterName) - c.Spec.KubernetesAPIAccess = []string{"0.0.0.0/0"} + c.Spec.API.PublicName = fmt.Sprintf("api.%v", clusterName) + c.Spec.API.Access = []string{"0.0.0.0/0"} c.Spec.SSHAccess = []string{"0.0.0.0/0"} // Default to public topology @@ -64,7 +63,7 @@ func BuildMinimalCluster(clusterName string) *kops.Cluster { c.Spec.DNSZone = "test.com" - c.Spec.SSHKeyName = fi.String("test") + c.Spec.SSHKeyName = fi.PtrTo("test") addEtcdClusters(c) @@ -84,7 +83,7 @@ func addEtcdClusters(c *kops.Cluster) { for _, zone := range etcdZones { m := kops.EtcdMemberSpec{} m.Name = zone - m.InstanceGroup = fi.String("master-" + zone) + m.InstanceGroup = fi.PtrTo("master-" + zone) etcd.Members = append(etcd.Members, m) } c.Spec.EtcdClusters = append(c.Spec.EtcdClusters, etcd) diff --git a/pkg/testutils/integrationtestharness.go b/pkg/testutils/integrationtestharness.go index 57d4b63b627fd..c7b51b530e41d 100644 --- a/pkg/testutils/integrationtestharness.go +++ b/pkg/testutils/integrationtestharness.go @@ -314,11 +314,11 @@ func SetupMockOpenstack() *openstack.MockCloud { extNetworkName := "external" networkCreateOpts := networks.CreateOpts{ Name: extNetworkName, - AdminStateUp: fi.Bool(true), + AdminStateUp: fi.PtrTo(true), } extNetwork := external.CreateOptsExt{ CreateOptsBuilder: networkCreateOpts, - External: fi.Bool(true), + External: fi.PtrTo(true), } c.CreateNetwork(extNetwork) c.SetExternalNetwork(&extNetworkName) @@ -327,12 +327,12 @@ func SetupMockOpenstack() *openstack.MockCloud { extSubnet := subnets.CreateOpts{ Name: extSubnetName, NetworkID: extNetworkName, - EnableDHCP: fi.Bool(true), + EnableDHCP: fi.PtrTo(true), CIDR: "172.20.0.0/22", } c.CreateSubnet(extSubnet) - c.SetExternalSubnet(fi.String(extSubnetName)) - c.SetLBFloatingSubnet(fi.String(extSubnetName)) + c.SetExternalSubnet(fi.PtrTo(extSubnetName)) + c.SetLBFloatingSubnet(fi.PtrTo(extSubnetName)) images.Create(c.MockImageClient.ServiceClient(), images.CreateOpts{ Name: "Ubuntu-20.04", MinDisk: 12, @@ -341,13 +341,13 @@ func SetupMockOpenstack() *openstack.MockCloud { Name: "n1-standard-2", RAM: 8192, VCPUs: 4, - Disk: fi.Int(16), + Disk: fi.PtrTo(16), }) flavors.Create(c.MockNovaClient.ServiceClient(), flavors.CreateOpts{ Name: "n1-standard-1", RAM: 8192, VCPUs: 4, - Disk: fi.Int(16), + Disk: fi.PtrTo(16), }) zones.Create(c.MockDNSClient.ServiceClient(), zones.CreateOpts{ Name: "minimal-openstack.k8s.local", diff --git a/pkg/wellknownoperators/operators.go b/pkg/wellknownoperators/operators.go index 22bdbad4f36c7..ef5c03ae76e21 100644 --- a/pkg/wellknownoperators/operators.go +++ b/pkg/wellknownoperators/operators.go @@ -102,10 +102,10 @@ func (b *Builder) loadClusterPackage(u *unstructured.Unstructured) (*Package, er addon := &Package{ Manifest: manifestBytes, Spec: channelsapi.AddonSpec{ - Name: fi.String(operatorKey), + Name: fi.PtrTo(operatorKey), Id: id, Selector: map[string]string{"k8s-addon": operatorKey}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), }, } return addon, nil diff --git a/tests/codecs/componentconfig_test.go b/tests/codecs/componentconfig_test.go 
index 9cfabb0deb93d..3b1cd590e9258 100644 --- a/tests/codecs/componentconfig_test.go +++ b/tests/codecs/componentconfig_test.go @@ -39,6 +39,7 @@ kind: Cluster metadata: creationTimestamp: null spec: + api: {} cloudProvider: {} kubeControllerManager: {} kubelet: {} diff --git a/tests/e2e/pkg/tester/skip_regex.go b/tests/e2e/pkg/tester/skip_regex.go index abd76e540fa01..ad24a16aceca1 100644 --- a/tests/e2e/pkg/tester/skip_regex.go +++ b/tests/e2e/pkg/tester/skip_regex.go @@ -45,6 +45,13 @@ func (t *Tester) setSkipRegexFlag() error { skipRegex := skipRegexBase + // All the load balancer tests in the suite fail on IPv6; until now + // they were skipped only because they are tagged as [Slow]. + // Skip these tests temporarily, since they always fail on IPv6. + // TODO: aojea + // https://github.com/kubernetes/kubernetes/issues/113964 + skipRegex += "|LoadBalancers.should.be.able.to.preserve.UDP.traffic" + networking := cluster.Spec.Networking switch { case networking.Kubenet != nil, networking.Canal != nil, networking.Weave != nil, networking.Cilium != nil: @@ -113,7 +120,7 @@ func (t *Tester) setSkipRegexFlag() error { skipRegex += "|should.verify.that.all.nodes.have.volume.limits" } - if cluster.Spec.CloudConfig != nil && cluster.Spec.CloudConfig.AWSEBSCSIDriver != nil && fi.BoolValue(cluster.Spec.CloudConfig.AWSEBSCSIDriver.Enabled) { + if cluster.Spec.CloudConfig != nil && cluster.Spec.CloudConfig.AWSEBSCSIDriver != nil && fi.ValueOf(cluster.Spec.CloudConfig.AWSEBSCSIDriver.Enabled) { skipRegex += "|In-tree.Volumes.\\[Driver:.aws\\]" } diff --git a/tests/e2e/scenarios/keypair-rotation/run-test.sh b/tests/e2e/scenarios/keypair-rotation/run-test.sh index 5fba13fdc1b74..204cefaa24172 100755 --- a/tests/e2e/scenarios/keypair-rotation/run-test.sh +++ b/tests/e2e/scenarios/keypair-rotation/run-test.sh @@ -19,7 +19,7 @@ source "${REPO_ROOT}"/tests/e2e/scenarios/lib/common.sh kops-acquire-latest -OVERRIDES="${OVERRIDES} --master-size=t4g.medium --node-size=t4g.medium" +OVERRIDES="${OVERRIDES-} --master-size=t4g.medium --node-size=t4g.medium" kops-up diff --git a/tests/e2e/scenarios/podidentitywebhook/cluster.yaml.tmpl b/tests/e2e/scenarios/podidentitywebhook/cluster.yaml.tmpl index af55ccbcdba91..e50807357a012 100644 --- a/tests/e2e/scenarios/podidentitywebhook/cluster.yaml.tmpl +++ b/tests/e2e/scenarios/podidentitywebhook/cluster.yaml.tmpl @@ -30,7 +30,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: {{.kubernetesVersion}} - masterInternalName: api.internal.{{.clusterName}} masterPublicName: api.{{.clusterName}} networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/e2e/templates/apiserver-dns-none.yaml.tmpl b/tests/e2e/templates/apiserver-dns-none.yaml.tmpl index 290e110699c43..3f89ca89c0191 100644 --- a/tests/e2e/templates/apiserver-dns-none.yaml.tmpl +++ b/tests/e2e/templates/apiserver-dns-none.yaml.tmpl @@ -26,7 +26,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: {{.kubernetesVersion}} - masterInternalName: api.internal.{{.clusterName}} networkCIDR: 172.20.0.0/16 networking: calico: {} diff --git a/tests/e2e/templates/apiserver.yaml.tmpl b/tests/e2e/templates/apiserver.yaml.tmpl index 9d53ba88ae672..ccaf4778c489d 100644 --- a/tests/e2e/templates/apiserver.yaml.tmpl +++ b/tests/e2e/templates/apiserver.yaml.tmpl @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: {{.kubernetesVersion}} - masterInternalName: api.internal.{{.clusterName}} masterPublicName: api.{{.clusterName}} networkCIDR: 172.20.0.0/16 networking: diff --git 
a/tests/e2e/templates/many-addons.yaml.tmpl b/tests/e2e/templates/many-addons.yaml.tmpl index 5415084ce9463..a8b8ce25016a6 100644 --- a/tests/e2e/templates/many-addons.yaml.tmpl +++ b/tests/e2e/templates/many-addons.yaml.tmpl @@ -35,7 +35,6 @@ spec: provider: CoreDNS nodeLocalDNS: enabled: true - masterInternalName: api.internal.{{.clusterName}} masterPublicName: api.{{.clusterName}} metricsServer: enabled: true @@ -89,7 +88,7 @@ metadata: kops.k8s.io/cluster: {{$.clusterName}} spec: associatePublicIp: true - image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20201112.1 + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221115.1 machineType: t3.medium maxSize: 4 minSize: 4 @@ -108,7 +107,7 @@ metadata: kops.k8s.io/cluster: {{$.clusterName}} spec: associatePublicIp: true - image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20201112.1 + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221115.1 machineType: c5.large maxSize: 1 minSize: 1 diff --git a/tests/e2e/templates/simple.yaml.tmpl b/tests/e2e/templates/simple.yaml.tmpl index cfe957efa6e6c..4708d0908cc7e 100644 --- a/tests/e2e/templates/simple.yaml.tmpl +++ b/tests/e2e/templates/simple.yaml.tmpl @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: {{.kubernetesVersion}} - masterInternalName: api.internal.{{.clusterName}} masterPublicName: api.{{.clusterName}} networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/e2e/templates/staticcpumanagerpolicy.tmpl b/tests/e2e/templates/staticcpumanagerpolicy.tmpl index 218945e1835c3..99371582bd073 100644 --- a/tests/e2e/templates/staticcpumanagerpolicy.tmpl +++ b/tests/e2e/templates/staticcpumanagerpolicy.tmpl @@ -32,7 +32,6 @@ spec: ephemeral-storage: 1Gi memory: 120Mi kubernetesVersion: {{.kubernetesVersion}} - masterInternalName: api.internal.{{.clusterName}} masterPublicName: api.{{.clusterName}} networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/aws/v1alpha2.yaml b/tests/integration/conversion/aws/v1alpha2.yaml index 94b71a916ea11..bd0b2b49d59bb 100644 --- a/tests/integration/conversion/aws/v1alpha2.yaml +++ b/tests/integration/conversion/aws/v1alpha2.yaml @@ -11,6 +11,9 @@ spec: - manifest: s3://somebucket/example.yaml api: dns: {} + loadBalancer: + class: Network + type: Public authorization: alwaysAllow: {} channel: stable @@ -42,7 +45,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/aws/v1alpha3.yaml b/tests/integration/conversion/aws/v1alpha3.yaml index bf6f08a183711..a31637ba6ba25 100644 --- a/tests/integration/conversion/aws/v1alpha3.yaml +++ b/tests/integration/conversion/aws/v1alpha3.yaml @@ -4,12 +4,18 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com addons: - manifest: s3://somebucket/example.yaml api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + loadBalancer: + class: Network + type: Public + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -38,11 +44,7 @@ spec: - enabled: true name: hookEnabled iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com 
networkCIDR: 172.20.0.0/16 networking: kubenet: {} diff --git a/tests/integration/conversion/azure/v1alpha2.yaml b/tests/integration/conversion/azure/v1alpha2.yaml index ffc1558c2ef09..be92c5643fcaa 100644 --- a/tests/integration/conversion/azure/v1alpha2.yaml +++ b/tests/integration/conversion/azure/v1alpha2.yaml @@ -39,7 +39,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/azure/v1alpha3.yaml b/tests/integration/conversion/azure/v1alpha3.yaml index 20404aba1d24e..66b0a2aeedbf2 100644 --- a/tests/integration/conversion/azure/v1alpha3.yaml +++ b/tests/integration/conversion/azure/v1alpha3.yaml @@ -4,10 +4,13 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -34,11 +37,7 @@ spec: memoryRequest: 100Mi name: events iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: kubenet: {} diff --git a/tests/integration/conversion/canal/v1alpha2.yaml b/tests/integration/conversion/canal/v1alpha2.yaml index 8ca4e6533510e..50ef76ad60c87 100644 --- a/tests/integration/conversion/canal/v1alpha2.yaml +++ b/tests/integration/conversion/canal/v1alpha2.yaml @@ -33,7 +33,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/canal/v1alpha3.yaml b/tests/integration/conversion/canal/v1alpha3.yaml index 2de1ab04a77af..35e1d4951ff77 100644 --- a/tests/integration/conversion/canal/v1alpha3.yaml +++ b/tests/integration/conversion/canal/v1alpha3.yaml @@ -4,12 +4,15 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com addons: - manifest: s3://somebucket/example.yaml api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -30,11 +33,7 @@ spec: memoryRequest: 100Mi name: events iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: canal: diff --git a/tests/integration/conversion/cilium/v1alpha2.yaml b/tests/integration/conversion/cilium/v1alpha2.yaml index 5648f9b11b0e7..93e0038b77708 100644 --- a/tests/integration/conversion/cilium/v1alpha2.yaml +++ b/tests/integration/conversion/cilium/v1alpha2.yaml @@ -33,7 +33,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/cilium/v1alpha3.yaml b/tests/integration/conversion/cilium/v1alpha3.yaml index dde9960137dce..8d05ce776d15c 100644 --- a/tests/integration/conversion/cilium/v1alpha3.yaml +++ 
b/tests/integration/conversion/cilium/v1alpha3.yaml @@ -4,12 +4,15 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com addons: - manifest: s3://somebucket/example.yaml api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -30,11 +33,7 @@ spec: memoryRequest: 100Mi name: events iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: cilium: diff --git a/tests/integration/conversion/do/v1alpha2.yaml b/tests/integration/conversion/do/v1alpha2.yaml index 970c6c413126c..e4c89edb0d26d 100644 --- a/tests/integration/conversion/do/v1alpha2.yaml +++ b/tests/integration/conversion/do/v1alpha2.yaml @@ -32,7 +32,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/do/v1alpha3.yaml b/tests/integration/conversion/do/v1alpha3.yaml index b56832ddacd36..519840499c7b5 100644 --- a/tests/integration/conversion/do/v1alpha3.yaml +++ b/tests/integration/conversion/do/v1alpha3.yaml @@ -4,10 +4,13 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -28,11 +31,7 @@ spec: memoryRequest: 100Mi name: events iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: kubenet: {} diff --git a/tests/integration/conversion/gce/v1alpha2.yaml b/tests/integration/conversion/gce/v1alpha2.yaml index 44806b86e00b2..19ad5fcc11cc7 100644 --- a/tests/integration/conversion/gce/v1alpha2.yaml +++ b/tests/integration/conversion/gce/v1alpha2.yaml @@ -32,7 +32,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/gce/v1alpha3.yaml b/tests/integration/conversion/gce/v1alpha3.yaml index 60635ec13188c..c16c810f6a637 100644 --- a/tests/integration/conversion/gce/v1alpha3.yaml +++ b/tests/integration/conversion/gce/v1alpha3.yaml @@ -4,10 +4,13 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -29,11 +32,7 @@ spec: memoryRequest: 100Mi name: events iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: kubenet: {} diff --git a/tests/integration/conversion/minimal/legacy-v1alpha2.yaml b/tests/integration/conversion/minimal/legacy-v1alpha2.yaml index 35ea048f56683..b215ad44f2efd 
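The conversion fixtures above (aws, azure, canal, cilium, do, gce and, just below, minimal and openstack) all encode the same v1alpha2 -> v1alpha3 shape change: the top-level masterPublicName, kubernetesAPIAccess and additionalSANs fields move under spec.api as publicName, access and additionalSANs, while masterInternalName is dropped with no replacement. A sketch of that mapping, using deliberately simplified hypothetical structs rather than the real kops API types, which carry many more fields:

package main

import "fmt"

// specV1alpha2 and specV1alpha3 are illustrative stand-ins only.
type specV1alpha2 struct {
	MasterPublicName    string
	KubernetesAPIAccess []string
	AdditionalSANs      []string
}

type accessSpec struct {
	PublicName     string
	Access         []string
	AdditionalSANs []string
}

type specV1alpha3 struct {
	API accessSpec
}

// convert re-homes the retired top-level fields under spec.api, matching
// what these fixtures expect; masterInternalName has no v1alpha3
// counterpart and is simply dropped.
func convert(in specV1alpha2) specV1alpha3 {
	return specV1alpha3{API: accessSpec{
		PublicName:     in.MasterPublicName,
		Access:         in.KubernetesAPIAccess,
		AdditionalSANs: in.AdditionalSANs,
	}}
}

func main() {
	out := convert(specV1alpha2{
		MasterPublicName:    "api.minimal.example.com",
		KubernetesAPIAccess: []string{"0.0.0.0/0"},
		AdditionalSANs:      []string{"proxy.api.minimal.example.com"},
	})
	fmt.Printf("%+v\n", out)
}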
100644 --- a/tests/integration/conversion/minimal/legacy-v1alpha2.yaml +++ b/tests/integration/conversion/minimal/legacy-v1alpha2.yaml @@ -33,7 +33,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/minimal/v1alpha2.yaml b/tests/integration/conversion/minimal/v1alpha2.yaml index e61e5b8ac9318..34b4f298dc02c 100644 --- a/tests/integration/conversion/minimal/v1alpha2.yaml +++ b/tests/integration/conversion/minimal/v1alpha2.yaml @@ -33,7 +33,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/minimal/v1alpha3.yaml b/tests/integration/conversion/minimal/v1alpha3.yaml index 1283ac5310aaf..7014b950b1d72 100644 --- a/tests/integration/conversion/minimal/v1alpha3.yaml +++ b/tests/integration/conversion/minimal/v1alpha3.yaml @@ -4,12 +4,15 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com addons: - manifest: s3://somebucket/example.yaml api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -30,11 +33,7 @@ spec: memoryRequest: 100Mi name: events iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: kubenet: {} diff --git a/tests/integration/conversion/openstack/v1alpha2.yaml b/tests/integration/conversion/openstack/v1alpha2.yaml index 583f2272284a6..ef362cdd17824 100644 --- a/tests/integration/conversion/openstack/v1alpha2.yaml +++ b/tests/integration/conversion/openstack/v1alpha2.yaml @@ -48,7 +48,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/conversion/openstack/v1alpha3.yaml b/tests/integration/conversion/openstack/v1alpha3.yaml index 872c819f0e34b..a769995f134d4 100644 --- a/tests/integration/conversion/openstack/v1alpha3.yaml +++ b/tests/integration/conversion/openstack/v1alpha3.yaml @@ -4,10 +4,13 @@ metadata: creationTimestamp: "2016-12-10T22:42:27Z" name: minimal.example.com spec: - additionalSANs: - - proxy.api.minimal.example.com api: + access: + - 0.0.0.0/0 + additionalSANs: + - proxy.api.minimal.example.com dns: {} + publicName: api.minimal.example.com authorization: alwaysAllow: {} channel: stable @@ -43,11 +46,7 @@ spec: memoryRequest: 100Mi name: events iam: {} - kubernetesAPIAccess: - - 0.0.0.0/0 kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: kubenet: {} diff --git a/tests/integration/create_cluster/ipv6/expected-v1alpha2.yaml b/tests/integration/create_cluster/ipv6/expected-v1alpha2.yaml index d0b0e03d88681..8b8c144d374cb 100644 --- a/tests/integration/create_cluster/ipv6/expected-v1alpha2.yaml +++ b/tests/integration/create_cluster/ipv6/expected-v1alpha2.yaml @@ -5,7 +5,9 @@ metadata: name: ipv6.example.com spec: api: - dns: {} + 
loadBalancer: + class: Network + type: Public authorization: rbac: {} channel: stable @@ -45,16 +47,25 @@ spec: - 0.0.0.0/0 - ::/0 subnets: - - cidr: 172.20.32.0/19 - ipv6CIDR: /64#0 + - ipv6CIDR: /64#0 name: us-test-1a - type: Public + type: Private + zone: us-test-1a + - cidr: 172.20.32.0/19 + ipv6CIDR: /64#1 + name: dualstack-us-test-1a + type: DualStack + zone: us-test-1a + - cidr: 172.20.0.0/22 + ipv6CIDR: /64#2 + name: utility-us-test-1a + type: Utility zone: us-test-1a topology: dns: type: Public - masters: public - nodes: public + masters: private + nodes: private --- @@ -75,7 +86,7 @@ spec: minSize: 1 role: Master subnets: - - us-test-1a + - dualstack-us-test-1a --- diff --git a/tests/integration/create_cluster/minimal_feature-gates/expected-v1alpha2.yaml b/tests/integration/create_cluster/minimal_feature-gates/expected-v1alpha2.yaml new file mode 100644 index 0000000000000..a64c7dd93d721 --- /dev/null +++ b/tests/integration/create_cluster/minimal_feature-gates/expected-v1alpha2.yaml @@ -0,0 +1,121 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + name: minimal.example.com +spec: + api: + dns: {} + authorization: + rbac: {} + channel: stable + cloudProvider: aws + configBase: memfs://tests/minimal.example.com + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-us-test-1a + name: a + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-us-test-1a + name: a + memoryRequest: 100Mi + name: events + iam: + allowContainerRegistry: true + legacy: false + kubeAPIServer: + featureGates: + APIResponseCompression: "false" + ReadWriteOncePod: "true" + SELinuxMountReadWriteOncePod: "true" + kubeControllerManager: + featureGates: + APIResponseCompression: "false" + ReadWriteOncePod: "true" + SELinuxMountReadWriteOncePod: "true" + kubeProxy: + featureGates: + APIResponseCompression: "false" + ReadWriteOncePod: "true" + SELinuxMountReadWriteOncePod: "true" + kubeScheduler: + featureGates: + APIResponseCompression: "false" + ReadWriteOncePod: "true" + SELinuxMountReadWriteOncePod: "true" + kubelet: + anonymousAuth: false + featureGates: + APIResponseCompression: "false" + ReadWriteOncePod: "true" + SELinuxMountReadWriteOncePod: "true" + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: v1.26.0 + masterPublicName: api.minimal.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: minimal.example.com + name: master-us-test-1a +spec: + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221018 + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: minimal.example.com + name: nodes-us-test-1a +spec: + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221018 + instanceMetadata: + httpPutResponseHopLimit: 1 + httpTokens: 
required + machineType: t2.medium + maxSize: 1 + minSize: 1 + role: Node + subnets: + - us-test-1a diff --git a/tests/integration/create_cluster/minimal_feature-gates/options.yaml b/tests/integration/create_cluster/minimal_feature-gates/options.yaml new file mode 100644 index 0000000000000..766d3f6fd8697 --- /dev/null +++ b/tests/integration/create_cluster/minimal_feature-gates/options.yaml @@ -0,0 +1,10 @@ +ClusterName: minimal.example.com +Zones: +- us-test-1a +CloudProvider: aws +Networking: cni +KubernetesVersion: v1.26.0 +KubernetesFeatureGates: +- SELinuxMountReadWriteOncePod +- +ReadWriteOncePod +- -APIResponseCompression diff --git a/tests/integration/update_cluster/additionalobjects/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/additionalobjects/data/aws_s3_object_cluster-completed.spec_content index 0e8e90f7bde52..713311d0f7006 100644 --- a/tests/integration/update_cluster/additionalobjects/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/additionalobjects/data/aws_s3_object_cluster-completed.spec_content @@ -167,7 +167,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.23.0 - masterInternalName: api.internal.additionalobjects.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/apiservernodes/cloudformation.json b/tests/integration/update_cluster/apiservernodes/cloudformation.json deleted file mode 100644 index faa4260c1a9c7..0000000000000 --- a/tests/integration/update_cluster/apiservernodes/cloudformation.json +++ /dev/null @@ -1,1651 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupapiserverapiserversminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "apiserver.apiservers.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "apiserver.apiservers.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "api-server", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/apiserver", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "apiserver", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", - 
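The minimal_feature-gates fixture above pins down the --kubernetes-feature-gates syntax: a bare gate name or a '+'-prefixed one enables the gate, a '-'-prefixed one disables it, and the resulting map is copied into the featureGates of kubeAPIServer, kubeControllerManager, kubeProxy, kubeScheduler and kubelet, as expected-v1alpha2.yaml shows. A small sketch of the implied parsing (parseFeatureGates is a hypothetical name, not the actual kops helper):

package main

import (
	"fmt"
	"strings"
)

// parseFeatureGates illustrates the flag semantics the fixture implies:
// "Gate" and "+Gate" enable a gate; "-Gate" disables it.
func parseFeatureGates(flags []string) map[string]string {
	gates := make(map[string]string)
	for _, f := range flags {
		switch {
		case strings.HasPrefix(f, "-"):
			gates[strings.TrimPrefix(f, "-")] = "false"
		case strings.HasPrefix(f, "+"):
			gates[strings.TrimPrefix(f, "+")] = "true"
		default:
			gates[f] = "true"
		}
	}
	return gates
}

func main() {
	// Mirrors options.yaml above; yields the featureGates map seen in the
	// expected-v1alpha2.yaml fixture.
	fmt.Println(parseFeatureGates([]string{
		"SELinuxMountReadWriteOncePod",
		"+ReadWriteOncePod",
		"-APIResponseCompression",
	}))
}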
"LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - 
"GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "apiserver.apiservers.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfileapiserversminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "apiserver.apiservers.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "api-server" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", - "Value": "" - }, - { - "Key": "k8s.io/role/apiserver", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "apiserver" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "apiserver.apiservers.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "api-server" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", - "Value": "" - }, - { - "Key": "k8s.io/role/apiserver", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "apiserver" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - 
"DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": 
true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2RouteTableminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - 
"AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - 
"AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/apiserver", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": 
"AWSEC2VPCminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2VPCminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfileapiserversminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "apiservers.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRoleapiserversminimalexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilemastersminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ] - } - }, - "AWSIAMPolicyapiserversminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "apiservers.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRoleapiserversminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicymastersminimalexamplecom": { - "Type": "AWS::IAM::Policy", - 
"Properties": { - "PolicyName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - 
"ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ], - "PolicyDocument": { - 
"Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRoleapiserversminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "apiservers.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "apiservers.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolemastersminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/apiservernodes/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/apiservernodes/cloudformation.json.extracted.yaml deleted file mode 100644 index 039e9c5d6e755..0000000000000 --- a/tests/integration/update_cluster/apiservernodes/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,670 +0,0 @@ -Resources.AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' 
|| true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.22.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - 
kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal.example.com:3988/ - InstanceGroupName: apiserver - InstanceGroupRole: APIServer - NodeupConfigHash: 5yiCVw+BD0+36GR/X1I5O0qfDuUXCvYxqqwaNy5BYFU= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.5.4 - main: - version: 3.5.4 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-apiserver:v1.22.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: 
https://api.internal.minimal.example.com - serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal.example.com - configureCloudRoutes: false - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-controller-manager:v1.22.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.22.0 - logLevel: 2 - kubeScheduler: - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-scheduler:v1.22.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: 5riiUtWaXLjbfRaTO/R7YcCfdXNmNtRfKdFQOEA+xAU= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 
87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.22.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" 
- kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: 354orPsI7dNYmQ6/gBPyTojuACUP6YzTe5OmY4RWhYs= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/apiservernodes/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/apiservernodes/data/aws_s3_object_cluster-completed.spec_content index 3661580b4fd1e..dcd11893e2b6f 100644 --- a/tests/integration/update_cluster/apiservernodes/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/apiservernodes/data/aws_s3_object_cluster-completed.spec_content @@ -156,7 +156,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.22.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/aws-lb-controller/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/aws-lb-controller/data/aws_s3_object_cluster-completed.spec_content index d9d3cd47e07f8..17e34df0562fd 100644 --- a/tests/integration/update_cluster/aws-lb-controller/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/aws-lb-controller/data/aws_s3_object_cluster-completed.spec_content @@ -147,7 +147,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/aws-lb-controller/in-v1alpha2.yaml b/tests/integration/update_cluster/aws-lb-controller/in-v1alpha2.yaml index cf85f3f82bc09..7a6b231104cd9 100644 --- a/tests/integration/update_cluster/aws-lb-controller/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/aws-lb-controller/in-v1alpha2.yaml @@ -27,7 +27,6 @@ spec: 
kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/bastionadditional_user-data/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/bastionadditional_user-data/data/aws_s3_object_cluster-completed.spec_content index e813e84cbf710..0149ca6deb84f 100644 --- a/tests/integration/update_cluster/bastionadditional_user-data/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/bastionadditional_user-data/data/aws_s3_object_cluster-completed.spec_content @@ -144,7 +144,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.bastionuserdata.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/complex/cloudformation.json b/tests/integration/update_cluster/complex/cloudformation.json deleted file mode 100644 index 6f387fc0859e9..0000000000000 --- a/tests/integration/update_cluster/complex/cloudformation.json +++ /dev/null @@ -1,2020 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amasterscomplexexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.complex.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amasterscomplexexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amasterscomplexexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1acomplexexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.complex.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Owner", - "Value": "John Doe", - "PropagateAtLaunch": true - }, - { - "Key": "foo/bar", - "Value": "fib+baz", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - "my-external-lb-1" - ], - "TargetGroupARNs": [ - { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpcomplexexamplecomvpjolq" - }, - { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptlscomplexexamplecom5nursn" - } - ] - } - }, - 
"AWSAutoScalingAutoScalingGroupnodescomplexexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.complex.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodescomplexexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodescomplexexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1acomplexexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.complex.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Owner", - "Value": "John Doe", - "PropagateAtLaunch": true - }, - { - "Key": "foo/bar", - "Value": "fib+baz", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - "my-external-lb-1" - ] - } - }, - "AWSEC2DHCPOptionscomplexexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaycomplexexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amasterscomplexexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.complex.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true, - "KmsKeyId": "arn:aws-test:kms:us-test-1:000000000000:key/1234abcd-12ab-34cd-56ef-1234567890ab" - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemasterscomplexexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "required" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { 
- "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "sg-exampleid5", - "sg-exampleid6" - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodescomplexexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.complex.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/xvdd", - "Ebs": { - "VolumeType": "gp2", - "VolumeSize": 20, - "DeleteOnTermination": true, - "Encrypted": true, - "KmsKeyId": "arn:aws-test:kms:us-test-1:000000000000:key/1234abcd-12ab-34cd-56ef-1234567890ab" - } - } - ], - "CreditSpecification": { - "CpuCredits": "standard" - }, - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodescomplexexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": true - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "sg-exampleid3", - "sg-exampleid4" - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": 
"nodes.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "nodes.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablecomplexexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaycomplexexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablecomplexexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaycomplexexamplecom" - } - } - }, - "AWSEC2RouteTablecomplexexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2RouteTableprivateustest1acomplexexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "private-us-test-1a.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "private-us-test-1a" - } - ] - } - }, - "AWSEC2Routeprivateustest1a00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1acomplexexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "TransitGatewayId": "tgw-123456" - } - }, - "AWSEC2Routeuseast1aprivate19216811032": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1acomplexexamplecom" - }, - "DestinationCidrBlock": "192.168.1.10/32", - "TransitGatewayId": "tgw-0123456" - } - }, - "AWSEC2SecurityGroupEgressfrommasterscomplexexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": 
"::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommasterscomplexexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodescomplexexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodescomplexexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "SourcePrefixListId": "pl-66666666" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodescomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "SourcePrefixListId": "pl-66666666" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "SourcePrefixListId": "pl-44444444" - } - }, - "AWSEC2SecurityGroupIngressfrom111024ingresstcp443to443masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "1.1.1.0/24" - } - }, - "AWSEC2SecurityGroupIngressfrom111132ingresstcp22to22masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "1.1.1.1/32" - } - }, - "AWSEC2SecurityGroupIngressfrom111132ingresstcp22to22nodescomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "1.1.1.1/32" - } - }, - "AWSEC2SecurityGroupIngressfrommasterscomplexexamplecomingressall0to0masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommasterscomplexexamplecomingressall0to0nodescomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 0, - "ToPort": 
0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodescomplexexamplecomingressall0to0nodescomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodescomplexexamplecomingresstcp1to2379masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescomplexexamplecomingresstcp2382to4000masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescomplexexamplecomingresstcp4003to65535masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescomplexexamplecomingressudp1to65535masterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupIngresshttpselbtomaster": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "172.20.0.0/16" - } - }, - "AWSEC2SecurityGroupIngresshttpslbtomaster1010016": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "10.1.0.0/16" - } - }, - "AWSEC2SecurityGroupIngresshttpslbtomaster1020016": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "10.2.0.0/16" - } - }, - "AWSEC2SecurityGroupIngressicmppmtuapielb111024": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "1.1.1.0/24" - } - }, - "AWSEC2SecurityGroupIngressnodeporttcpexternaltonode102030024": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 28000, - "ToPort": 32767, - "IpProtocol": "tcp", - "CidrIp": "10.20.30.0/24" - } - }, - "AWSEC2SecurityGroupIngressnodeporttcpexternaltonode123432": { - "Type": "AWS::EC2::SecurityGroupIngress", - 
"Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 28000, - "ToPort": 32767, - "IpProtocol": "tcp", - "CidrIp": "1.2.3.4/32" - } - }, - "AWSEC2SecurityGroupIngressnodeportudpexternaltonode102030024": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 28000, - "ToPort": 32767, - "IpProtocol": "udp", - "CidrIp": "10.20.30.0/24" - } - }, - "AWSEC2SecurityGroupIngressnodeportudpexternaltonode123432": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescomplexexamplecom" - }, - "FromPort": 28000, - "ToPort": 32767, - "IpProtocol": "udp", - "CidrIp": "1.2.3.4/32" - } - }, - "AWSEC2SecurityGroupIngresstcpapi111024": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 8443, - "ToPort": 8443, - "IpProtocol": "tcp", - "CidrIp": "1.1.1.0/24" - } - }, - "AWSEC2SecurityGroupIngresstcpapipl44444444": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom" - }, - "FromPort": 8443, - "ToPort": 8443, - "IpProtocol": "tcp", - "SourcePrefixListId": "pl-44444444" - } - }, - "AWSEC2SecurityGroupapielbcomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "api-elb.complex.example.com", - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "GroupDescription": "Security group for api ELB", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "api-elb.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupmasterscomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.complex.example.com", - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "masters.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodescomplexexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.complex.example.com", - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "nodes.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationprivateuseast1aprivatecomplexexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetuseast1aprivatecomplexexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1acomplexexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationuseast1autilitycomplexexamplecom": { 
- "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetuseast1autilitycomplexexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablecomplexexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationustest1acomplexexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1acomplexexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablecomplexexamplecom" - } - } - }, - "AWSEC2Subnetuseast1aprivatecomplexexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "CidrBlock": "172.20.64.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "us-east-1a-private.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "SubnetType", - "Value": "Private" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetuseast1autilitycomplexexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "CidrBlock": "172.20.96.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "us-east-1a-utility.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "SubnetType", - "Value": "Utility" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetustest1acomplexexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlock1010016": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "CidrBlock": "10.1.0.0/16" - } - }, - "AWSEC2VPCCidrBlock1020016": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "CidrBlock": "10.2.0.0/16" - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationcomplexexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - 
"VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionscomplexexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentcomplexexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaycomplexexamplecom" - } - } - }, - "AWSEC2VPCcomplexexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeaetcdeventscomplexexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "a.etcd-events.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "a/a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeaetcdmaincomplexexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "a.etcd-main.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "a/a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2Listenerapicomplexexamplecom443": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "Certificates": [ - { - "CertificateArn": "arn:aws-test:acm:us-test-1:000000000000:certificate/123456789012-1234-1234-1234-12345678" - } - ], - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptlscomplexexamplecom5nursn" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerapicomplexexamplecom" - }, - "Port": 443, - "Protocol": "TLS", - "SslPolicy": "ELBSecurityPolicy-2016-08" - } - }, - "AWSElasticLoadBalancingV2Listenerapicomplexexamplecom8443": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpcomplexexamplecomvpjolq" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerapicomplexexamplecom" - }, - "Port": 8443, - "Protocol": "TCP" - } - }, - "AWSElasticLoadBalancingV2LoadBalancerapicomplexexamplecom": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "api-complex-example-com-vd3t5n", - "Scheme": "internet-facing", - "SubnetMappings": [ 
- { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1acomplexexamplecom" - }, - "AllocationId": "eipalloc-012345a678b9cdefa" - } - ], - "Type": "network", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "api.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ], - "LoadBalancerAttributes": [ - { - "Key": "access_logs.s3.enabled", - "Value": "true" - }, - { - "Key": "access_logs.s3.bucket", - "Value": "access-log-example" - }, - { - "Key": "access_logs.s3.prefix" - } - ] - } - }, - "AWSElasticLoadBalancingV2TargetGrouptcpcomplexexamplecomvpjolq": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "tcp-complex-example-com-vpjolq", - "Port": 443, - "Protocol": "TCP", - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "tcp-complex-example-com-vpjolq" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TCP", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSElasticLoadBalancingV2TargetGrouptlscomplexexamplecom5nursn": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "tls-complex-example-com-5nursn", - "Port": 443, - "Protocol": "TLS", - "VpcId": { - "Ref": "AWSEC2VPCcomplexexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "tls-complex-example-com-5nursn" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TLS", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSIAMInstanceProfilemasterscomplexexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.complex.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemasterscomplexexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodescomplexexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.complex.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodescomplexexamplecom" - } - ] - } - }, - "AWSIAMPolicymasterscomplexexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.complex.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemasterscomplexexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "complex.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/complex.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/complex.example.com/backups/etcd/main/*" - }, - { - "Action": [ - 
"s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/complex.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "complex.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "complex.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "complex.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "complex.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - 
"autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "complex.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "complex.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodescomplexexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.complex.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodescomplexexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemasterscomplexexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.complex.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "PermissionsBoundary": "arn:aws-test:iam::000000000000:policy/boundaries", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "masters.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - 
"AWSIAMRolenodescomplexexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.complex.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "PermissionsBoundary": "arn:aws-test:iam::000000000000:policy/boundaries", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "complex.example.com" - }, - { - "Key": "Name", - "Value": "nodes.complex.example.com" - }, - { - "Key": "Owner", - "Value": "John Doe" - }, - { - "Key": "foo/bar", - "Value": "fib+baz" - }, - { - "Key": "kubernetes.io/cluster/complex.example.com", - "Value": "owned" - } - ] - } - }, - "AWSRoute53RecordSetapicomplexexamplecom": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.complex.example.com", - "Type": "A", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapicomplexexamplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapicomplexexamplecom", - "CanonicalHostedZoneID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - } - } -} diff --git a/tests/integration/update_cluster/complex/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/complex/cloudformation.json.extracted.yaml deleted file mode 100644 index d5ca6f3996eae..0000000000000 --- a/tests/integration/update_cluster/complex/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,518 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amasterscomplexexamplecom.Properties.LaunchTemplateData.UserData: | - Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" - MIME-Version: 1.0 - - --MIMEBOUNDARY - Content-Disposition: attachment; filename="nodeup.sh" - Content-Transfer-Encoding: 7bit - Content-Type: text/x-shellscript - Mime-Version: 1.0 - - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - runc: - version: 1.1.4 - version: 1.6.10 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.5.4 - main: - version: 3.5.4 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - auditWebhookBatchThrottleQps: 3140m - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: external - cpuLimit: 500m - cpuRequest: 200m - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-apiserver:v1.24.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - memoryLimit: 1000Mi - memoryRequest: 800Mi - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - 
requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.complex.example.com - serviceAccountJWKSURI: https://api.internal.complex.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - serviceNodePortRange: 28000-32767 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: external - clusterCIDR: 100.96.0.0/11 - clusterName: complex.example.com - configureCloudRoutes: false - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-controller-manager:v1.24.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.24.0 - logLevel: 2 - kubeScheduler: - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-scheduler:v1.24.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/complex.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: JCABoxKJoSiZnQPrew3IALhFdf/V9H1/wtS+FtaDkog= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" - - --MIMEBOUNDARY - Content-Disposition: attachment; filename="myscript.sh" - Content-Transfer-Encoding: 7bit - Content-Type: text/x-shellscript - Mime-Version: 1.0 - - #!/bin/sh - echo "nodes: The time is now $(date -R)!" 
| tee /root/output.txt - - --MIMEBOUNDARY-- -Resources.AWSEC2LaunchTemplatenodescomplexexamplecom.Properties.LaunchTemplateData.UserData: | - Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" - MIME-Version: 1.0 - - --MIMEBOUNDARY - Content-Disposition: attachment; filename="nodeup.sh" - Content-Transfer-Encoding: 7bit - Content-Type: text/x-shellscript - Mime-Version: 1.0 - - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - runc: - version: 1.1.4 - version: 1.6.10 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.24.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - 
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.complex.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: /ooG10dKeNIrNpp1uS31RvO28btpPPY6Qsu3yeVSknk= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" - - --MIMEBOUNDARY - Content-Disposition: attachment; filename="myscript.sh" - Content-Transfer-Encoding: 7bit - Content-Type: text/x-shellscript - Mime-Version: 1.0 - - #!/bin/sh - echo "nodes: The time is now $(date -R)!" | tee /root/output.txt - - --MIMEBOUNDARY-- diff --git a/tests/integration/update_cluster/complex/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/complex/data/aws_s3_object_cluster-completed.spec_content index 3d042b255f5b9..29ef1e88b957b 100644 --- a/tests/integration/update_cluster/complex/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/complex/data/aws_s3_object_cluster-completed.spec_content @@ -195,7 +195,6 @@ spec: - 1.1.1.0/24 - pl-44444444 kubernetesVersion: 1.24.0 - masterInternalName: api.internal.complex.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml b/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml index bcb75937f6d7a..e942243457e37 100644 --- a/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml +++ b/tests/integration/update_cluster/complex/in-legacy-v1alpha2.yaml @@ -51,7 +51,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.24.0 - masterInternalName: api.internal.complex.example.com masterPublicName: api.complex.example.com networkCIDR: 172.20.0.0/16 additionalNetworkCIDRs: diff --git a/tests/integration/update_cluster/complex/in-v1alpha2.yaml b/tests/integration/update_cluster/complex/in-v1alpha2.yaml index 6a46d4b91a9d0..b8c61a4af892d 100644 --- a/tests/integration/update_cluster/complex/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/complex/in-v1alpha2.yaml @@ -51,7 +51,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.24.0 - masterInternalName: api.internal.complex.example.com masterPublicName: api.complex.example.com networkCIDR: 172.20.0.0/16 additionalNetworkCIDRs: diff --git a/tests/integration/update_cluster/compress/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/compress/data/aws_s3_object_cluster-completed.spec_content index 679e5fc838281..2ea5870330025 100644 --- a/tests/integration/update_cluster/compress/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/compress/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.compress.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/compress/in-v1alpha2.yaml b/tests/integration/update_cluster/compress/in-v1alpha2.yaml index 242dc105fabe7..7960372289ace 100644 --- a/tests/integration/update_cluster/compress/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/compress/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - 
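Aside: the user-data deleted above bootstraps each instance with a download-or-bust loop — try every mirror in the comma-separated NODEUP_URL list, verify a pinned SHA-256, and retry forever on failure. (Note the fixture's progress line echoes "${cmd} {url}" with a missing "$", though the actual invocation quotes "${url}" correctly.) Below is a condensed, self-contained sketch of the same pattern; the file name, hash, and mirror URLs are placeholders.

    #!/usr/bin/env bash
    # Minimal sketch of the download-or-bust pattern from the deleted
    # user-data: try each mirror in order, verify the SHA-256, retry forever.
    # FILE, HASH, and MIRRORS are placeholders, not real artifacts.
    set -o errexit -o nounset -o pipefail

    FILE=nodeup
    HASH=0000000000000000000000000000000000000000000000000000000000000000
    MIRRORS="https://example.com/nodeup,https://mirror.example.com/nodeup"

    validate_hash() {
      # Compare the file's SHA-256 against the expected value.
      [[ "$(sha256sum "$1" | awk '{print $1}')" == "$2" ]]
    }

    while true; do
      for url in $(tr ',' '\n' <<<"${MIRRORS}"); do
        echo "Attempting download: ${url}"
        if curl -fLo "${FILE}" --connect-timeout 20 --retry 6 "${url}" \
            && validate_hash "${FILE}" "${HASH}"; then
          echo "Downloaded ${url} (SHA256 = ${HASH})"
          exit 0
        fi
        rm -f "${FILE}"
      done
      echo "All downloads failed; sleeping before retrying"
      sleep 60
    done
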
masterInternalName: api.internal.compress.example.com masterPublicName: api.compress.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/containerd-custom/cloudformation.json b/tests/integration/update_cluster/containerd-custom/cloudformation.json deleted file mode 100644 index be100b57e577b..0000000000000 --- a/tests/integration/update_cluster/containerd-custom/cloudformation.json +++ /dev/null @@ -1,1349 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amasterscontainerdexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.containerd.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1acontainerdexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodescontainerdexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.containerd.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodescontainerdexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodescontainerdexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1acontainerdexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - 
"Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionscontainerdexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaycontainerdexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.containerd.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemasterscontainerdexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.containerd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": 
"kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodescontainerdexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.containerd.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodescontainerdexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.containerd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - 
"AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablecontainerdexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaycontainerdexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablecontainerdexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaycontainerdexamplecom" - } - } - }, - "AWSEC2RouteTablecontainerdexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommasterscontainerdexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommasterscontainerdexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodescontainerdexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodescontainerdexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommasterscontainerdexamplecomingressall0to0masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - 
"AWSEC2SecurityGroupIngressfrommasterscontainerdexamplecomingressall0to0nodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingressall0to0nodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingresstcp1to2379masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingresstcp2382to4000masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingresstcp4003to65535masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingressudp1to65535masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmasterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.containerd.example.com", - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "masters.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.containerd.example.com", - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1acontainerdexamplecom": { - "Type": 
"AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1acontainerdexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablecontainerdexamplecom" - } - } - }, - "AWSEC2Subnetustest1acontainerdexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.containerd.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationcontainerdexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionscontainerdexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentcontainerdexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaycontainerdexamplecom" - } - } - }, - "AWSEC2VPCcontainerdexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventscontainerdexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.containerd.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmaincontainerdexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.containerd.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemasterscontainerdexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - 
"Properties": { - "InstanceProfileName": "masters.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemasterscontainerdexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodescontainerdexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodescontainerdexamplecom" - } - ] - } - }, - "AWSIAMPolicymasterscontainerdexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemasterscontainerdexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/containerd.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "containerd.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "containerd.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - 
"aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - 
"StringEquals": { - "aws:RequestTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodescontainerdexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodescontainerdexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemasterscontainerdexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.containerd.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "masters.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodescontainerdexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.containerd.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/containerd-custom/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/containerd-custom/cloudformation.json.extracted.yaml deleted file mode 100644 index e24f8f7c2dcbc..0000000000000 --- a/tests/integration/update_cluster/containerd-custom/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,464 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - 
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - packages: - hashAmd64: "0000000000000000000000000000000000000000000000000000000000000000" - urlAmd64: https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz - registryMirrors: - '*': - - http://HostIP2:Port2 - docker.io: - - https://registry-1.docker.io - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - 
authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.containerd.example.com - serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: containerd.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/containerd.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: D17eEbi1tjySAFR4gX5V6ka6BJFYuqams8eskMQ1sRY= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodescontainerdexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - 
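    # Each NODEUP_URL_* value is a comma-separated mirror list that
    # download-or-bust below tries in order; the matching NODEUP_HASH_*
    # pins the expected SHA-256 of that architecture's nodeup binary.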
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} ${url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - packages: - hashAmd64: "0000000000000000000000000000000000000000000000000000000000000000" - urlAmd64:
https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz - registryMirrors: - '*': - - http://HostIP2:Port2 - docker.io: - - https://registry-1.docker.io - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.containerd.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: cPB7xe/jlGjsPr0RVNbD4+kgsaZ2L7gBA+0MRKukLAw= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_masters.containerd.example.com_policy b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_masters.containerd.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_masters.containerd.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_nodes.containerd.example.com_policy b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_nodes.containerd.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_nodes.containerd.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": 
"2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_policy_masters.containerd.example.com_policy b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_policy_masters.containerd.example.com_policy new file mode 100644 index 0000000000000..b6629a1de7332 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_policy_masters.containerd.example.com_policy @@ -0,0 +1,280 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/containerd.example.com/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "containerd.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "containerd.example.com", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + 
"StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": 
"containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_policy_nodes.containerd.example.com_policy b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_policy_nodes.containerd.example.com_policy new file mode 100644 index 0000000000000..153ab3c7f64f4 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_iam_role_policy_nodes.containerd.example.com_policy @@ -0,0 +1,30 @@ +{ + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/existing_iam_cloudformation/id_rsa.pub b/tests/integration/update_cluster/containerd-custom/data/aws_key_pair_kubernetes.containerd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key old mode 100755 new mode 100644 similarity index 100% rename from tests/integration/update_cluster/existing_iam_cloudformation/id_rsa.pub rename to tests/integration/update_cluster/containerd-custom/data/aws_key_pair_kubernetes.containerd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data b/tests/integration/update_cluster/containerd-custom/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data new file mode 100644 index 0000000000000..fd5f7fa7e358b --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data @@ -0,0 +1,261 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. 
args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} ${url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + packages: + hashAmd64: "0000000000000000000000000000000000000000000000000000000000000000" + urlAmd64: https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz + registryMirrors: + '*': + - http://HostIP2:Port2 + docker.io: + - https://registry-1.docker.io + version: 1.4.13 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + version: 3.4.13 + main: + version: 3.4.13 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image:
registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.containerd.example.com + serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: containerd.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/containerd.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: Master +NodeupConfigHash: D17eEbi1tjySAFR4gX5V6ka6BJFYuqams8eskMQ1sRY= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_launch_template_nodes.containerd.example.com_user_data b/tests/integration/update_cluster/containerd-custom/data/aws_launch_template_nodes.containerd.example.com_user_data new file mode 100644 index 0000000000000..f5eca192cf978 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_launch_template_nodes.containerd.example.com_user_data @@ -0,0 +1,201 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 
+NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} ${url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + packages: + hashAmd64: "0000000000000000000000000000000000000000000000000000000000000000" + urlAmd64:
https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz + registryMirrors: + '*': + - http://HostIP2:Port2 + docker.io: + - https://registry-1.docker.io + version: 1.4.13 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + server: https://kops-controller.internal.containerd.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: cPB7xe/jlGjsPr0RVNbD4+kgsaZ2L7gBA+0MRKukLAw= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000000..fc94d00de747a --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,189 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: containerd.example.com +spec: + api: + dns: {} + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: memfs://clusters.example.com/containerd.example.com + configStore: memfs://clusters.example.com/containerd.example.com + containerRuntime: containerd + containerd: + logLevel: info + packages: + hashAmd64: "0000000000000000000000000000000000000000000000000000000000000000" + 
urlAmd64: https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz + registryMirrors: + '*': + - http://HostIP2:Port2 + docker.io: + - https://registry-1.docker.io + version: 1.4.13 + dnsZone: Z1AFAKE1ZON3YO + docker: + skipInstall: true + etcdClusters: + - backups: + backupStore: memfs://clusters.example.com/containerd.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + version: 3.4.13 + - backups: + backupStore: memfs://clusters.example.com/containerd.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + version: 3.4.13 + externalDns: + provider: dns-controller + iam: + legacy: false + keyStore: memfs://clusters.example.com/containerd.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.containerd.example.com + serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: containerd.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + kubernetesVersion: 1.21.0 + masterKubelet: + anonymousAuth: false + 
cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.containerd.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: memfs://clusters.example.com/containerd.example.com/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-bootstrap_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-bootstrap_content new file mode 100644 index 0000000000000..8c05ab188640e --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-bootstrap_content @@ -0,0 +1,48 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 885c107be5524b5aefca6e9b368d92c929e5abf009086acf49328e40a44c0d5e + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: cd1e8f47fe52b13fee5536b0d4b4429ef256829d87a51cbc189fa0f21ff3503b + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 6c8f01b2470d323965dfb22d410f322e0b429f7acc3831f41a763ec072dfc69b + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..fd5b8a7c053f2 --- /dev/null +++ 
b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.9.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: 
/etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - 
/cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..2eab063fdb45a --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.26.0-alpha.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.26.0-alpha.1 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - 
networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000000..e2792373032c4 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"memfs://clusters.example.com/containerd.example.com","secretStore":"memfs://clusters.example.com/containerd.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.containerd.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.26.0-alpha.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.containerd.example.com + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.26.0-alpha.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: 
/etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000000..36761e1c56255 --- /dev/null +++ 
b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000000..4dcdce48b9ab9 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000000..21efd54326518 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,98 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: 
+ - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000000000..b7340298dcdd5 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.21.0-alpha.1 diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content new file mode 100644 index 0000000000000..8b661481b3a5c --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content @@ -0,0 +1,62 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/containerd.example.com/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.containerd.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/containerd.example.com=owned > /tmp/pipe + 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + 
hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content new file mode 100644 index 0000000000000..a4e37fec06100 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content @@ -0,0 +1,62 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/containerd.example.com/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.containerd.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/containerd.example.com=owned > /tmp/pipe + 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000000..5cb249fea763e --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.26.0-alpha.1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + 
securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000000..eba5998b0e8a4 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,284 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.containerd.example.com + serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 0000000000000000000000000000000000000000000000000000000000000000@https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - 
17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz + - 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | 
+ -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + 
J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: containerd.example.com +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + 
networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/master=:NoSchedule +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/containerd.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + packages: + hashAmd64: "0000000000000000000000000000000000000000000000000000000000000000" + urlAmd64: https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz + registryMirrors: + '*': + - http://HostIP2:Port2 + docker.io: + - https://registry-1.docker.io + version: 1.4.13 +etcdManifests: +- memfs://clusters.example.com/containerd.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/containerd.example.com/manifests/etcd/events-master-us-test-1a.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000000..b0d13304afb2b --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,52 @@ +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 0000000000000000000000000000000000000000000000000000000000000000@https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz +CAs: {} +ClusterName: containerd.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: 
cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/containerd.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + packages: + hashAmd64: "0000000000000000000000000000000000000000000000000000000000000000" + urlAmd64: https://github.com/containerd/containerd/releases/download/v1.3.9/cri-containerd-cni-1.3.9-linux-amd64.tar.gz + registryMirrors: + '*': + - http://HostIP2:Port2 + docker.io: + - https://registry-1.docker.io + version: 1.4.13 diff --git a/tests/integration/update_cluster/containerd-custom/in-v1alpha2.yaml b/tests/integration/update_cluster/containerd-custom/in-v1alpha2.yaml index 803c4130af69e..bb7bb21bd5faf 100644 --- a/tests/integration/update_cluster/containerd-custom/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/containerd-custom/in-v1alpha2.yaml @@ -32,7 +32,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.containerd.example.com masterPublicName: api.containerd.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/containerd-custom/kubernetes.tf b/tests/integration/update_cluster/containerd-custom/kubernetes.tf new file mode 100644 index 0000000000000..fc522fb5c69c1 --- /dev/null +++ b/tests/integration/update_cluster/containerd-custom/kubernetes.tf @@ -0,0 +1,820 @@ +locals { + cluster_name = "containerd.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-containerd-example-com.id] + master_security_group_ids = [aws_security_group.masters-containerd-example-com.id] + masters_role_arn = aws_iam_role.masters-containerd-example-com.arn + masters_role_name = aws_iam_role.masters-containerd-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-containerd-example-com.id] + node_security_group_ids = [aws_security_group.nodes-containerd-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-containerd-example-com.id] + nodes_role_arn = aws_iam_role.nodes-containerd-example-com.arn + nodes_role_name = aws_iam_role.nodes-containerd-example-com.name + region = "us-test-1" + route_table_public_id = aws_route_table.containerd-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-containerd-example-com.id + vpc_cidr_block = aws_vpc.containerd-example-com.cidr_block + vpc_id = aws_vpc.containerd-example-com.id +} + +output "cluster_name" { + value = "containerd.example.com" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-containerd-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-containerd-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-containerd-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-containerd-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-containerd-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-containerd-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-containerd-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-containerd-example-com.arn +} + +output 
"nodes_role_name" { + value = aws_iam_role.nodes-containerd-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = aws_route_table.containerd-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-containerd-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.containerd-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.containerd-example-com.id +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-containerd-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-containerd-example-com.id + version = aws_launch_template.master-us-test-1a-masters-containerd-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.containerd.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "containerd.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.containerd.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/containerd.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-containerd-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-containerd-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-containerd-example-com.id + version = aws_launch_template.nodes-containerd-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.containerd.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "containerd.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.containerd.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/containerd.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-containerd-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-containerd-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "us-test-1a.etcd-events.containerd.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-containerd-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "us-test-1a.etcd-main.containerd.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_iam_instance_profile" "masters-containerd-example-com" { + name = "masters.containerd.example.com" + role = aws_iam_role.masters-containerd-example-com.name + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "masters.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-containerd-example-com" { + name = "nodes.containerd.example.com" + role = aws_iam_role.nodes-containerd-example-com.name + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_role" "masters-containerd-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.containerd.example.com_policy") + name = "masters.containerd.example.com" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "masters.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-containerd-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.containerd.example.com_policy") + name = "nodes.containerd.example.com" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-containerd-example-com" { + name = "masters.containerd.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.containerd.example.com_policy") + role = aws_iam_role.masters-containerd-example-com.name +} + +resource "aws_iam_role_policy" "nodes-containerd-example-com" { + name = "nodes.containerd.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.containerd.example.com_policy") + role = aws_iam_role.nodes-containerd-example-com.name +} + +resource "aws_internet_gateway" "containerd-example-com" { + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + 
"kubernetes.io/cluster/containerd.example.com" = "owned" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_key_pair" "kubernetes-containerd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.containerd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.containerd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-containerd-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-containerd-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-containerd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.containerd.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-containerd-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "master-us-test-1a.masters.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "master-us-test-1a.masters.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "master-us-test-1a.masters.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data") +} + +resource "aws_launch_template" "nodes-containerd-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-containerd-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-containerd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "nodes.containerd.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-containerd-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.containerd.example.com_user_data") +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.containerd-example-com.id + route_table_id = aws_route_table.containerd-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.containerd-example-com.id + route_table_id = aws_route_table.containerd-example-com.id +} + +resource "aws_route_table" "containerd-example-com" { + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + 
"kubernetes.io/cluster/containerd.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_route_table_association" "us-test-1a-containerd-example-com" { + route_table_id = aws_route_table.containerd-example-com.id + subnet_id = aws_subnet.us-test-1a-containerd-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/containerd.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-bootstrap_content") + key = "clusters.example.com/containerd.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/containerd.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/containerd.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/containerd.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/containerd.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/containerd.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/containerd.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket 
= "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/containerd.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "clusters.example.com/containerd.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "clusters.example.com/containerd.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") + key = "clusters.example.com/containerd.example.com/manifests/etcd/events-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") + key = "clusters.example.com/containerd.example.com/manifests/etcd/main-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/containerd.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/containerd.example.com/igconfig/master/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "clusters.example.com/containerd.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "masters-containerd-example-com" { + description = "Security group for masters" + name = "masters.containerd.example.com" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "masters.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_security_group" "nodes-containerd-example-com" { + description = "Security group for nodes" + name = "nodes.containerd.example.com" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-containerd-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + 
protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-containerd-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-containerd-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-containerd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-containerd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-ingress-all-0to0-masters-containerd-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.masters-containerd-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-ingress-all-0to0-nodes-containerd-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-containerd-example-com.id + source_security_group_id = aws_security_group.masters-containerd-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-all-0to0-nodes-containerd-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-tcp-1to2379-masters-containerd-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-tcp-2382to4000-masters-containerd-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 4000 + type = 
"ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-tcp-4003to65535-masters-containerd-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-udp-1to65535-masters-containerd-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-containerd-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "us-test-1a.containerd.example.com" + "SubnetType" = "Public" + "kops.k8s.io/instance-group/master-us-test-1a" = "true" + "kops.k8s.io/instance-group/nodes" = "true" + "kubernetes.io/cluster/containerd.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_vpc" "containerd-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "containerd-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "containerd-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.containerd-example-com.id + vpc_id = aws_vpc.containerd-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +} diff --git a/tests/integration/update_cluster/containerd/cloudformation.json b/tests/integration/update_cluster/containerd/cloudformation.json deleted file mode 100644 index be100b57e577b..0000000000000 --- a/tests/integration/update_cluster/containerd/cloudformation.json +++ /dev/null @@ -1,1349 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amasterscontainerdexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.containerd.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1acontainerdexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodescontainerdexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.containerd.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodescontainerdexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodescontainerdexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1acontainerdexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionscontainerdexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaycontainerdexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": 
"containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.containerd.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemasterscontainerdexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.containerd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodescontainerdexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - 
"LaunchTemplateName": "nodes.containerd.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodescontainerdexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.containerd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablecontainerdexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaycontainerdexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablecontainerdexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaycontainerdexamplecom" - } - } - }, - "AWSEC2RouteTablecontainerdexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommasterscontainerdexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - 
"AWSEC2SecurityGroupEgressfrommasterscontainerdexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodescontainerdexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodescontainerdexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommasterscontainerdexamplecomingressall0to0masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommasterscontainerdexamplecomingressall0to0nodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingressall0to0nodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingresstcp1to2379masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingresstcp2382to4000masterscontainerdexamplecom": 
{ - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingresstcp4003to65535masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodescontainerdexamplecomingressudp1to65535masterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmasterscontainerdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodescontainerdexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmasterscontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.containerd.example.com", - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "masters.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodescontainerdexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.containerd.example.com", - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1acontainerdexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1acontainerdexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablecontainerdexamplecom" - } - } - }, - "AWSEC2Subnetustest1acontainerdexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.containerd.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationcontainerdexamplecom": { - "Type": 
"AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionscontainerdexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentcontainerdexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCcontainerdexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaycontainerdexamplecom" - } - } - }, - "AWSEC2VPCcontainerdexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventscontainerdexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.containerd.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmaincontainerdexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.containerd.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemasterscontainerdexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemasterscontainerdexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodescontainerdexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodescontainerdexamplecom" - } - ] - } - }, - "AWSIAMPolicymasterscontainerdexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemasterscontainerdexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/containerd.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": 
"arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "containerd.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "containerd.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - 
"elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "containerd.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodescontainerdexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.containerd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodescontainerdexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemasterscontainerdexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.containerd.example.com", - 
"AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "masters.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodescontainerdexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.containerd.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "containerd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.containerd.example.com" - }, - { - "Key": "kubernetes.io/cluster/containerd.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/containerd/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/containerd/cloudformation.json.extracted.yaml deleted file mode 100644 index f246cb9b74b69..0000000000000 --- a/tests/integration/update_cluster/containerd/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,448 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amasterscontainerdexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! 
validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.containerd.example.com - serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: containerd.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - 
leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/containerd.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: wFIDTRELOAmyLzdZjG/y98wEvHusLiKuEyTzdB9grqs= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodescontainerdexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - 
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.containerd.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: W8uT9A2ihqTlOR5YC7ISEHGboF8gSu9FXWGPJoDtzFo= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/containerd/data/aws_iam_role_masters.containerd.example.com_policy b/tests/integration/update_cluster/containerd/data/aws_iam_role_masters.containerd.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_iam_role_masters.containerd.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/containerd/data/aws_iam_role_nodes.containerd.example.com_policy b/tests/integration/update_cluster/containerd/data/aws_iam_role_nodes.containerd.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_iam_role_nodes.containerd.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/containerd/data/aws_iam_role_policy_masters.containerd.example.com_policy b/tests/integration/update_cluster/containerd/data/aws_iam_role_policy_masters.containerd.example.com_policy new file mode 100644 index 0000000000000..b6629a1de7332 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_iam_role_policy_masters.containerd.example.com_policy @@ -0,0 +1,280 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/containerd.example.com/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + 
"Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/containerd.example.com/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "containerd.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "containerd.example.com", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + 
"elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "containerd.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/containerd/data/aws_iam_role_policy_nodes.containerd.example.com_policy b/tests/integration/update_cluster/containerd/data/aws_iam_role_policy_nodes.containerd.example.com_policy new file mode 100644 index 0000000000000..153ab3c7f64f4 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_iam_role_policy_nodes.containerd.example.com_policy @@ -0,0 +1,30 @@ +{ + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/containerd/data/aws_key_pair_kubernetes.containerd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key 
b/tests/integration/update_cluster/containerd/data/aws_key_pair_kubernetes.containerd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key
new file mode 100644
index 0000000000000..81cb0127830e7
--- /dev/null
+++ b/tests/integration/update_cluster/containerd/data/aws_key_pair_kubernetes.containerd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==
diff --git a/tests/integration/update_cluster/containerd/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data b/tests/integration/update_cluster/containerd/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data
new file mode 100644
index 0000000000000..8717c90b45a37
--- /dev/null
+++ b/tests/integration/update_cluster/containerd/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data
@@ -0,0 +1,253 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
+NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
+NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
+NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
+
+export AWS_REGION=us-test-1
+
+
+
+
+sysctl -w net.core.rmem_max=16777216 || true
+sysctl -w net.core.wmem_max=16777216 || true
+sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
+sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
+
+
+function ensure-install-dir() {
+  INSTALL_DIR="/opt/kops"
+  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
+  if [[ -d /var/lib/toolbox ]]; then
+    INSTALL_DIR="/var/lib/toolbox/kops"
+  fi
+  mkdir -p ${INSTALL_DIR}/bin
+  mkdir -p ${INSTALL_DIR}/conf
+  cd ${INSTALL_DIR}
+}
+
+# Retry a download until we get it. args: name, sha, urls
+download-or-bust() {
+  local -r file="$1"
+  local -r hash="$2"
+  local -r urls=( $(split-commas "$3") )
+
+  if [[ -f "${file}" ]]; then
+    if ! validate-hash "${file}" "${hash}"; then
+      rm -f "${file}"
+    else
+      return 0
+    fi
+  fi
+
+  while true; do
+    for url in "${urls[@]}"; do
+      commands=(
+        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
+        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
+        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
+        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
+      )
+      for cmd in "${commands[@]}"; do
+        echo "Attempting download with: ${cmd} ${url}"
+        if ! (${cmd} "${url}"); then
+          echo "== Download failed with ${cmd} =="
+          continue
+        fi
+        if ! validate-hash "${file}" "${hash}"; then
+          echo "== Hash validation of ${url} failed. Retrying. =="
+          rm -f "${file}"
+        else
+          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
+          return 0
+        fi
+      done
+    done
+
+    echo "All downloads failed; sleeping before retrying"
+    sleep 60
+  done
+}
+
+validate-hash() {
+  local -r file="$1"
+  local -r expected="$2"
+  local actual
+
+  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
+  if [[ "${actual}" != "${expected}" ]]; then
+    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
+    return 1
+  fi
+}
+
+function split-commas() {
+  echo $1 | tr "," "\n"
+}
+
+function download-release() {
+  case "$(uname -m)" in
+  x86_64*|i?86_64*|amd64*)
+    NODEUP_URL="${NODEUP_URL_AMD64}"
+    NODEUP_HASH="${NODEUP_HASH_AMD64}"
+    ;;
+  aarch64*|arm64*)
+    NODEUP_URL="${NODEUP_URL_ARM64}"
+    NODEUP_HASH="${NODEUP_HASH_ARM64}"
+    ;;
+  *)
+    echo "Unsupported host arch: $(uname -m)" >&2
+    exit 1
+    ;;
+  esac
+
+  cd ${INSTALL_DIR}/bin
+  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
+
+  chmod +x nodeup
+
+  echo "Running nodeup"
+  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
+  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
+}
+
+####################################################################################
+
+/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
+
+echo "== nodeup node config starting =="
+ensure-install-dir
+
+cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
+cloudConfig:
+  awsEBSCSIDriver:
+    enabled: false
+  manageStorageClasses: true
+containerRuntime: containerd
+containerd:
+  logLevel: info
+  version: 1.4.13
+docker:
+  skipInstall: true
+encryptionConfig: null
+etcdClusters:
+  events:
+    version: 3.4.13
+  main:
+    version: 3.4.13
+kubeAPIServer:
+  allowPrivileged: true
+  anonymousAuth: false
+  apiAudiences:
+  - kubernetes.svc.default
+  apiServerCount: 1
+  authorizationMode: AlwaysAllow
+  bindAddress: 0.0.0.0
+  cloudProvider: aws
+  enableAdmissionPlugins:
+  - NamespaceLifecycle
+  - LimitRanger
+  - ServiceAccount
+  - DefaultStorageClass
+  - DefaultTolerationSeconds
+  - MutatingAdmissionWebhook
+  - ValidatingAdmissionWebhook
+  - NodeRestriction
+  - ResourceQuota
+  etcdServers:
+  - https://127.0.0.1:4001
+  etcdServersOverrides:
+  - /events#https://127.0.0.1:4002
+  image: registry.k8s.io/kube-apiserver:v1.21.0
+  kubeletPreferredAddressTypes:
+  - InternalIP
+  - Hostname
+  - ExternalIP
+  logLevel: 2
+  requestheaderAllowedNames:
+  - aggregator
+  requestheaderExtraHeaderPrefixes:
+  - X-Remote-Extra-
+  requestheaderGroupHeaders:
+  - X-Remote-Group
+  requestheaderUsernameHeaders:
+  - X-Remote-User
+  securePort: 443
+  serviceAccountIssuer: https://api.internal.containerd.example.com
+  serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks
+  serviceClusterIPRange: 100.64.0.0/13
+  storageBackend: etcd3
+kubeControllerManager:
+  allocateNodeCIDRs: true
+  attachDetachReconcileSyncPeriod: 1m0s
+  cloudProvider: aws
+  clusterCIDR: 100.96.0.0/11
+  clusterName: containerd.example.com
+  configureCloudRoutes: false
+  image: registry.k8s.io/kube-controller-manager:v1.21.0
+  leaderElection:
+    leaderElect: true
+  logLevel: 2
+  useServiceAccountCredentials: true
+kubeProxy:
+  clusterCIDR: 100.96.0.0/11
+  cpuRequest: 100m
+  image: registry.k8s.io/kube-proxy:v1.21.0
+  logLevel: 2
+kubeScheduler:
+  image: registry.k8s.io/kube-scheduler:v1.21.0
+  leaderElection:
+    leaderElect: true
+  logLevel: 2
+kubelet:
+  anonymousAuth: false
+  cgroupDriver: systemd
+  cgroupRoot: /
+  
cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/containerd.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: Master +NodeupConfigHash: wFIDTRELOAmyLzdZjG/y98wEvHusLiKuEyTzdB9grqs= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/containerd/data/aws_launch_template_nodes.containerd.example.com_user_data b/tests/integration/update_cluster/containerd/data/aws_launch_template_nodes.containerd.example.com_user_data new file mode 100644 index 0000000000000..bd66c79990a95 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_launch_template_nodes.containerd.example.com_user_data @@ -0,0 +1,193 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! 
validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.4.13 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + 
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + server: https://kops-controller.internal.containerd.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: W8uT9A2ihqTlOR5YC7ISEHGboF8gSu9FXWGPJoDtzFo= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000000..0ce304c3eaac2 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,181 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: containerd.example.com +spec: + api: + dns: {} + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: memfs://clusters.example.com/containerd.example.com + configStore: memfs://clusters.example.com/containerd.example.com + containerRuntime: containerd + containerd: + logLevel: info + version: 1.4.13 + dnsZone: Z1AFAKE1ZON3YO + docker: + skipInstall: true + etcdClusters: + - backups: + backupStore: memfs://clusters.example.com/containerd.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + version: 3.4.13 + - backups: + backupStore: memfs://clusters.example.com/containerd.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + version: 3.4.13 + externalDns: + provider: dns-controller + iam: + legacy: false + keyStore: memfs://clusters.example.com/containerd.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User 
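+    # the requestheader* fields above configure trust for the API aggregation + # layer: requests proxied by the "aggregator" front-proxy identity are + # authenticated through these headers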
+ securePort: 443 + serviceAccountIssuer: https://api.internal.containerd.example.com + serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: containerd.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + kubernetesVersion: 1.21.0 + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.containerd.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: memfs://clusters.example.com/containerd.example.com/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-bootstrap_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-bootstrap_content new file mode 100644 index 0000000000000..8c05ab188640e --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-bootstrap_content @@ -0,0 +1,48 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 885c107be5524b5aefca6e9b368d92c929e5abf009086acf49328e40a44c0d5e + name: 
kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: cd1e8f47fe52b13fee5536b0d4b4429ef256829d87a51cbc189fa0f21ff3503b + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 6c8f01b2470d323965dfb22d410f322e0b429f7acc3831f41a763ec072dfc69b + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..fd5b8a7c053f2 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.9.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + 
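# these shared addon labels let the kops channels tool identify and manage + # the objects that belong to this addon + 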
app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..2eab063fdb45a --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.26.0-alpha.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + 
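# restrict dns-controller to control-plane nodes; both the legacy + # node-role.kubernetes.io/master label and the newer control-plane label + # are matched to cover different Kubernetes versions + 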
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.26.0-alpha.1 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000000..e2792373032c4 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"memfs://clusters.example.com/containerd.example.com","secretStore":"memfs://clusters.example.com/containerd.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.containerd.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + 
addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.26.0-alpha.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.containerd.example.com + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.26.0-alpha.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: 
system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000000..36761e1c56255 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000000..4dcdce48b9ab9 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000000..21efd54326518 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content 
@@ -0,0 +1,98 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000000000..b7340298dcdd5 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.21.0-alpha.1 diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content 
b/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content new file mode 100644 index 0000000000000..8b661481b3a5c --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content @@ -0,0 +1,62 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/containerd.example.com/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.containerd.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/containerd.example.com=owned > /tmp/pipe + 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content new file mode 100644 index 0000000000000..a4e37fec06100 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content @@ -0,0 +1,62 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/containerd.example.com/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.containerd.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/containerd.example.com=owned > /tmp/pipe + 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - 
mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000000..5cb249fea763e --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.26.0-alpha.1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000000..6505459738007 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,276 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.containerd.example.com + serviceAccountJWKSURI: https://api.internal.containerd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN 
RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz + - 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + 
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + 
vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + 
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: containerd.example.com +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/master=:NoSchedule +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/containerd.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 +etcdManifests: +- memfs://clusters.example.com/containerd.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/containerd.example.com/manifests/etcd/events-master-us-test-1a.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/tests/integration/update_cluster/containerd/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/containerd/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000000..ab8c9dda69ed1 --- /dev/null +++ b/tests/integration/update_cluster/containerd/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,44 @@ +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + arm64: + - 
17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz +CAs: {} +ClusterName: containerd.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/containerd.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 diff --git a/tests/integration/update_cluster/containerd/in-v1alpha2.yaml b/tests/integration/update_cluster/containerd/in-v1alpha2.yaml index 4c4e1b4fd927f..ceaa36fe47fe7 100644 --- a/tests/integration/update_cluster/containerd/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/containerd/in-v1alpha2.yaml @@ -23,7 +23,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.containerd.example.com masterPublicName: api.containerd.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/containerd/kubernetes.tf b/tests/integration/update_cluster/containerd/kubernetes.tf new file mode 100644 index 0000000000000..fc522fb5c69c1 --- /dev/null +++ b/tests/integration/update_cluster/containerd/kubernetes.tf @@ -0,0 +1,820 @@ +locals { + cluster_name = "containerd.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-containerd-example-com.id] + master_security_group_ids = [aws_security_group.masters-containerd-example-com.id] + masters_role_arn = aws_iam_role.masters-containerd-example-com.arn + masters_role_name = aws_iam_role.masters-containerd-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-containerd-example-com.id] + node_security_group_ids = [aws_security_group.nodes-containerd-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-containerd-example-com.id] + nodes_role_arn = aws_iam_role.nodes-containerd-example-com.arn + nodes_role_name = aws_iam_role.nodes-containerd-example-com.name + region = "us-test-1" + route_table_public_id = aws_route_table.containerd-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-containerd-example-com.id + vpc_cidr_block = aws_vpc.containerd-example-com.cidr_block + vpc_id = aws_vpc.containerd-example-com.id +} + +output "cluster_name" { + value = "containerd.example.com" +} + +output "master_autoscaling_group_ids" { 
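+  # each output mirrors the corresponding entry in the locals block above so + # that other Terraform configurations can consume the cluster's resource IDs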
+ value = [aws_autoscaling_group.master-us-test-1a-masters-containerd-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-containerd-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-containerd-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-containerd-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-containerd-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-containerd-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-containerd-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-containerd-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-containerd-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = aws_route_table.containerd-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-containerd-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.containerd-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.containerd-example-com.id +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-containerd-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-containerd-example-com.id + version = aws_launch_template.master-us-test-1a-masters-containerd-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.containerd.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "containerd.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.containerd.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/containerd.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-containerd-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-containerd-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", 
"GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-containerd-example-com.id + version = aws_launch_template.nodes-containerd-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.containerd.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "containerd.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.containerd.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/containerd.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-containerd-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-containerd-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "us-test-1a.etcd-events.containerd.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-containerd-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "us-test-1a.etcd-main.containerd.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_iam_instance_profile" "masters-containerd-example-com" { + name = "masters.containerd.example.com" + role = aws_iam_role.masters-containerd-example-com.name + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "masters.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-containerd-example-com" { + name = "nodes.containerd.example.com" + role = aws_iam_role.nodes-containerd-example-com.name + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_role" "masters-containerd-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.containerd.example.com_policy") + name = "masters.containerd.example.com" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "masters.containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-containerd-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.containerd.example.com_policy") + name = "nodes.containerd.example.com" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + 
"kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-containerd-example-com" { + name = "masters.containerd.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.containerd.example.com_policy") + role = aws_iam_role.masters-containerd-example-com.name +} + +resource "aws_iam_role_policy" "nodes-containerd-example-com" { + name = "nodes.containerd.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.containerd.example.com_policy") + role = aws_iam_role.nodes-containerd-example-com.name +} + +resource "aws_internet_gateway" "containerd-example-com" { + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_key_pair" "kubernetes-containerd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.containerd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.containerd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-containerd-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-containerd-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-containerd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.containerd.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-containerd-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "master-us-test-1a.masters.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "master-us-test-1a.masters.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "master-us-test-1a.masters.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.containerd.example.com_user_data") +} + +resource "aws_launch_template" "nodes-containerd-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-containerd-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-containerd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "nodes.containerd.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-containerd-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "nodes.containerd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + 
"kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.containerd.example.com_user_data") +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.containerd-example-com.id + route_table_id = aws_route_table.containerd-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.containerd-example-com.id + route_table_id = aws_route_table.containerd-example-com.id +} + +resource "aws_route_table" "containerd-example-com" { + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_route_table_association" "us-test-1a-containerd-example-com" { + route_table_id = aws_route_table.containerd-example-com.id + subnet_id = aws_subnet.us-test-1a-containerd-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/containerd.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-bootstrap_content") + key = "clusters.example.com/containerd.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/containerd.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/containerd.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/containerd.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/containerd.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-limit-range-addons-k8s-io" { + 
bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-limit-range.addons.k8s.io_content") +  key                    = "clusters.example.com/containerd.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "containerd-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_containerd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") +  key                    = "clusters.example.com/containerd.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") +  key                    = "clusters.example.com/containerd.example.com/backups/etcd/events/control/etcd-cluster-spec" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") +  key                    = "clusters.example.com/containerd.example.com/backups/etcd/main/control/etcd-cluster-spec" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_kops-version.txt_content") +  key                    = "clusters.example.com/containerd.example.com/kops-version.txt" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") +  key                    = "clusters.example.com/containerd.example.com/manifests/etcd/events-master-us-test-1a.yaml" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") +  key                    = "clusters.example.com/containerd.example.com/manifests/etcd/main-master-us-test-1a.yaml" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") +  key                    = "clusters.example.com/containerd.example.com/manifests/static/kube-apiserver-healthcheck.yaml" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") +  key                    = "clusters.example.com/containerd.example.com/igconfig/master/master-us-test-1a/nodeupconfig.yaml" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { +  bucket                 = "testingBucket" +  content                = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") +  key                    = "clusters.example.com/containerd.example.com/igconfig/node/nodes/nodeupconfig.yaml" +  provider               = aws.files +  server_side_encryption = "AES256" +} + +resource "aws_security_group" "masters-containerd-example-com" { +  description = "Security group for masters"
+  name        = "masters.containerd.example.com" +  tags = { +    "KubernetesCluster"                                = "containerd.example.com" +    "Name"                                             = "masters.containerd.example.com" +    "kubernetes.io/cluster/containerd.example.com" = "owned" +  } +  vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_security_group" "nodes-containerd-example-com" { +  description = "Security group for nodes" +  name        = "nodes.containerd.example.com" +  tags = { +    "KubernetesCluster"                                = "containerd.example.com" +    "Name"                                             = "nodes.containerd.example.com" +    "kubernetes.io/cluster/containerd.example.com" = "owned" +  } +  vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-containerd-example-com" { +  cidr_blocks       = ["0.0.0.0/0"] +  from_port         = 22 +  protocol          = "tcp" +  security_group_id = aws_security_group.masters-containerd-example-com.id +  to_port           = 22 +  type              = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-containerd-example-com" { +  cidr_blocks       = ["0.0.0.0/0"] +  from_port         = 22 +  protocol          = "tcp" +  security_group_id = aws_security_group.nodes-containerd-example-com.id +  to_port           = 22 +  type              = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-containerd-example-com" { +  cidr_blocks       = ["0.0.0.0/0"] +  from_port         = 443 +  protocol          = "tcp" +  security_group_id = aws_security_group.masters-containerd-example-com.id +  to_port           = 443 +  type              = "ingress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-egress-all-0to0-0-0-0-0--0" { +  cidr_blocks       = ["0.0.0.0/0"] +  from_port         = 0 +  protocol          = "-1" +  security_group_id = aws_security_group.masters-containerd-example-com.id +  to_port           = 0 +  type              = "egress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-egress-all-0to0-__--0" { +  from_port         = 0 +  ipv6_cidr_blocks  = ["::/0"] +  protocol          = "-1" +  security_group_id = aws_security_group.masters-containerd-example-com.id +  to_port           = 0 +  type              = "egress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-ingress-all-0to0-masters-containerd-example-com" { +  from_port                = 0 +  protocol                 = "-1" +  security_group_id        = aws_security_group.masters-containerd-example-com.id +  source_security_group_id = aws_security_group.masters-containerd-example-com.id +  to_port                  = 0 +  type                     = "ingress" +} + +resource "aws_security_group_rule" "from-masters-containerd-example-com-ingress-all-0to0-nodes-containerd-example-com" { +  from_port                = 0 +  protocol                 = "-1" +  security_group_id        = aws_security_group.nodes-containerd-example-com.id +  source_security_group_id = aws_security_group.masters-containerd-example-com.id +  to_port                  = 0 +  type                     = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-egress-all-0to0-0-0-0-0--0" { +  cidr_blocks       = ["0.0.0.0/0"] +  from_port         = 0 +  protocol          = "-1" +  security_group_id = aws_security_group.nodes-containerd-example-com.id +  to_port           = 0 +  type              = "egress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-egress-all-0to0-__--0" { +  from_port         = 0 +  ipv6_cidr_blocks  = ["::/0"] +  protocol          = "-1" +  security_group_id = aws_security_group.nodes-containerd-example-com.id +  to_port           = 0 +  type              = "egress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-all-0to0-nodes-containerd-example-com" { +  from_port                = 0 +  protocol                 = "-1" +  security_group_id        = aws_security_group.nodes-containerd-example-com.id +  source_security_group_id = 
aws_security_group.nodes-containerd-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-tcp-1to2379-masters-containerd-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-tcp-2382to4000-masters-containerd-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-tcp-4003to65535-masters-containerd-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-containerd-example-com-ingress-udp-1to65535-masters-containerd-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-containerd-example-com.id + source_security_group_id = aws_security_group.nodes-containerd-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-containerd-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "us-test-1a.containerd.example.com" + "SubnetType" = "Public" + "kops.k8s.io/instance-group/master-us-test-1a" = "true" + "kops.k8s.io/instance-group/nodes" = "true" + "kubernetes.io/cluster/containerd.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.containerd-example-com.id +} + +resource "aws_vpc" "containerd-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "containerd-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "containerd.example.com" + "Name" = "containerd.example.com" + "kubernetes.io/cluster/containerd.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "containerd-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.containerd-example-com.id + vpc_id = aws_vpc.containerd-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +} diff --git a/tests/integration/update_cluster/digit/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/digit/data/aws_s3_object_cluster-completed.spec_content index eaeb55a930672..b34afafab7ce6 100644 --- a/tests/integration/update_cluster/digit/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/digit/data/aws_s3_object_cluster-completed.spec_content @@ -164,7 +164,6 
@@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.123.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/digit/in-v1alpha2.yaml b/tests/integration/update_cluster/digit/in-v1alpha2.yaml index 212987f9c69dd..3d51c3ecdacb5 100644 --- a/tests/integration/update_cluster/digit/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/digit/in-v1alpha2.yaml @@ -44,7 +44,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.123.example.com masterPublicName: api.123.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/docker-custom/cloudformation.json b/tests/integration/update_cluster/docker-custom/cloudformation.json deleted file mode 100644 index 5d734acfa0294..0000000000000 --- a/tests/integration/update_cluster/docker-custom/cloudformation.json +++ /dev/null @@ -1,1349 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersdockerexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.docker.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersdockerexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersdockerexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1adockerexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.docker.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesdockerexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.docker.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesdockerexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesdockerexamplecom", - "LatestVersionNumber" - ] - } 
- }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1adockerexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.docker.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsdockerexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaydockerexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersdockerexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.docker.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersdockerexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.docker.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.docker.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.docker.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesdockerexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.docker.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesdockerexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.docker.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "nodes.docker.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "nodes.docker.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { 
- "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTabledockerexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaydockerexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTabledockerexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaydockerexamplecom" - } - } - }, - "AWSEC2RouteTabledockerexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCdockerexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersdockerexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersdockerexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesdockerexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesdockerexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersdockerexamplecomingressall0to0mastersdockerexamplecom": { - "Type": 
"AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersdockerexamplecomingressall0to0nodesdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesdockerexamplecomingressall0to0nodesdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesdockerexamplecomingresstcp1to2379mastersdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesdockerexamplecomingresstcp2382to4000mastersdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesdockerexamplecomingresstcp4003to65535mastersdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesdockerexamplecomingressudp1to65535mastersdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersdockerexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesdockerexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.docker.example.com", - "VpcId": { - "Ref": "AWSEC2VPCdockerexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "masters.docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesdockerexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.docker.example.com", - "VpcId": { - "Ref": "AWSEC2VPCdockerexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "nodes.docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - 
"Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1adockerexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1adockerexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTabledockerexamplecom" - } - } - }, - "AWSEC2Subnetustest1adockerexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCdockerexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.docker.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCdockerexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationdockerexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCdockerexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsdockerexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentdockerexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCdockerexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaydockerexamplecom" - } - } - }, - "AWSEC2VPCdockerexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsdockerexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.docker.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmaindockerexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.docker.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersdockerexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - 
"Properties": { - "InstanceProfileName": "masters.docker.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersdockerexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesdockerexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.docker.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesdockerexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersdockerexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.docker.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersdockerexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "docker.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/docker.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/docker.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/docker.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "docker.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "docker.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "docker.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - 
"aws:ResourceTag/KubernetesCluster": "docker.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "docker.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "docker.example.com" - } - }, - 
"Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesdockerexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.docker.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesdockerexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersdockerexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.docker.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "masters.docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesdockerexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.docker.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "docker.example.com" - }, - { - "Key": "Name", - "Value": "nodes.docker.example.com" - }, - { - "Key": "kubernetes.io/cluster/docker.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/docker-custom/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/docker-custom/cloudformation.json.extracted.yaml deleted file mode 100644 index 6009d3776b5ed..0000000000000 --- a/tests/integration/update_cluster/docker-custom/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,480 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersdockerexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under 
/var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: docker - containerd: - configOverride: | - disabled_plugins = ["cri"] - logLevel: info - docker: - execOpt: - - native.cgroupdriver=systemd - ipMasq: false - ipTables: false - logDriver: json-file - logLevel: info - logOpt: - - max-size=10m - - max-file=5 - packages: - hashAmd64: 000000000000000000000000000000000000000000000000000000000000000a - hashArm64: 000000000000000000000000000000000000000000000000000000000000000b - urlAmd64: https://download.docker.com/linux/static/stable/x86_64/docker-20.10.1.tgz - urlArm64: https://download.docker.com/linux/static/stable/aarch64/docker-20.10.1.tgz - storage: overlay2,overlay,aufs - version: 20.10.17 - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - 
anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.docker.example.com - serviceAccountJWKSURI: https://api.internal.docker.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: docker.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/docker.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: dQoLRGMD60cy0GlQpNjoFxN+Z4ZOHecktBvo2KFfzxE= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesdockerexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - 
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: docker - containerd: - configOverride: | - disabled_plugins = ["cri"] - logLevel: info - docker: - execOpt: - - native.cgroupdriver=systemd - ipMasq: false - ipTables: false - logDriver: json-file - logLevel: info - logOpt: - - max-size=10m - - max-file=5 - packages: - hashAmd64: 000000000000000000000000000000000000000000000000000000000000000a - hashArm64: 000000000000000000000000000000000000000000000000000000000000000b - urlAmd64: https://download.docker.com/linux/static/stable/x86_64/docker-20.10.1.tgz - urlArm64: https://download.docker.com/linux/static/stable/aarch64/docker-20.10.1.tgz - storage: overlay2,overlay,aufs - version: 20.10.17 - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - 
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.docker.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: t1TG9fUtjlCD8/C+cRe88w3kNJffjnRJTzaXGIigpDM= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/docker-custom/data/aws_iam_role_masters.docker.example.com_policy b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_masters.docker.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_masters.docker.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_iam_role_nodes.docker.example.com_policy b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_nodes.docker.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_nodes.docker.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_iam_role_policy_masters.docker.example.com_policy b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_policy_masters.docker.example.com_policy new file mode 100644 index 0000000000000..0d8e82ac4f4cc --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_policy_masters.docker.example.com_policy @@ -0,0 +1,280 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "docker.example.com", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/docker.example.com/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/docker.example.com/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/docker.example.com/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + 
"s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "docker.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "docker.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "docker.example.com", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "docker.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + 
"kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "docker.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "docker.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_iam_role_policy_nodes.docker.example.com_policy b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_policy_nodes.docker.example.com_policy new file mode 100644 index 0000000000000..153ab3c7f64f4 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_iam_role_policy_nodes.docker.example.com_policy @@ -0,0 +1,30 @@ +{ + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_key_pair_kubernetes.docker.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key b/tests/integration/update_cluster/docker-custom/data/aws_key_pair_kubernetes.docker.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key new file mode 100644 index 0000000000000..81cb0127830e7 --- /dev/null +++ 
b/tests/integration/update_cluster/docker-custom/data/aws_key_pair_kubernetes.docker.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/docker-custom/data/aws_launch_template_master-us-test-1a.masters.docker.example.com_user_data b/tests/integration/update_cluster/docker-custom/data/aws_launch_template_master-us-test-1a.masters.docker.example.com_user_data new file mode 100644 index 0000000000000..001c03b7a7864 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_launch_template_master-us-test-1a.masters.docker.example.com_user_data @@ -0,0 +1,269 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} ${url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: docker +containerd: + configOverride: | + disabled_plugins = ["cri"] + logLevel: info +docker: + execOpt: + - native.cgroupdriver=systemd + ipMasq: false + ipTables: false + logDriver: json-file + logLevel: info + logOpt: + - max-size=10m + - max-file=5 + packages: + hashAmd64: 000000000000000000000000000000000000000000000000000000000000000a + hashArm64: 000000000000000000000000000000000000000000000000000000000000000b + urlAmd64: https://download.docker.com/linux/static/stable/x86_64/docker-20.10.1.tgz + urlArm64: https://download.docker.com/linux/static/stable/aarch64/docker-20.10.1.tgz + storage: overlay2,overlay,aufs + version: 20.10.17 +encryptionConfig: null +etcdClusters: + events: + version: 3.4.13 + main: + version: 3.4.13 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.docker.example.com + serviceAccountJWKSURI: https://api.internal.docker.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + 
clusterCIDR: 100.96.0.0/11 + clusterName: docker.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/docker.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: Master +NodeupConfigHash: dQoLRGMD60cy0GlQpNjoFxN+Z4ZOHecktBvo2KFfzxE= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/docker-custom/data/aws_launch_template_nodes.docker.example.com_user_data b/tests/integration/update_cluster/docker-custom/data/aws_launch_template_nodes.docker.example.com_user_data new file mode 100644 index 0000000000000..5c911195185e2 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_launch_template_nodes.docker.example.com_user_data @@ -0,0 +1,209 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + 
cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} ${url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: docker +containerd: + configOverride: | + disabled_plugins = ["cri"] + logLevel: info +docker: + execOpt: + - native.cgroupdriver=systemd + ipMasq: false + ipTables: false + logDriver: json-file + logLevel: info + logOpt: + - max-size=10m + - max-file=5 + packages: + hashAmd64: 000000000000000000000000000000000000000000000000000000000000000a + hashArm64: 000000000000000000000000000000000000000000000000000000000000000b + urlAmd64: https://download.docker.com/linux/static/stable/x86_64/docker-20.10.1.tgz + urlArm64: https://download.docker.com/linux/static/stable/aarch64/docker-20.10.1.tgz + storage: overlay2,overlay,aufs + version: 20.10.17 +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: 
memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + server: https://kops-controller.internal.docker.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: t1TG9fUtjlCD8/C+cRe88w3kNJffjnRJTzaXGIigpDM= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000000..5f674a8c75b5e --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,197 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: docker.example.com +spec: + api: + dns: {} + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: memfs://clusters.example.com/docker.example.com + configStore: memfs://clusters.example.com/docker.example.com + containerRuntime: docker + containerd: + configOverride: | + disabled_plugins = ["cri"] + logLevel: info + dnsZone: Z1AFAKE1ZON3YO + docker: + execOpt: + - native.cgroupdriver=systemd + ipMasq: false + ipTables: false + logDriver: json-file + logLevel: info + logOpt: + - max-size=10m + - max-file=5 + packages: + hashAmd64: 000000000000000000000000000000000000000000000000000000000000000a + hashArm64: 000000000000000000000000000000000000000000000000000000000000000b + urlAmd64: https://download.docker.com/linux/static/stable/x86_64/docker-20.10.1.tgz + urlArm64: https://download.docker.com/linux/static/stable/aarch64/docker-20.10.1.tgz + storage: overlay2,overlay,aufs + version: 20.10.17 + etcdClusters: + - backups: + backupStore: 
memfs://clusters.example.com/docker.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + version: 3.4.13 + - backups: + backupStore: memfs://clusters.example.com/docker.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + version: 3.4.13 + externalDns: + provider: dns-controller + iam: + legacy: false + keyStore: memfs://clusters.example.com/docker.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.docker.example.com + serviceAccountJWKSURI: https://api.internal.docker.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: docker.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + kubernetesVersion: 1.21.0 + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + 
podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.docker.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: memfs://clusters.example.com/docker.example.com/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-bootstrap_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-bootstrap_content new file mode 100644 index 0000000000000..d0edb2e52c73a --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-bootstrap_content @@ -0,0 +1,48 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 90aba87cf7564df41d4e8653e5d38547cf280787833eea28c4a045f7bf50a8f3 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: cd1e8f47fe52b13fee5536b0d4b4429ef256829d87a51cbc189fa0f21ff3503b + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 6c8f01b2470d323965dfb22d410f322e0b429f7acc3831f41a763ec072dfc69b + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..fd5b8a7c053f2 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.9.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: 
kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: 
system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..2eab063fdb45a --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.26.0-alpha.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.26.0-alpha.1 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: 
rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000000..0d0e9d691dc27 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"memfs://clusters.example.com/docker.example.com","secretStore":"memfs://clusters.example.com/docker.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.docker.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.26.0-alpha.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.docker.example.com + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.26.0-alpha.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + 
name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000000..36761e1c56255 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git 
a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000000..4dcdce48b9ab9 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000000..21efd54326518 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_docker.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,98 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_etcd-cluster-spec-events_content 
b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000000000..b7340298dcdd5 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.21.0-alpha.1 diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content new file mode 100644 index 0000000000000..bdaa72827454e --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/docker.example.com/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.docker.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/docker.example.com=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content 
b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content new file mode 100644 index 0000000000000..b15ddbeaa3111 --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/docker.example.com/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.docker.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/docker.example.com=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000000..5cb249fea763e --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.26.0-alpha.1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000000..d5cc2df51a940 --- /dev/null +++ 
b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,273 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.docker.example.com + serviceAccountJWKSURI: https://api.internal.docker.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 000000000000000000000000000000000000000000000000000000000000000a@https://download.docker.com/linux/static/stable/x86_64/docker-20.10.1.tgz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - 000000000000000000000000000000000000000000000000000000000000000b@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.1.tgz + - 
2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN 
CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + 
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: docker.example.com +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/master=:NoSchedule +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/docker.example.com/addons/bootstrap-channel.yaml +etcdManifests: +- 
memfs://clusters.example.com/docker.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/docker.example.com/manifests/etcd/events-master-us-test-1a.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/tests/integration/update_cluster/docker-custom/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000000..b9867c81c791f --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,41 @@ +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 000000000000000000000000000000000000000000000000000000000000000a@https://download.docker.com/linux/static/stable/x86_64/docker-20.10.1.tgz + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - 000000000000000000000000000000000000000000000000000000000000000b@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.1.tgz +CAs: {} +ClusterName: docker.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/docker.example.com/addons/bootstrap-channel.yaml diff --git a/tests/integration/update_cluster/docker-custom/in-v1alpha2.yaml b/tests/integration/update_cluster/docker-custom/in-v1alpha2.yaml index 6e95229472ceb..f90d56fa7e7bf 100644 --- a/tests/integration/update_cluster/docker-custom/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/docker-custom/in-v1alpha2.yaml @@ -29,7 +29,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.docker.example.com masterPublicName: api.docker.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/docker-custom/kubernetes.tf 
b/tests/integration/update_cluster/docker-custom/kubernetes.tf new file mode 100644 index 0000000000000..bb8acf5e881ae --- /dev/null +++ b/tests/integration/update_cluster/docker-custom/kubernetes.tf @@ -0,0 +1,820 @@ +locals { + cluster_name = "docker.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-docker-example-com.id] + master_security_group_ids = [aws_security_group.masters-docker-example-com.id] + masters_role_arn = aws_iam_role.masters-docker-example-com.arn + masters_role_name = aws_iam_role.masters-docker-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-docker-example-com.id] + node_security_group_ids = [aws_security_group.nodes-docker-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-docker-example-com.id] + nodes_role_arn = aws_iam_role.nodes-docker-example-com.arn + nodes_role_name = aws_iam_role.nodes-docker-example-com.name + region = "us-test-1" + route_table_public_id = aws_route_table.docker-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-docker-example-com.id + vpc_cidr_block = aws_vpc.docker-example-com.cidr_block + vpc_id = aws_vpc.docker-example-com.id +} + +output "cluster_name" { + value = "docker.example.com" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-docker-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-docker-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-docker-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-docker-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-docker-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-docker-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-docker-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-docker-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-docker-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = aws_route_table.docker-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-docker-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.docker-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.docker-example-com.id +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-docker-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-docker-example-com.id + version = aws_launch_template.master-us-test-1a-masters-docker-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.docker.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "docker.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.docker.example.com" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/docker.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-docker-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-docker-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-docker-example-com.id + version = aws_launch_template.nodes-docker-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.docker.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "docker.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.docker.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/docker.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-docker-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-docker-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "us-test-1a.etcd-events.docker.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-docker-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "us-test-1a.etcd-main.docker.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_iam_instance_profile" "masters-docker-example-com" { + name = "masters.docker.example.com" + role = aws_iam_role.masters-docker-example-com.name + tags = { + 
"KubernetesCluster" = "docker.example.com" + "Name" = "masters.docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-docker-example-com" { + name = "nodes.docker.example.com" + role = aws_iam_role.nodes-docker-example-com.name + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "nodes.docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } +} + +resource "aws_iam_role" "masters-docker-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.docker.example.com_policy") + name = "masters.docker.example.com" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "masters.docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-docker-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.docker.example.com_policy") + name = "nodes.docker.example.com" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "nodes.docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-docker-example-com" { + name = "masters.docker.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.docker.example.com_policy") + role = aws_iam_role.masters-docker-example-com.name +} + +resource "aws_iam_role_policy" "nodes-docker-example-com" { + name = "nodes.docker.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.docker.example.com_policy") + role = aws_iam_role.nodes-docker-example-com.name +} + +resource "aws_internet_gateway" "docker-example-com" { + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + vpc_id = aws_vpc.docker-example-com.id +} + +resource "aws_key_pair" "kubernetes-docker-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.docker.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.docker.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-docker-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-docker-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-docker-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.docker.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-docker-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = 
"docker.example.com" + "Name" = "master-us-test-1a.masters.docker.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "master-us-test-1a.masters.docker.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "master-us-test-1a.masters.docker.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.docker.example.com_user_data") +} + +resource "aws_launch_template" "nodes-docker-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-docker-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-docker-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "nodes.docker.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-docker-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "nodes.docker.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + 
"k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "nodes.docker.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "nodes.docker.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.docker.example.com_user_data") +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.docker-example-com.id + route_table_id = aws_route_table.docker-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.docker-example-com.id + route_table_id = aws_route_table.docker-example-com.id +} + +resource "aws_route_table" "docker-example-com" { + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.docker-example-com.id +} + +resource "aws_route_table_association" "us-test-1a-docker-example-com" { + route_table_id = aws_route_table.docker-example-com.id + subnet_id = aws_subnet.us-test-1a-docker-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/docker.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "docker-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_docker.example.com-addons-bootstrap_content") + key = "clusters.example.com/docker.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "docker-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_docker.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/docker.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "docker-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_docker.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/docker.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "docker-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = 
file("${path.module}/data/aws_s3_object_docker.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/docker.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "docker-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_docker.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/docker.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "docker-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_docker.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/docker.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "docker-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_docker.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/docker.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/docker.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "clusters.example.com/docker.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "clusters.example.com/docker.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") + key = "clusters.example.com/docker.example.com/manifests/etcd/events-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") + key = "clusters.example.com/docker.example.com/manifests/etcd/main-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/docker.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" 
"nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/docker.example.com/igconfig/master/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "clusters.example.com/docker.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "masters-docker-example-com" { + description = "Security group for masters" + name = "masters.docker.example.com" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "masters.docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + vpc_id = aws_vpc.docker-example-com.id +} + +resource "aws_security_group" "nodes-docker-example-com" { + description = "Security group for nodes" + name = "nodes.docker.example.com" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "nodes.docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } + vpc_id = aws_vpc.docker-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-docker-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-docker-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-docker-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-docker-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-docker-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-docker-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-docker-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-docker-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-docker-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-docker-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-docker-example-com-ingress-all-0to0-masters-docker-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-docker-example-com.id + source_security_group_id = aws_security_group.masters-docker-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-docker-example-com-ingress-all-0to0-nodes-docker-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-docker-example-com.id + source_security_group_id = aws_security_group.masters-docker-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-docker-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = 
aws_security_group.nodes-docker-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-docker-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-docker-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-docker-example-com-ingress-all-0to0-nodes-docker-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-docker-example-com.id + source_security_group_id = aws_security_group.nodes-docker-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-docker-example-com-ingress-tcp-1to2379-masters-docker-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-docker-example-com.id + source_security_group_id = aws_security_group.nodes-docker-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-docker-example-com-ingress-tcp-2382to4000-masters-docker-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-docker-example-com.id + source_security_group_id = aws_security_group.nodes-docker-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-docker-example-com-ingress-tcp-4003to65535-masters-docker-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-docker-example-com.id + source_security_group_id = aws_security_group.nodes-docker-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-docker-example-com-ingress-udp-1to65535-masters-docker-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-docker-example-com.id + source_security_group_id = aws_security_group.nodes-docker-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-docker-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "us-test-1a.docker.example.com" + "SubnetType" = "Public" + "kops.k8s.io/instance-group/master-us-test-1a" = "true" + "kops.k8s.io/instance-group/nodes" = "true" + "kubernetes.io/cluster/docker.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.docker-example-com.id +} + +resource "aws_vpc" "docker-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "docker-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "docker.example.com" + "Name" = "docker.example.com" + "kubernetes.io/cluster/docker.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "docker-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.docker-example-com.id + vpc_id = aws_vpc.docker-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 
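
The three nodes-to-masters TCP rules above (1-2379, 2382-4000, 4003-65535) look arbitrary until you notice they are the complement of a small set of ports that worker nodes must not reach on the control plane: 2380-2381 and 4001-4002, which are etcd-related. A Go sketch of that derivation; the blocked set here is inferred from the rules themselves rather than taken from kops source:

```go
package main

import (
	"fmt"
	"sort"
)

// allowedRanges returns the contiguous port ranges from 1..max that remain
// after removing the blocked ports — exactly the shape of the three TCP
// ingress rules above. (The UDP rule has no exclusions: 1-65535.)
func allowedRanges(blocked []int, max int) [][2]int {
	sort.Ints(blocked)
	var out [][2]int
	start := 1
	for _, p := range blocked {
		if p > start {
			out = append(out, [2]int{start, p - 1})
		}
		start = p + 1
	}
	if start <= max {
		out = append(out, [2]int{start, max})
	}
	return out
}

func main() {
	fmt.Println(allowedRanges([]int{2380, 2381, 4001, 4002}, 65535))
	// [[1 2379] [2382 4000] [4003 65535]]
}
```
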
4.0.0" + } + } +} diff --git a/tests/integration/update_cluster/existing_iam/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/existing_iam/data/aws_s3_object_cluster-completed.spec_content index 96a4213e30401..6989791b446cf 100644 --- a/tests/integration/update_cluster/existing_iam/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/existing_iam/data/aws_s3_object_cluster-completed.spec_content @@ -150,7 +150,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.existing-iam.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/existing_iam_cloudformation/cloudformation.json b/tests/integration/update_cluster/existing_iam_cloudformation/cloudformation.json deleted file mode 100644 index 7c36b5091e5f8..0000000000000 --- a/tests/integration/update_cluster/existing_iam_cloudformation/cloudformation.json +++ /dev/null @@ -1,927 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - 
"AWSEC2LaunchTemplatenodesminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": "kops-custom-master-role" - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - 
"Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": "kops-custom-node-role" - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": 
"node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2RouteTableminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": 
{ - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": 
"kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2VPCminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git 
a/tests/integration/update_cluster/existing_iam_cloudformation/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/existing_iam_cloudformation/cloudformation.json.extracted.yaml deleted file mode 100644 index 202be63780a11..0000000000000 --- a/tests/integration/update_cluster/existing_iam_cloudformation/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,448 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal.example.com - serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - 
cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: 60mXYDhn2rgNTlX5fCqkcYlpIoYatEz0xmlTKl5N5Tc= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! 
(${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - 
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: 9DUpZRVmc6wlDASGHOilemwB8uS48cVjaxMtHdeu8oE= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/existing_iam_cloudformation/in-v1alpha2.yaml b/tests/integration/update_cluster/existing_iam_cloudformation/in-v1alpha2.yaml deleted file mode 100644 index 93a0bf3a778a9..0000000000000 --- a/tests/integration/update_cluster/existing_iam_cloudformation/in-v1alpha2.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: kops.k8s.io/v1alpha2 -kind: Cluster -metadata: - creationTimestamp: "2016-12-10T22:42:27Z" - name: minimal.example.com -spec: - kubernetesApiAccess: - - 0.0.0.0/0 - channel: stable - cloudProvider: aws - configBase: memfs://clusters.example.com/minimal.example.com - etcdClusters: - - etcdMembers: - - instanceGroup: master-us-test-1a - name: us-test-1a - name: main - - etcdMembers: - - instanceGroup: master-us-test-1a - name: us-test-1a - name: events - iam: {} - kubelet: - anonymousAuth: false - kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com - networkCIDR: 172.20.0.0/16 - networking: - cni: {} - nonMasqueradeCIDR: 100.64.0.0/10 - sshAccess: - - 0.0.0.0/0 - topology: - masters: public - nodes: public - subnets: - - cidr: 172.20.32.0/19 - name: us-test-1a - type: Public - zone: us-test-1a - ---- - -apiVersion: kops.k8s.io/v1alpha2 -kind: InstanceGroup -metadata: - creationTimestamp: "2016-12-10T22:42:28Z" - name: nodes - labels: - kops.k8s.io/cluster: minimal.example.com -spec: - associatePublicIp: true - iam: - profile: arn:aws-test:iam::422917490108:instance-profile/kops-custom-node-role - image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 - machineType: t2.medium - maxSize: 2 - minSize: 2 - role: Node - subnets: - - us-test-1a - ---- - -apiVersion: kops.k8s.io/v1alpha2 -kind: InstanceGroup -metadata: - creationTimestamp: "2016-12-10T22:42:28Z" - name: master-us-test-1a - labels: - kops.k8s.io/cluster: minimal.example.com -spec: - associatePublicIp: true - iam: - profile: arn:aws-test:iam::422917490108:instance-profile/kops-custom-master-role - image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 - machineType: m3.medium - maxSize: 1 - minSize: 1 - role: Master - subnets: - - us-test-1a diff --git a/tests/integration/update_cluster/existing_sg/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/existing_sg/data/aws_s3_object_cluster-completed.spec_content index f6a2e653a78d4..fbb029e3bfb1e 100644 --- a/tests/integration/update_cluster/existing_sg/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/existing_sg/data/aws_s3_object_cluster-completed.spec_content @@ -153,7 +153,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.existingsg.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml 
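
The node-side kube_env.yaml above differs from the master's: instead of a ConfigBase pointing at the state store, it carries a ConfigServer block with the kops-controller endpoint (port 3988) and pinned CA certificates, so a booting node trusts only that CA when fetching its configuration. A sketch of that trust model in Go, under the assumption that a plain TLS-pinned HTTP client captures the idea; the real nodeup client also authenticates itself to kops-controller:

```go
package bootstrap

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
)

// newConfigServerClient builds an HTTP client that trusts only the CA
// certificates delivered in ConfigServer.CACertificates, mirroring how a
// node pins trust to kops-controller. Illustrative, not the kops API.
func newConfigServerClient(caPEM []byte) (*http.Client, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no CA certificates parsed from PEM")
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}, nil
}
```
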
b/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml index 93274102ef045..4005f61e9a0e1 100644 --- a/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/existing_sg/in-v1alpha2.yaml @@ -33,7 +33,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.existingsg.example.com masterPublicName: api.existingsg.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/external_dns/cloudformation.json b/tests/integration/update_cluster/external_dns/cloudformation.json deleted file mode 100644 index a2316cd5dd576..0000000000000 --- a/tests/integration/update_cluster/external_dns/cloudformation.json +++ /dev/null @@ -1,1349 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": 
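
The `masterInternalName` deletions in these fixtures (here and in the earlier cluster-completed.spec hunks) all remove a value that is just `api.internal.` prefixed to the cluster name, which is presumably why the field could be dropped from the spec and derived instead. A trivially small sketch of that derivation; the helper name is illustrative, not the actual kops function:

```go
package main

import "fmt"

// internalAPIName reconstructs the value the deleted field used to carry.
func internalAPIName(clusterName string) string {
	return "api.internal." + clusterName
}

func main() {
	fmt.Println(internalAPIName("existingsg.example.com"))
	// api.internal.existingsg.example.com
}
```
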
"minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, 
- { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2RouteTableminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": 
"AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - 
"AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2VPCminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - 
"InstanceProfileName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - 
"aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com" - } - }, - 
"Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/external_dns/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/external_dns/cloudformation.json.extracted.yaml deleted file mode 100644 index 202be63780a11..0000000000000 --- a/tests/integration/update_cluster/external_dns/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,448 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install 
under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - 
kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal.example.com - serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: 60mXYDhn2rgNTlX5fCqkcYlpIoYatEz0xmlTKl5N5Tc= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w 
net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: 
registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: 9DUpZRVmc6wlDASGHOilemwB8uS48cVjaxMtHdeu8oE= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/external_dns/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/external_dns/data/aws_s3_object_cluster-completed.spec_content index d08f0406b31af..a8d8ff6580a00 100644 --- a/tests/integration/update_cluster/external_dns/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/external_dns/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/external_dns/in-v1alpha2.yaml b/tests/integration/update_cluster/external_dns/in-v1alpha2.yaml index b21d71e0a4004..4181dfc7f9a37 100644 --- a/tests/integration/update_cluster/external_dns/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/external_dns/in-v1alpha2.yaml @@ -24,7 +24,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/external_dns_irsa/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/external_dns_irsa/data/aws_s3_object_cluster-completed.spec_content index cbc2993e60883..c04cc9b8c6b84 100644 --- a/tests/integration/update_cluster/external_dns_irsa/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/external_dns_irsa/data/aws_s3_object_cluster-completed.spec_content @@ -143,7 +143,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false 
cgroupDriver: systemd diff --git a/tests/integration/update_cluster/external_dns_irsa/in-v1alpha2.yaml b/tests/integration/update_cluster/external_dns_irsa/in-v1alpha2.yaml index 3521625fa1bef..6c4512478c47e 100644 --- a/tests/integration/update_cluster/external_dns_irsa/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/external_dns_irsa/in-v1alpha2.yaml @@ -25,7 +25,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/externallb/cloudformation.json b/tests/integration/update_cluster/externallb/cloudformation.json deleted file mode 100644 index ebde3c5865016..0000000000000 --- a/tests/integration/update_cluster/externallb/cloudformation.json +++ /dev/null @@ -1,1365 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersexternallbexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.externallb.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersexternallbexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersexternallbexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aexternallbexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.externallb.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - "my-external-elb-1", - "my-external-elb-2", - "my-external-elb-3" - ], - "TargetGroupARNs": [ - "arn:aws-test:elasticloadbalancing:us-test-1:000000000000:targetgroup/my-external-tg-1/1", - "arn:aws-test:elasticloadbalancing:us-test-1:000000000000:targetgroup/my-external-tg-2/2", - "arn:aws-test:elasticloadbalancing:us-test-1:000000000000:targetgroup/my-external-tg-3/3" - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesexternallbexamplecom": { - 
"Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.externallb.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesexternallbexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesexternallbexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aexternallbexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.externallb.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - "my-external-elb-1" - ], - "TargetGroupARNs": [ - "arn:aws-test:elasticloadbalancing:us-test-1:000000000000:targetgroup/my-external-tg-1/1" - ] - } - }, - "AWSEC2DHCPOptionsexternallbexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayexternallbexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersexternallbexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.externallb.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersexternallbexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.externallb.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - 
"Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.externallb.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.externallb.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesexternallbexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.externallb.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesexternallbexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.externallb.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "nodes.externallb.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - 
"Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "nodes.externallb.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableexternallbexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayexternallbexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableexternallbexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayexternallbexamplecom" - } - } - }, - "AWSEC2RouteTableexternallbexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCexternallbexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersexternallbexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersexternallbexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesexternallbexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesexternallbexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - 
"AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersexternallbexamplecomingressall0to0mastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersexternallbexamplecomingressall0to0nodesexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesexternallbexamplecomingressall0to0nodesexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesexternallbexamplecomingresstcp1to2379mastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesexternallbexamplecomingresstcp2382to4000mastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesexternallbexamplecomingresstcp4003to65535mastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesexternallbexamplecomingressudp1to65535mastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersexternallbexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesexternallbexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { 
- "GroupName": "masters.externallb.example.com", - "VpcId": { - "Ref": "AWSEC2VPCexternallbexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "masters.externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesexternallbexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.externallb.example.com", - "VpcId": { - "Ref": "AWSEC2VPCexternallbexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "nodes.externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aexternallbexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aexternallbexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableexternallbexamplecom" - } - } - }, - "AWSEC2Subnetustest1aexternallbexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCexternallbexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.externallb.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCexternallbexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationexternallbexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCexternallbexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsexternallbexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentexternallbexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCexternallbexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayexternallbexamplecom" - } - } - }, - "AWSEC2VPCexternallbexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsexternallbexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": 
"us-test-1a.etcd-events.externallb.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainexternallbexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.externallb.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersexternallbexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.externallb.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersexternallbexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesexternallbexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.externallb.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesexternallbexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersexternallbexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.externallb.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersexternallbexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "externallb.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/externallb.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/externallb.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/externallb.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": 
"ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "externallb.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "externallb.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "externallb.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "externallb.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - 
"elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "externallb.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "externallb.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesexternallbexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.externallb.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesexternallbexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersexternallbexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.externallb.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "masters.externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesexternallbexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.externallb.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "externallb.example.com" - }, - { - "Key": "Name", - "Value": "nodes.externallb.example.com" - }, - { - "Key": "kubernetes.io/cluster/externallb.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/externallb/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/externallb/cloudformation.json.extracted.yaml deleted file mode 100644 index 78852482c16bc..0000000000000 --- 
a/tests/integration/update_cluster/externallb/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,448 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersexternallbexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.externallb.example.com - serviceAccountJWKSURI: https://api.internal.externallb.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: externallb.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd 
- cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/externallb.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: R6Az6LnzdiPzz7l2QwPSrQQPPo6fKGKNzVY5XIonRsI= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesexternallbexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! 
(${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - 
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.externallb.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: iJKilqxspeP23hRmmEoE/6ub0cjhfIb1+5ckx058i1w= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/externallb/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/externallb/data/aws_s3_object_cluster-completed.spec_content index 80a36d3dd24f7..bf5b5e0feb6a6 100644 --- a/tests/integration/update_cluster/externallb/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/externallb/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.externallb.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/externallb/in-v1alpha2.yaml b/tests/integration/update_cluster/externallb/in-v1alpha2.yaml index bf81a04b34a33..06c096d78afb4 100644 --- a/tests/integration/update_cluster/externallb/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/externallb/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.externallb.example.com masterPublicName: api.externallb.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/externalpolicies/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/externalpolicies/data/aws_s3_object_cluster-completed.spec_content index e0bbc349653e9..ceae412d40101 100644 --- a/tests/integration/update_cluster/externalpolicies/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/externalpolicies/data/aws_s3_object_cluster-completed.spec_content @@ -159,7 +159,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.externalpolicies.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/externalpolicies/in-v1alpha2.yaml b/tests/integration/update_cluster/externalpolicies/in-v1alpha2.yaml index aefaca60ae88c..99b3df55f5886 100644 --- a/tests/integration/update_cluster/externalpolicies/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/externalpolicies/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.externalpolicies.example.com masterPublicName: api.externalpolicies.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/ha/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/ha/data/aws_s3_object_cluster-completed.spec_content index 8ab8c8ee69735..17908d12ea1ed 100644 --- a/tests/integration/update_cluster/ha/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/ha/data/aws_s3_object_cluster-completed.spec_content @@ -150,7 +150,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - 
masterInternalName: api.internal.ha.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/ha_gce/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/ha_gce/data/aws_s3_object_cluster-completed.spec_content index f1347feeeac50..bce74e79078f7 100644 --- a/tests/integration/update_cluster/ha_gce/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/ha_gce/data/aws_s3_object_cluster-completed.spec_content @@ -154,7 +154,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.ha-gce.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/irsa/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/irsa/data/aws_s3_object_cluster-completed.spec_content index 890cd1894f9ac..5a5a3477ac70d 100644 --- a/tests/integration/update_cluster/irsa/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/irsa/data/aws_s3_object_cluster-completed.spec_content @@ -171,7 +171,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/irsa/in-v1alpha2.yaml b/tests/integration/update_cluster/irsa/in-v1alpha2.yaml index 3e3ae16847997..7dfbe894dd704 100644 --- a/tests/integration/update_cluster/irsa/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/irsa/in-v1alpha2.yaml @@ -51,7 +51,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/karpenter/cloudformation.json b/tests/integration/update_cluster/karpenter/cloudformation.json deleted file mode 100644 index db82e8a455e66..0000000000000 --- a/tests/integration/update_cluster/karpenter/cloudformation.json +++ /dev/null @@ -1,1312 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, 
- { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - 
}, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - 
"Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2RouteTableminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": 
"AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - 
"Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2VPCminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": 
"KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - 
}, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": "ec2:DeleteTags", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - 
"ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/addons/*", - "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/cluster-completed.spec", - "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/igconfig/node/*", - "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/secrets/dockerconfig" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/karpenter/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/karpenter/cloudformation.json.extracted.yaml deleted file mode 100644 index 48f30976828d9..0000000000000 --- a/tests/integration/update_cluster/karpenter/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,418 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - 
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.12 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: k8s.gcr.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal.example.com - serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal.example.com - configureCloudRoutes: false - image: k8s.gcr.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: k8s.gcr.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: k8s.gcr.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - 
cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - nonMasqueradeCIDR: 100.64.0.0/10 - podManifestPath: /etc/kubernetes/manifests - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - nonMasqueradeCIDR: 100.64.0.0/10 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: false - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: LFwTDQ1M/AxVLdvKc8ZPsktDgr836JEsdQRwn2TU+iM= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.12 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: k8s.gcr.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - nonMasqueradeCIDR: 100.64.0.0/10 - podManifestPath: /etc/kubernetes/manifests - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: ehZK5PooPMXQw0YD3dy5oARwClEXIj8ymh6DR1XYbQ0= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/karpenter/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/karpenter/data/aws_s3_object_cluster-completed.spec_content index 53e3abc3f3416..606a97dd6f3ad 100644 --- a/tests/integration/update_cluster/karpenter/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/karpenter/data/aws_s3_object_cluster-completed.spec_content @@ -168,7 +168,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.25.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/karpenter/in-v1alpha2.yaml b/tests/integration/update_cluster/karpenter/in-v1alpha2.yaml index ad7982e77e0ed..a5219c3276a40 100644 --- a/tests/integration/update_cluster/karpenter/in-v1alpha2.yaml +++ 
b/tests/integration/update_cluster/karpenter/in-v1alpha2.yaml @@ -25,7 +25,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.25.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/lifecycle_phases/in-v1alpha2.yaml b/tests/integration/update_cluster/lifecycle_phases/in-v1alpha2.yaml index eddc09d357a18..c9a357730bfd0 100644 --- a/tests/integration/update_cluster/lifecycle_phases/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/lifecycle_phases/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.lifecyclephases.example.com masterPublicName: api.lifecyclephases.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content index 1192b43f552a0..e91074143087d 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/data/aws_s3_object_cluster-completed.spec_content @@ -182,7 +182,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml index b3bdda0a1891d..233cbaa6378c1 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content index ea37c01c8c619..95ad134e09d73 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/data/aws_s3_object_cluster-completed.spec_content @@ -185,7 +185,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.23.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml index 9bab7e1386c81..0b85db9d864ae 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa23/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.23.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content 
b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content index b99dc9691c768..6becec04c6205 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/data/aws_s3_object_cluster-completed.spec_content @@ -185,7 +185,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.24.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml index 2c8cc45399d36..008f14ad441ad 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa24/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.24.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content index 646c12404e4b4..f29bd607701a3 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/data/aws_s3_object_cluster-completed.spec_content @@ -184,7 +184,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.25.0-rc.1 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml index 8ef1178654c4e..acde3b1ab5b49 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa25/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.25.0-rc.1 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content index 83c925bbab5a6..a9865919d7d33 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/data/aws_s3_object_cluster-completed.spec_content @@ -184,7 +184,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.26.0-alpha.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml index a714cacbb486c..61134a20eec38 100644 --- a/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm-irsa26/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false 
kubernetesVersion: v1.26.0-alpha.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content index 04a8ec3583f8f..53934b3ead961 100644 --- a/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons-ccm/data/aws_s3_object_cluster-completed.spec_content @@ -181,7 +181,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml index 4234ee9f11c79..e6c7d55891254 100644 --- a/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons-ccm/in-v1alpha2.yaml @@ -33,7 +33,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content index 3dc36cadd8afa..c06d3d9a4cae8 100644 --- a/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/many-addons/data/aws_s3_object_cluster-completed.spec_content @@ -174,7 +174,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml b/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml index 51fcdca875f70..1b662a897b5c8 100644 --- a/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/many-addons/in-v1alpha2.yaml @@ -33,7 +33,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-1.23/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-1.23/data/aws_s3_object_cluster-completed.spec_content index 9ffb135532869..d65cf54fe613f 100644 --- a/tests/integration/update_cluster/minimal-1.23/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-1.23/data/aws_s3_object_cluster-completed.spec_content @@ -167,7 +167,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.23.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-1.24/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-1.24/data/aws_s3_object_cluster-completed.spec_content index 4799f7bf0d485..a6eb75c03b8e8 100644 --- a/tests/integration/update_cluster/minimal-1.24/data/aws_s3_object_cluster-completed.spec_content 
+++ b/tests/integration/update_cluster/minimal-1.24/data/aws_s3_object_cluster-completed.spec_content @@ -174,7 +174,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.24.3 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-1.25/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-1.25/data/aws_s3_object_cluster-completed.spec_content index 7c0eb471b8f1b..6c21925a9199f 100644 --- a/tests/integration/update_cluster/minimal-1.25/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-1.25/data/aws_s3_object_cluster-completed.spec_content @@ -173,7 +173,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.25.0-rc.1 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-1.26/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-1.26/data/aws_s3_object_cluster-completed.spec_content index 91c8852b58072..b993486dccfe7 100644 --- a/tests/integration/update_cluster/minimal-1.26/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-1.26/data/aws_s3_object_cluster-completed.spec_content @@ -173,7 +173,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.26.0-alpha.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-dns-none/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-dns-none/data/aws_s3_object_cluster-completed.spec_content index 3b0e8175cc4ec..960b1e3adc3f4 100644 --- a/tests/integration/update_cluster/minimal-dns-none/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-dns-none/data/aws_s3_object_cluster-completed.spec_content @@ -172,7 +172,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.26.0-alpha.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-etcd/cloudformation.json b/tests/integration/update_cluster/minimal-etcd/cloudformation.json deleted file mode 100644 index e3c44fd948035..0000000000000 --- a/tests/integration/update_cluster/minimal-etcd/cloudformation.json +++ /dev/null @@ -1,1349 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimaletcdexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal-etcd.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimaletcdexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimaletcdexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimaletcdexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-etcd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": 
true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimaletcdexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal-etcd.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimaletcdexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimaletcdexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimaletcdexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal-etcd.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimaletcdexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimaletcdexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "minimal-etcd.example.com" - }, - { - "Key": 
"kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimaletcdexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal-etcd.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimaletcdexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal-etcd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-etcd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-etcd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimaletcdexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal-etcd.example.com", - "LaunchTemplateData": { - 
"BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimaletcdexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal-etcd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-etcd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-etcd.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimaletcdexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimaletcdexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimaletcdexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimaletcdexamplecom" - } - } - }, - "AWSEC2RouteTableminimaletcdexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimaletcdexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimaletcdexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimaletcdexamplecomegressall0to000000": { - "Type": 
"AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimaletcdexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimaletcdexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimaletcdexamplecomingressall0to0mastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimaletcdexamplecomingressall0to0nodesminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimaletcdexamplecomingressall0to0nodesminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimaletcdexamplecomingresstcp1to2379mastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimaletcdexamplecomingresstcp2382to4000mastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": 
{ - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimaletcdexamplecomingresstcp4003to65535mastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimaletcdexamplecomingressudp1to65535mastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimaletcdexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimaletcdexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal-etcd.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimaletcdexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal-etcd.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimaletcdexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal-etcd.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimaletcdexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-etcd.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aminimaletcdexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimaletcdexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimaletcdexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimaletcdexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimaletcdexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal-etcd.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimaletcdexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimaletcdexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - 
"Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimaletcdexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimaletcdexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimaletcdexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimaletcdexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimaletcdexamplecom" - } - } - }, - "AWSEC2VPCminimaletcdexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimaletcdexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal-etcd.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimaletcdexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal-etcd.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersminimaletcdexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal-etcd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimaletcdexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimaletcdexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal-etcd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimaletcdexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimaletcdexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal-etcd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimaletcdexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal-etcd.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": 
"arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-etcd.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-etcd.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-etcd.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-etcd.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - 
"elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-etcd.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimaletcdexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal-etcd.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimaletcdexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimaletcdexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": 
"masters.minimal-etcd.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal-etcd.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimaletcdexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal-etcd.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-etcd.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-etcd.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-etcd.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/minimal-etcd/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/minimal-etcd/cloudformation.json.extracted.yaml deleted file mode 100644 index 508e7471fd49a..0000000000000 --- a/tests/integration/update_cluster/minimal-etcd/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,464 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimaletcdexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! 
(${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - etcdMembers: - - name: us-test-1a - volumeSize: 20 - manager: - env: - - name: ETCD_MANAGER_HOURLY_BACKUPS_RETENTION - value: 1d - - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION - value: 30d - image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc - version: 3.4.13 - main: - etcdMembers: - - name: us-test-1a - volumeSize: 20 - manager: - image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc - logLevel: 10 - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal-etcd.example.com - serviceAccountJWKSURI: https://api.internal.minimal-etcd.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: 
true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal-etcd.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal-etcd.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: 6t2fVzcVS700vzqTGel+2diC36uSLb+ilZjsIjNbIBw= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimaletcdexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. 
args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - 
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal-etcd.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: ukThrd8BJM+6ERw5+e6WgG9jB4sh9VuLOfx+xd/cnzI= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_masters.minimal-etcd.example.com_policy b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_masters.minimal-etcd.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_masters.minimal-etcd.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_nodes.minimal-etcd.example.com_policy b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_nodes.minimal-etcd.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_nodes.minimal-etcd.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_policy_masters.minimal-etcd.example.com_policy b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_policy_masters.minimal-etcd.example.com_policy new file mode 100644 index 0000000000000..97ca3bc5b6c41 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_policy_masters.minimal-etcd.example.com_policy @@ -0,0 +1,280 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal-etcd.example.com/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": 
"arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-etcd.example.com/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-etcd.example.com/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal-etcd.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal-etcd.example.com", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + 
"elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal-etcd.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal-etcd.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_policy_nodes.minimal-etcd.example.com_policy b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_policy_nodes.minimal-etcd.example.com_policy new file mode 100644 index 0000000000000..153ab3c7f64f4 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_iam_role_policy_nodes.minimal-etcd.example.com_policy @@ -0,0 +1,30 @@ +{ + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + 
"Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_key_pair_kubernetes.minimal-etcd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key b/tests/integration/update_cluster/minimal-etcd/data/aws_key_pair_kubernetes.minimal-etcd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key new file mode 100644 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_key_pair_kubernetes.minimal-etcd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_launch_template_master-us-test-1a.masters.minimal-etcd.example.com_user_data b/tests/integration/update_cluster/minimal-etcd/data/aws_launch_template_master-us-test-1a.masters.minimal-etcd.example.com_user_data new file mode 100644 index 0000000000000..85bade362d306 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_launch_template_master-us-test-1a.masters.minimal-etcd.example.com_user_data @@ -0,0 +1,269 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.4.13 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + etcdMembers: + - name: us-test-1a + volumeSize: 20 + manager: + env: + - name: ETCD_MANAGER_HOURLY_BACKUPS_RETENTION + value: 1d + - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION + value: 30d + image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc + version: 3.4.13 + main: + etcdMembers: + - name: us-test-1a + volumeSize: 20 + manager: + image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc + logLevel: 10 + version: 3.4.13 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.minimal-etcd.example.com + serviceAccountJWKSURI: https://api.internal.minimal-etcd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: minimal-etcd.example.com + configureCloudRoutes: false + image: 
registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/minimal-etcd.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: Master +NodeupConfigHash: 6t2fVzcVS700vzqTGel+2diC36uSLb+ilZjsIjNbIBw= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_launch_template_nodes.minimal-etcd.example.com_user_data b/tests/integration/update_cluster/minimal-etcd/data/aws_launch_template_nodes.minimal-etcd.example.com_user_data new file mode 100644 index 0000000000000..02363fd8e76cf --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_launch_template_nodes.minimal-etcd.example.com_user_data @@ -0,0 +1,193 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. 
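Both user-data scripts (this control-plane one and the nodes variant that follows) write their configuration with quoted heredoc delimiters, cat > ... << '__EOF_CLUSTER_SPEC', which suppresses parameter and command expansion so the YAML lands byte-for-byte even if it ever contained $ or backticks. A two-heredoc illustration of why the quotes matter:

#!/usr/bin/env bash
# Quoted delimiter: body is literal. Unquoted: body is expanded.
cat << 'EOF'
$HOME stays literal inside a quoted heredoc
EOF
cat << EOF
$HOME expands inside an unquoted heredoc
EOF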
args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.4.13 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + 
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + server: https://kops-controller.internal.minimal-etcd.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: ukThrd8BJM+6ERw5+e6WgG9jB4sh9VuLOfx+xd/cnzI= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000000..e0bdf12766146 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,193 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: minimal-etcd.example.com +spec: + api: + dns: {} + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: memfs://clusters.example.com/minimal-etcd.example.com + configStore: memfs://clusters.example.com/minimal-etcd.example.com + containerRuntime: containerd + containerd: + logLevel: info + version: 1.4.13 + dnsZone: Z1AFAKE1ZON3YO + docker: + skipInstall: true + etcdClusters: + - backups: + backupStore: memfs://clusters.example.com/minimal-etcd.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + volumeSize: 20 + manager: + image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc + logLevel: 10 + name: main + version: 3.4.13 + - backups: + backupStore: memfs://clusters.example.com/minimal-etcd.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + volumeSize: 20 + manager: + env: + - name: ETCD_MANAGER_HOURLY_BACKUPS_RETENTION + value: 1d + - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION + value: 30d + image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc + name: events + version: 3.4.13 + externalDns: + provider: dns-controller + iam: + legacy: false + keyStore: memfs://clusters.example.com/minimal-etcd.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass 
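Note the asymmetry with the control-plane script: masters read their full config from ConfigBase (the memfs/S3 state store), while nodes are pointed at ConfigServer, the kops-controller endpoint on port 3988, with the kubernetes-ca bundle pinned inline so bootstrap TLS trusts exactly those two CA generations and nothing else. Conceptually, nodeup's first call amounts to no more than (a sketch; ca.crt holds the two PEM blocks above):

#!/usr/bin/env bash
# Sketch: contact the config server, trusting only the pinned CA bundle.
curl --cacert ca.crt \
  https://kops-controller.internal.minimal-etcd.example.com:3988/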
+ - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.minimal-etcd.example.com + serviceAccountJWKSURI: https://api.internal.minimal-etcd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: minimal-etcd.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + kubernetesVersion: 1.21.0 + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.minimal-etcd.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: memfs://clusters.example.com/minimal-etcd.example.com/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_etcd-cluster-spec-events_content 
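cluster-completed.spec is the cluster object after kops' defaulting pass: fields the minimal test input never set (kubeDNS, masterPublicName, podCIDR, the per-role kubelet blocks, and so on) have been filled in, which is why the integration test pins the whole rendered document rather than the input. On a real state store the same artifact sits under the config base, roughly (path layout mirrors the memfs URLs above; the bucket name is illustrative):

#!/usr/bin/env bash
# Sketch: read the completed spec back from an S3 state store.
aws s3 cp s3://clusters.example.com/minimal-etcd.example.com/cluster-completed.spec -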
b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000000..bb8ddb0e2e0ec --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000000000..b7340298dcdd5 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.21.0-alpha.1 diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content new file mode 100644 index 0000000000000..c7ef296f924a5 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content @@ -0,0 +1,67 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/minimal-etcd.example.com/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.minimal-etcd.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/minimal-etcd.example.com=owned > /tmp/pipe + 2>&1 + env: + - name: ETCD_MANAGER_HOURLY_BACKUPS_RETENTION + value: 1d + - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION + value: 30d + image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git 
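The etcd-manager command line above uses a named pipe so a single output stream is duplicated: etcd-manager writes to /tmp/pipe, and a backgrounded tee -a copies everything both to the host-mounted /var/log/etcd.log and to its own stdout, which is what kubectl logs reads. The pattern in isolation (paths arbitrary):

#!/usr/bin/env bash
# Sketch of the manifest's mkfifo/tee fan-out.
mkfifo /tmp/pipe
(tee -a /tmp/demo.log < /tmp/pipe &)
# exec replaces the shell, exactly as the manifest execs /etcd-manager.
exec echo "one write, two destinations" > /tmp/pipe 2>&1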
a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content new file mode 100644 index 0000000000000..a2488684fba02 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content @@ -0,0 +1,62 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/minimal-etcd.example.com/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.minimal-etcd.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=10 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/minimal-etcd.example.com=owned > /tmp/pipe + 2>&1 + image: gcr.io/k8s-staging-etcdadm/etcd:v20210430-v0.1.3-739-g7da12acc + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000000..5cb249fea763e --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.26.0-alpha.1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-bootstrap_content 
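The kube-apiserver-healthcheck sidecar terminates the probe locally: it holds the apiserver client certificate under /secrets and exposes an unauthenticated plaintext endpoint on port 3990 that the kubelet's livenessProbe can hit without credentials. From the master itself, the probe is equivalent to:

#!/usr/bin/env bash
# Sketch: the same request the kubelet's livenessProbe issues.
curl -fsS http://127.0.0.1:3990/.kube-apiserver-healthcheck/healthz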
b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-bootstrap_content new file mode 100644 index 0000000000000..0a1503cbcc443 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-bootstrap_content @@ -0,0 +1,48 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 74b53e24ad74b84c0ab2385520d59f28229f2350da5a08a7b33e5cef57ea4cbe + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: cd1e8f47fe52b13fee5536b0d4b4429ef256829d87a51cbc189fa0f21ff3503b + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 6c8f01b2470d323965dfb22d410f322e0b429f7acc3831f41a763ec072dfc69b + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..fd5b8a7c053f2 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + 
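Each entry in this bootstrap channel carries a manifestHash, which in current kops is the SHA256 of the rendered manifest stored alongside it; the channels applier compares this digest, plus id and version, to decide whether an addon needs re-applying, and needsRollingUpdate: control-plane additionally marks control-plane nodes for a rolling update when the kops-controller manifest changes. Recomputing a digest from the neighbouring fixture (a sketch, run from this data directory):

#!/usr/bin/env bash
# Sketch: an addon's manifestHash is the SHA256 of its manifest content.
sha256sum aws_s3_object_minimal-etcd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content
# expected digest, per the channel above:
# 74b53e24ad74b84c0ab2385520d59f28229f2350da5a08a7b33e5cef57ea4cbe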
app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.9.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + 
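The Corefile above wires the kubernetes plugin to cluster.local (matching clusterDomain in the kubelet config) and forwards everything else to /etc/resolv.conf, with a 30-second cache; the kube-dns Service pins the well-known clusterIP 100.64.0.10 that every kubelet hands to pods as clusterDNS. From any pod, resolution against it looks like:

#!/usr/bin/env bash
# Sketch: query the in-cluster resolver the way pods do.
dig +short kubernetes.default.svc.cluster.local @100.64.0.10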
port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..2eab063fdb45a --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + 
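coredns-autoscaler sizes the coredns Deployment with the linear controller: replicas is the larger of ceil(cores/256) and ceil(nodes/16), with preventSinglePointFailure forcing at least 2 replicas once the cluster has more than one node. Worked through in shell arithmetic (the cluster dimensions are examples):

#!/usr/bin/env bash
# Sketch: the linear sizing from --default-params above.
cores=64 nodes=20                      # example cluster dimensions
by_cores=$(( (cores + 255) / 256 ))    # ceil(cores / coresPerReplica)
by_nodes=$(( (nodes + 15) / 16 ))      # ceil(nodes / nodesPerReplica)
replicas=$(( by_cores > by_nodes ? by_cores : by_nodes ))
(( nodes > 1 && replicas < 2 )) && replicas=2   # preventSinglePointFailure
echo "coredns replicas: ${replicas}"   # -> 2 here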
addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.26.0-alpha.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.26.0-alpha.1 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000000..ca4c43bdcf565 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + 
{"cloud":"aws","configBase":"memfs://clusters.example.com/minimal-etcd.example.com","secretStore":"memfs://clusters.example.com/minimal-etcd.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal-etcd.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.26.0-alpha.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal-etcd.example.com + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.26.0-alpha.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + 
app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000000..36761e1c56255 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000000..4dcdce48b9ab9 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + 
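kops-controller's namespaced Role is deliberately lopsided: create is allowed on any configmap or lease (the first write of the election object), while get/update/delete are restricted via resourceNames to kops-controller-leader, so a compromised controller cannot rewrite arbitrary kube-system configmaps. With impersonation rights one could spot-check this (a sketch):

#!/usr/bin/env bash
# Sketch: verify the resourceNames scoping of the leader-election Role.
SA=system:serviceaccount:kube-system:kops-controller
kubectl auth can-i update lease.coordination.k8s.io/kops-controller-leader \
  -n kube-system --as "${SA}"    # expect: yes
kubectl auth can-i update configmap/some-other-configmap \
  -n kube-system --as "${SA}"    # expect: no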
app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000000..21efd54326518 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_minimal-etcd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,98 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000000..5d680eae39185 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,276 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - 
DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.minimal-etcd.example.com + serviceAccountJWKSURI: https://api.internal.minimal-etcd.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz + - 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + 
MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + 
hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + 
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: minimal-etcd.example.com +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/master=:NoSchedule +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/minimal-etcd.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 +etcdManifests: +- memfs://clusters.example.com/minimal-etcd.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/minimal-etcd.example.com/manifests/etcd/events-master-us-test-1a.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git 
a/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000000..c2f43fc0b21c6 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,44 @@ +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz +CAs: {} +ClusterName: minimal-etcd.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/minimal-etcd.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 diff --git a/tests/integration/update_cluster/minimal-etcd/id_rsa.pub b/tests/integration/update_cluster/minimal-etcd/id_rsa.pub old mode 100755 new mode 100644 diff --git a/tests/integration/update_cluster/minimal-etcd/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-etcd/in-v1alpha2.yaml index f40ff657be5c5..a3bdaf4e9f483 100644 --- a/tests/integration/update_cluster/minimal-etcd/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal-etcd/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal-etcd.example.com masterPublicName: api.minimal-etcd.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-etcd/kubernetes.tf b/tests/integration/update_cluster/minimal-etcd/kubernetes.tf new file mode 100644 index 
0000000000000..cd2a180776187 --- /dev/null +++ b/tests/integration/update_cluster/minimal-etcd/kubernetes.tf @@ -0,0 +1,820 @@ +locals { + cluster_name = "minimal-etcd.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-etcd-example-com.id] + master_security_group_ids = [aws_security_group.masters-minimal-etcd-example-com.id] + masters_role_arn = aws_iam_role.masters-minimal-etcd-example-com.arn + masters_role_name = aws_iam_role.masters-minimal-etcd-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-etcd-example-com.id] + node_security_group_ids = [aws_security_group.nodes-minimal-etcd-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-minimal-etcd-example-com.id] + nodes_role_arn = aws_iam_role.nodes-minimal-etcd-example-com.arn + nodes_role_name = aws_iam_role.nodes-minimal-etcd-example-com.name + region = "us-test-1" + route_table_public_id = aws_route_table.minimal-etcd-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-etcd-example-com.id + vpc_cidr_block = aws_vpc.minimal-etcd-example-com.cidr_block + vpc_id = aws_vpc.minimal-etcd-example-com.id +} + +output "cluster_name" { + value = "minimal-etcd.example.com" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-etcd-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-minimal-etcd-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-minimal-etcd-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-minimal-etcd-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-minimal-etcd-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-minimal-etcd-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-minimal-etcd-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-minimal-etcd-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-minimal-etcd-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_public_id" { + value = aws_route_table.minimal-etcd-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-minimal-etcd-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.minimal-etcd-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.minimal-etcd-example-com.id +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-etcd-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-minimal-etcd-example-com.id + version = aws_launch_template.master-us-test-1a-masters-minimal-etcd-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.minimal-etcd.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "minimal-etcd.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = 
"master-us-test-1a.masters.minimal-etcd.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/minimal-etcd.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-etcd-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-minimal-etcd-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-minimal-etcd-example-com.id + version = aws_launch_template.nodes-minimal-etcd-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.minimal-etcd.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "minimal-etcd.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.minimal-etcd.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/minimal-etcd.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-etcd-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-etcd-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "us-test-1a.etcd-events.minimal-etcd.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-etcd-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "us-test-1a.etcd-main.minimal-etcd.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} 
+ +resource "aws_iam_instance_profile" "masters-minimal-etcd-example-com" { + name = "masters.minimal-etcd.example.com" + role = aws_iam_role.masters-minimal-etcd-example-com.name + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "masters.minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-minimal-etcd-example-com" { + name = "nodes.minimal-etcd.example.com" + role = aws_iam_role.nodes-minimal-etcd-example-com.name + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "nodes.minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } +} + +resource "aws_iam_role" "masters-minimal-etcd-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal-etcd.example.com_policy") + name = "masters.minimal-etcd.example.com" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "masters.minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-minimal-etcd-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal-etcd.example.com_policy") + name = "nodes.minimal-etcd.example.com" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "nodes.minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-minimal-etcd-example-com" { + name = "masters.minimal-etcd.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal-etcd.example.com_policy") + role = aws_iam_role.masters-minimal-etcd-example-com.name +} + +resource "aws_iam_role_policy" "nodes-minimal-etcd-example-com" { + name = "nodes.minimal-etcd.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal-etcd.example.com_policy") + role = aws_iam_role.nodes-minimal-etcd-example-com.name +} + +resource "aws_internet_gateway" "minimal-etcd-example-com" { + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-etcd-example-com.id +} + +resource "aws_key_pair" "kubernetes-minimal-etcd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = "kubernetes.minimal-etcd.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal-etcd.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-minimal-etcd-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-minimal-etcd-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-minimal-etcd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = 
"disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.minimal-etcd.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-minimal-etcd-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "master-us-test-1a.masters.minimal-etcd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "master-us-test-1a.masters.minimal-etcd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "master-us-test-1a.masters.minimal-etcd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal-etcd.example.com_user_data") +} + +resource "aws_launch_template" "nodes-minimal-etcd-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-minimal-etcd-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-minimal-etcd-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + 
name = "nodes.minimal-etcd.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-minimal-etcd-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "nodes.minimal-etcd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "nodes.minimal-etcd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "nodes.minimal-etcd.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.minimal-etcd.example.com_user_data") +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.minimal-etcd-example-com.id + route_table_id = aws_route_table.minimal-etcd-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.minimal-etcd-example-com.id + route_table_id = aws_route_table.minimal-etcd-example-com.id +} + +resource "aws_route_table" "minimal-etcd-example-com" { + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.minimal-etcd-example-com.id +} + +resource "aws_route_table_association" "us-test-1a-minimal-etcd-example-com" { + route_table_id = aws_route_table.minimal-etcd-example-com.id + subnet_id = aws_subnet.us-test-1a-minimal-etcd-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/minimal-etcd.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/minimal-etcd.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "clusters.example.com/minimal-etcd.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + 
server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "clusters.example.com/minimal-etcd.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") + key = "clusters.example.com/minimal-etcd.example.com/manifests/etcd/events-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") + key = "clusters.example.com/minimal-etcd.example.com/manifests/etcd/main-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/minimal-etcd.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-etcd-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-etcd.example.com-addons-bootstrap_content") + key = "clusters.example.com/minimal-etcd.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-etcd-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-etcd.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/minimal-etcd.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-etcd-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-etcd.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/minimal-etcd.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-etcd-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-etcd.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/minimal-etcd.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-etcd-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-etcd.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/minimal-etcd.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" 
"minimal-etcd-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-etcd.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/minimal-etcd.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-etcd-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-etcd.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/minimal-etcd.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/minimal-etcd.example.com/igconfig/master/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "clusters.example.com/minimal-etcd.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "masters-minimal-etcd-example-com" { + description = "Security group for masters" + name = "masters.minimal-etcd.example.com" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "masters.minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-etcd-example-com.id +} + +resource "aws_security_group" "nodes-minimal-etcd-example-com" { + description = "Security group for nodes" + name = "nodes.minimal-etcd.example.com" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "nodes.minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-etcd-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-etcd-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-etcd-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-minimal-etcd-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-minimal-etcd-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-minimal-etcd-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = 
aws_security_group.masters-minimal-etcd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-minimal-etcd-example-com-ingress-all-0to0-masters-minimal-etcd-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + source_security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-minimal-etcd-example-com-ingress-all-0to0-nodes-minimal-etcd-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + source_security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-etcd-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-etcd-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-etcd-example-com-ingress-all-0to0-nodes-minimal-etcd-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-etcd-example-com-ingress-tcp-1to2379-masters-minimal-etcd-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-etcd-example-com-ingress-tcp-2382to4000-masters-minimal-etcd-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-etcd-example-com-ingress-tcp-4003to65535-masters-minimal-etcd-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-etcd-example-com-ingress-udp-1to65535-masters-minimal-etcd-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-minimal-etcd-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-etcd-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-minimal-etcd-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "us-test-1a.minimal-etcd.example.com" + "SubnetType" = "Public" + "kops.k8s.io/instance-group/master-us-test-1a" = "true" + "kops.k8s.io/instance-group/nodes" = 
"true" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.minimal-etcd-example-com.id +} + +resource "aws_vpc" "minimal-etcd-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "minimal-etcd-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "minimal-etcd.example.com" + "Name" = "minimal-etcd.example.com" + "kubernetes.io/cluster/minimal-etcd.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "minimal-etcd-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.minimal-etcd-example-com.id + vpc_id = aws_vpc.minimal-etcd-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +} diff --git a/tests/integration/update_cluster/minimal-gp3/cloudformation.json b/tests/integration/update_cluster/minimal-gp3/cloudformation.json deleted file mode 100644 index 50e264706010a..0000000000000 --- a/tests/integration/update_cluster/minimal-gp3/cloudformation.json +++ /dev/null @@ -1,1345 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - 
"GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 4000, - "Throughput": 200, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m5.large", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": 
"AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t3.large", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2RouteTableminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - 
"Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": 
"minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2VPCminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - 
"AWSEC2Volumeustest1aetcdmainminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 50, - "VolumeType": "gp3", - "Iops": 5000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - 
"Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - 
"elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/minimal-gp3/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/minimal-gp3/cloudformation.json.extracted.yaml deleted file mode 100644 index 37670b545f0f3..0000000000000 --- a/tests/integration/update_cluster/minimal-gp3/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,454 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - 
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - etcdMembers: - - name: us-test-1a - volumeSize: 20 - version: 3.4.13 - main: - etcdMembers: - - name: us-test-1a - volumeSize: 50 - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal.example.com - serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - 
leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: 60mXYDhn2rgNTlX5fCqkcYlpIoYatEz0xmlTKl5N5Tc= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - 
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: 9DUpZRVmc6wlDASGHOilemwB8uS48cVjaxMtHdeu8oE= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-gp3/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-gp3/data/aws_s3_object_cluster-completed.spec_content index 3a50c0c6397f8..57e86948a1c29 100644 --- a/tests/integration/update_cluster/minimal-gp3/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-gp3/data/aws_s3_object_cluster-completed.spec_content @@ -148,7 +148,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-gp3/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-gp3/in-v1alpha2.yaml index 1dcff0f2c747d..fef7af3929626 100644 --- a/tests/integration/update_cluster/minimal-gp3/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal-gp3/in-v1alpha2.yaml @@ -28,7 +28,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-ipv6-calico/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6-calico/data/aws_s3_object_cluster-completed.spec_content index d65ac7e1af353..e79453d2c2862 100644 --- a/tests/integration/update_cluster/minimal-ipv6-calico/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-ipv6-calico/data/aws_s3_object_cluster-completed.spec_content @@ -175,7 +175,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.25.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-ipv6-calico/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-ipv6-calico/in-v1alpha2.yaml index fd556e18dbef2..a9f40fcd1b7ea 100644 --- a/tests/integration/update_cluster/minimal-ipv6-calico/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal-ipv6-calico/in-v1alpha2.yaml @@ -33,7 +33,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: v1.25.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterPublicName: 
api.minimal-ipv6.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/cloudformation.json b/tests/integration/update_cluster/minimal-ipv6-cilium/cloudformation.json deleted file mode 100644 index f72598496eb62..0000000000000 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/cloudformation.json +++ /dev/null @@ -1,1685 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalipv6examplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal-ipv6.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "TargetGroupARNs": [ - { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpminimalipv6examplebne5ih" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalipv6examplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal-ipv6.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalipv6examplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalipv6examplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalipv6examplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2EIPustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalipv6examplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal-ipv6.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalipv6examplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal-ipv6.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 1, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalipv6examplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal-ipv6.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalipv6examplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal-ipv6.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 1, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": 
"minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2NatGatewayustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": { - "Fn::GetAtt": [ - "AWSEC2EIPustest1aminimalipv6examplecom", - "AllocationId" - ] - }, - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalipv6examplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalipv6examplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2RouteTableminimalipv6examplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2RouteTablepublicustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "public-us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public-us-test-1a" - } - ] - } - }, - "AWSEC2Routepublicustest1a0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2Routepublicustest1a00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2Routepublicustest1a64ff9b96": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - }, - "DestinationIpv6CidrBlock": "64:ff9b::/96", - "NatGatewayId": { - "Ref": "AWSEC2NatGatewayustest1aminimalipv6examplecom" - } - } - }, - 
"AWSEC2SecurityGroupEgressfrommastersminimalipv6examplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalipv6examplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalipv6examplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalipv6examplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom0ingresstcp22to22mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupIngressfrom0ingresstcp22to22nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupIngressfrom0ingresstcp443to443mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalipv6examplecomingressall0to0mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - 
"AWSEC2SecurityGroupIngressfrommastersminimalipv6examplecomingressall0to0nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingressall0to0nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingresstcp1to2379mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingresstcp2382to4000mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingresstcp4003to65535mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingressudp1to65535mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupIngresshttpselbtomaster": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "172.20.0.0/16" - } - }, - "AWSEC2SecurityGroupIngressicmppmtuapielb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmpv6pmtuapielb0": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": -1, - "ToPort": -1, - "IpProtocol": "icmpv6", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupapielbminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "api-elb.minimal-ipv6.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "GroupDescription": "Security group for 
api ELB", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "api-elb.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupmastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal-ipv6.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal-ipv6.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationpublicustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "CidrBlock": "172.20.32.0/19", - "Ipv6CidrBlock": "2001:db8:0:111::/64", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalipv6examplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalipv6examplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalipv6examplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2VPCminimalipv6examplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": 
"minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalipv6examplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalipv6examplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2Listenerapiminimalipv6examplecom443": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpminimalipv6examplebne5ih" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom" - }, - "Port": 443, - "Protocol": "TCP" - } - }, - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "api-minimal-ipv6-example--jhj9te", - "Scheme": "internet-facing", - "SubnetMappings": [ - { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - } - } - ], - "Type": "network", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "api.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2TargetGrouptcpminimalipv6examplebne5ih": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "tcp-minimal-ipv6-example--bne5ih", - "Port": 443, - "Protocol": "TCP", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "tcp-minimal-ipv6-example--bne5ih" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TCP", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSIAMInstanceProfilemastersminimalipv6examplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal-ipv6.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalipv6examplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalipv6examplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal-ipv6.example.com", - "Roles": [ - { - "Ref": 
"AWSIAMRolenodesminimalipv6examplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimalipv6examplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal-ipv6.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalipv6examplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal-ipv6.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-ipv6.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-ipv6.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - 
"autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AssignIpv6Addresses", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": 
"2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimalipv6examplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal-ipv6.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalipv6examplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:AssignIpv6Addresses", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimalipv6examplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal-ipv6.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalipv6examplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal-ipv6.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSRoute53RecordSetapiminimalipv6examplecom": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.minimal-ipv6.example.com", - "Type": "A", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "CanonicalHostedZoneID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - }, - "AWSRoute53RecordSetapiminimalipv6examplecomAAAA": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.minimal-ipv6.example.com", - "Type": "AAAA", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "CanonicalHostedZoneID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - } - } -} diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/minimal-ipv6-cilium/cloudformation.json.extracted.yaml deleted file mode 100644 index 3cb85ff88c6b0..0000000000000 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,474 +0,0 @@ 
-Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - nodeIPFamilies: - - ipv6 - - ipv4 - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: '::' - cloudProvider: external - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal-ipv6.example.com - serviceAccountJWKSURI: https://api.internal.minimal-ipv6.example.com/openid/v1/jwks - serviceClusterIPRange: fd00:5e4f:ce::/108 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: false - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: external - clusterName: minimal-ipv6.example.com - configureCloudRoutes: false - controllers: - - '*' - - -nodeipam - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - cpuRequest: 100m - image: 
registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: fd00:5e4f:ce::a - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: fd00:5e4f:ce::a - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal-ipv6.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: hNKVZ05MEjtdAlYuY3nPe+ccpbipMaSa2AYXkSzJUwg= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalipv6examplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - nodeIPFamilies: - - ipv6 - - ipv4 - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: fd00:5e4f:ce::a - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - 
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal-ipv6.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: oGVQyiIw+lfmCmcmtVnX8V+zNcpHJoAz2DauLNcSutA= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content index b03fce8603d29..a1a72eb4c83c7 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/data/aws_s3_object_cluster-completed.spec_content @@ -172,7 +172,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-ipv6-cilium/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-ipv6-cilium/in-v1alpha2.yaml index 2971d32f84606..196c36682b7fa 100644 --- a/tests/integration/update_cluster/minimal-ipv6-cilium/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal-ipv6-cilium/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterPublicName: api.minimal-ipv6.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-ipv6-private/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6-private/data/aws_s3_object_cluster-completed.spec_content index b82563649e036..56da9e97baebd 100644 --- a/tests/integration/update_cluster/minimal-ipv6-private/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-ipv6-private/data/aws_s3_object_cluster-completed.spec_content @@ -175,7 +175,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.23.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-ipv6-private/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-ipv6-private/in-v1alpha2.yaml index 3c607fe6bee32..9a9fcfc76810d 100644 --- a/tests/integration/update_cluster/minimal-ipv6-private/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal-ipv6-private/in-v1alpha2.yaml @@ -34,7 +34,6 @@ 
spec: kubelet: anonymousAuth: false kubernetesVersion: v1.23.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterPublicName: api.minimal-ipv6.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-ipv6/cloudformation.json b/tests/integration/update_cluster/minimal-ipv6/cloudformation.json deleted file mode 100644 index f72598496eb62..0000000000000 --- a/tests/integration/update_cluster/minimal-ipv6/cloudformation.json +++ /dev/null @@ -1,1685 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalipv6examplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal-ipv6.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "TargetGroupARNs": [ - { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpminimalipv6examplebne5ih" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalipv6examplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal-ipv6.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalipv6examplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalipv6examplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": 
"nodes.minimal-ipv6.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalipv6examplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2EIPustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalipv6examplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal-ipv6.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalipv6examplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal-ipv6.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 1, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-ipv6.example.com" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalipv6examplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal-ipv6.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalipv6examplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal-ipv6.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 1, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - }, 
- { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2NatGatewayustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": { - "Fn::GetAtt": [ - "AWSEC2EIPustest1aminimalipv6examplecom", - "AllocationId" - ] - }, - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalipv6examplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalipv6examplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2RouteTableminimalipv6examplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2RouteTablepublicustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "public-us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public-us-test-1a" - } - ] - } - }, - "AWSEC2Routepublicustest1a0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2Routepublicustest1a00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2Routepublicustest1a64ff9b96": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - }, - "DestinationIpv6CidrBlock": "64:ff9b::/96", - 
"NatGatewayId": { - "Ref": "AWSEC2NatGatewayustest1aminimalipv6examplecom" - } - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalipv6examplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalipv6examplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalipv6examplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalipv6examplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom0ingresstcp22to22mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupIngressfrom0ingresstcp22to22nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupIngressfrom0ingresstcp443to443mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalipv6examplecomingressall0to0mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - 
"ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalipv6examplecomingressall0to0nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingressall0to0nodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingresstcp1to2379mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingresstcp2382to4000mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingresstcp4003to65535mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalipv6examplecomingressudp1to65535mastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalipv6examplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupIngresshttpselbtomaster": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "172.20.0.0/16" - } - }, - "AWSEC2SecurityGroupIngressicmppmtuapielb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmpv6pmtuapielb0": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalipv6examplecom" - }, - "FromPort": -1, - "ToPort": -1, - "IpProtocol": "icmpv6", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupapielbminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "api-elb.minimal-ipv6.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, 
- "GroupDescription": "Security group for api ELB", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "api-elb.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupmastersminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal-ipv6.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalipv6examplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal-ipv6.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationpublicustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablepublicustest1aminimalipv6examplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalipv6examplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "CidrBlock": "172.20.32.0/19", - "Ipv6CidrBlock": "2001:db8:0:111::/64", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal-ipv6.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalipv6examplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalipv6examplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalipv6examplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalipv6examplecom" - } - } - }, - "AWSEC2VPCminimalipv6examplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { 
- "Key": "Name", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalipv6examplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalipv6examplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal-ipv6.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2Listenerapiminimalipv6examplecom443": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpminimalipv6examplebne5ih" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom" - }, - "Port": 443, - "Protocol": "TCP" - } - }, - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "api-minimal-ipv6-example--jhj9te", - "Scheme": "internet-facing", - "SubnetMappings": [ - { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalipv6examplecom" - } - } - ], - "Type": "network", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "api.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2TargetGrouptcpminimalipv6examplebne5ih": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "tcp-minimal-ipv6-example--bne5ih", - "Port": 443, - "Protocol": "TCP", - "VpcId": { - "Ref": "AWSEC2VPCminimalipv6examplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "tcp-minimal-ipv6-example--bne5ih" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TCP", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSIAMInstanceProfilemastersminimalipv6examplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal-ipv6.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalipv6examplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalipv6examplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal-ipv6.example.com", - 
"Roles": [ - { - "Ref": "AWSIAMRolenodesminimalipv6examplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimalipv6examplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal-ipv6.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalipv6examplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal-ipv6.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-ipv6.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-ipv6.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - 
"autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AssignIpv6Addresses", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": 
"2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimalipv6examplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal-ipv6.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalipv6examplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:AssignIpv6Addresses", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimalipv6examplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal-ipv6.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalipv6examplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal-ipv6.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal-ipv6.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal-ipv6.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal-ipv6.example.com", - "Value": "owned" - } - ] - } - }, - "AWSRoute53RecordSetapiminimalipv6examplecom": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.minimal-ipv6.example.com", - "Type": "A", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "CanonicalHostedZoneID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - }, - "AWSRoute53RecordSetapiminimalipv6examplecomAAAA": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.minimal-ipv6.example.com", - "Type": "AAAA", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingV2LoadBalancerapiminimalipv6examplecom", - "CanonicalHostedZoneID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - } - } -} diff --git a/tests/integration/update_cluster/minimal-ipv6/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/minimal-ipv6/cloudformation.json.extracted.yaml deleted file mode 100644 index 3cb85ff88c6b0..0000000000000 --- a/tests/integration/update_cluster/minimal-ipv6/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,474 +0,0 @@ 
-Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalipv6examplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - nodeIPFamilies: - - ipv6 - - ipv4 - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: '::' - cloudProvider: external - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal-ipv6.example.com - serviceAccountJWKSURI: https://api.internal.minimal-ipv6.example.com/openid/v1/jwks - serviceClusterIPRange: fd00:5e4f:ce::/108 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: false - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: external - clusterName: minimal-ipv6.example.com - configureCloudRoutes: false - controllers: - - '*' - - -nodeipam - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - cpuRequest: 100m - image: 
registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: fd00:5e4f:ce::a - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: fd00:5e4f:ce::a - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal-ipv6.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: hNKVZ05MEjtdAlYuY3nPe+ccpbipMaSa2AYXkSzJUwg= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalipv6examplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - nodeIPFamilies: - - ipv6 - - ipv4 - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: fd00:5e4f:ce::a - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - 
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal-ipv6.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: oGVQyiIw+lfmCmcmtVnX8V+zNcpHJoAz2DauLNcSutA= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-ipv6/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6/data/aws_s3_object_cluster-completed.spec_content index 9d691e31c06a6..c7f6d4f0df6c6 100644 --- a/tests/integration/update_cluster/minimal-ipv6/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-ipv6/data/aws_s3_object_cluster-completed.spec_content @@ -172,7 +172,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-ipv6/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-ipv6/in-v1alpha2.yaml index 7a28a735b60e4..b670c9639ca45 100644 --- a/tests/integration/update_cluster/minimal-ipv6/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal-ipv6/in-v1alpha2.yaml @@ -34,7 +34,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal-ipv6.example.com masterPublicName: api.minimal-ipv6.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-longclustername/cloudformation.json b/tests/integration/update_cluster/minimal-longclustername/cloudformation.json deleted file mode 100644 index 5182b55826b03..0000000000000 --- a/tests/integration/update_cluster/minimal-longclustername/cloudformation.json +++ /dev/null @@ -1,1349 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersthisistrulyareallyreallylongclusternameminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": 
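The node's kube_env above carries a CACertificates PEM bundle and a kops-controller config-server endpoint (https://kops-controller.internal.minimal-ipv6.example.com:3988/); the node trusts that server only via the bundled CAs. A sketch of loading such a bundle into a TLS client, assuming the PEM has been written to a local file (the path is hypothetical):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
	"os"
)

func main() {
	// Assumed path: the PEM bundle from kube_env's CACertificates field.
	pemBytes, err := os.ReadFile("/opt/kops/conf/ca-certificates.pem")
	if err != nil {
		log.Fatal(err)
	}

	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		log.Fatal("no certificates parsed from PEM bundle")
	}

	// An HTTP client that trusts only the cluster CAs, as a node would
	// when fetching its config from the kops-controller server.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}
	resp, err := client.Get("https://kops-controller.internal.minimal-ipv6.example.com:3988/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```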
"AWSEC2Subnetustest1athisistrulyareallyreallylongclusternameminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesthisistrulyareallyreallylongclusternameminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1athisistrulyareallyreallylongclusternameminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - 
"GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaythisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersthisistrulyareallyreallylongclusternamemkaamp9" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - 
"Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesthisistrulyareallyreallylongclusternameminh1jir9" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": 
"KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablethisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaythisistrulyareallyreallylongclusternameminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablethisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaythisistrulyareallyreallylongclusternameminimalexamplecom" - } - } - }, - "AWSEC2RouteTablethisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersthisistrulyareallyreallylongclusternameminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersthisistrulyareallyreallylongclusternameminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesthisistrulyareallyreallylongclusternameminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesthisistrulyareallyreallylongclusternameminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": 
"0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersthisistrulyareallyreallylongclusternameminimalexamplecomingressall0to0mastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersthisistrulyareallyreallylongclusternameminimalexamplecomingressall0to0nodesthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesthisistrulyareallyreallylongclusternameminimalexamplecomingressall0to0nodesthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesthisistrulyareallyreallylongclusternameminimalexamplecomingresstcp1to2379mastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesthisistrulyareallyreallylongclusternameminimalexamplecomingresstcp2382to4000mastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": 
"AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesthisistrulyareallyreallylongclusternameminimalexamplecomingresstcp4003to65535mastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesthisistrulyareallyreallylongclusternameminimalexamplecomingressudp1to65535mastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1athisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1athisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablethisistrulyareallyreallylongclusternameminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1athisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - 
"Ref": "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsthisistrulyareallyreallylongclusternameminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaythisistrulyareallyreallylongclusternameminimalexamplecom" - } - } - }, - "AWSEC2VPCthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainthisistrulyareallyreallylongclusternameminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": 
"KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersthisistrulyareallyreallylongclusternamemkaamp9": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.this.is.truly.a.really.really.long.cluster-name.m-kaamp9", - "Roles": [ - { - "Ref": "AWSIAMRolemastersthisistrulyareallyreallylongclusternamemkaamp9" - } - ] - } - }, - "AWSIAMInstanceProfilenodesthisistrulyareallyreallylongclusternameminh1jir9": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.this.is.truly.a.really.really.long.cluster-name.min-h1jir9", - "Roles": [ - { - "Ref": "AWSIAMRolenodesthisistrulyareallyreallylongclusternameminh1jir9" - } - ] - } - }, - "AWSIAMPolicymastersthisistrulyareallyreallylongclusternamemkaamp9": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.this.is.truly.a.really.really.long.cluster-name.m-kaamp9", - "Roles": [ - { - "Ref": "AWSIAMRolemastersthisistrulyareallyreallylongclusternamemkaamp9" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/this.is.truly.a.really.really.long.cluster-name.minimal.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/this.is.truly.a.really.really.long.cluster-name.minimal.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/this.is.truly.a.really.really.long.cluster-name.minimal.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - 
] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - 
"elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesthisistrulyareallyreallylongclusternameminh1jir9": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.this.is.truly.a.really.really.long.cluster-name.min-h1jir9", - "Roles": [ - { - "Ref": "AWSIAMRolenodesthisistrulyareallyreallylongclusternameminh1jir9" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersthisistrulyareallyreallylongclusternamemkaamp9": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.this.is.truly.a.really.really.long.cluster-name.m-kaamp9", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.this.is.truly.a.really.really.long.cluster-name.m-kaamp9" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesthisistrulyareallyreallylongclusternameminh1jir9": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.this.is.truly.a.really.really.long.cluster-name.min-h1jir9", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - 
"Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "this.is.truly.a.really.really.long.cluster-name.minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.this.is.truly.a.really.really.long.cluster-name.min-h1jir9" - }, - { - "Key": "kubernetes.io/cluster/this.is.truly.a.really.really.long.cluster-name.minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/minimal-longclustername/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/minimal-longclustername/cloudformation.json.extracted.yaml deleted file mode 100644 index 04aed7872b807..0000000000000 --- a/tests/integration/update_cluster/minimal-longclustername/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,447 +0,0 @@ -? Resources.AWSEC2LaunchTemplatemasterustest1amastersthisistrulyareallyreallylongclusternameminimalexamplecom.Properties.LaunchTemplateData.UserData -: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.this.is.truly.a.really.really.long.cluster-name.minimal.example.com - serviceAccountJWKSURI: https://api.internal.this.is.truly.a.really.really.long.cluster-name.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: this.is.truly.a.really.really.long.cluster-name.minimal.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: 
registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/this.is.truly.a.really.really.long.cluster-name.minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: KToUiOgOP4z6ZY/Z+S/EvAiKVSMBTxKy00twsEe/UW4= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -? Resources.AWSEC2LaunchTemplatenodesthisistrulyareallyreallylongclusternameminimalexamplecom.Properties.LaunchTemplateData.UserData -: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - 
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.this.is.truly.a.really.really.long.cluster-name.minimal.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: as4GCjlYfH3CCIgOU5ZjqkThgpcPJXEe/bVB87TIynA= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-longclustername/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-longclustername/data/aws_s3_object_cluster-completed.spec_content index 8ebcb830a2dd2..8b1b51027e73b 100644 --- a/tests/integration/update_cluster/minimal-longclustername/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-longclustername/data/aws_s3_object_cluster-completed.spec_content @@ -139,7 +139,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.this.is.truly.a.really.really.long.cluster-name.minimal.example.com masterKubelet: cgroupDriver: systemd cgroupRoot: / diff --git a/tests/integration/update_cluster/minimal-longclustername/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-longclustername/in-v1alpha2.yaml index 557113e290c40..2546e2ad076d1 100644 --- a/tests/integration/update_cluster/minimal-longclustername/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal-longclustername/in-v1alpha2.yaml @@ -19,7 +19,6 @@ spec: name: us-test-1a name: events kubernetesVersion: v1.21.0 - masterInternalName: api.internal.this.is.truly.a.really.really.long.cluster-name.minimal.example.com masterPublicName: api.this.is.truly.a.really.really.long.cluster-name.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content index d46b255ee9b9e..90f9de86afd4f 100644 --- a/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal-warmpool/data/aws_s3_object_cluster-completed.spec_content @@ -155,7 +155,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal-warmpool.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal-warmpool/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-warmpool/in-v1alpha2.yaml index 21404927c615a..970633e81719e 100644 --- a/tests/integration/update_cluster/minimal-warmpool/in-v1alpha2.yaml +++ 
b/tests/integration/update_cluster/minimal-warmpool/in-v1alpha2.yaml @@ -25,7 +25,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal-warmpool.example.com masterPublicName: api.minimal-warmpool.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal/cloudformation.json b/tests/integration/update_cluster/minimal/cloudformation.json deleted file mode 100644 index a2316cd5dd576..0000000000000 --- a/tests/integration/update_cluster/minimal/cloudformation.json +++ /dev/null @@ -1,1349 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { 
- "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" 
- } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2RouteTableminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - 
"AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - }, - 
"RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2VPCminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalexamplecom": { - "Type": 
"AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": 
[ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - 
"AWSIAMPolicynodesminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/minimal/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/minimal/cloudformation.json.extracted.yaml deleted file mode 100644 index fcae283d9d4bd..0000000000000 --- a/tests/integration/update_cluster/minimal/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,445 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - 
# Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} ${url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - 
X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal.example.com - serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: La0zG04/pkxKQP393yIvIvcnlJl4O74oLnH+9AuoQTM= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir 
-p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} ${url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN 
CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: m+SniKa++7h7y0tm5tay+Q64ihlBBr7U/+QiIypg5DM= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal/data/aws_s3_object_cluster-completed.spec_content index c7acd899824b3..f6ba2d8433aaf 100644 --- a/tests/integration/update_cluster/minimal/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal/data/aws_s3_object_cluster-completed.spec_content @@ -139,7 +139,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: cgroupDriver: systemd cgroupRoot: / diff --git a/tests/integration/update_cluster/minimal/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal/in-v1alpha2.yaml index 47daf1384a1a5..bb6222758ef5f 100644 --- a/tests/integration/update_cluster/minimal/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal/in-v1alpha2.yaml @@ -19,7 +19,6 @@ spec: name: us-test-1a name: events kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal_gce/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_gce/data/aws_s3_object_cluster-completed.spec_content index 1bc259c1cf7ba..88897a1f0e7c2 100644 --- a/tests/integration/update_cluster/minimal_gce/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_gce/data/aws_s3_object_cluster-completed.spec_content @@ -149,7 +149,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.23.0 - masterInternalName: api.internal.minimal-gce.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal_gce_ilb/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_gce_ilb/data/aws_s3_object_cluster-completed.spec_content index c4199d1eadb59..2918440999d43 100644 --- 
a/tests/integration/update_cluster/minimal_gce_ilb/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_gce_ilb/data/aws_s3_object_cluster-completed.spec_content @@ -149,7 +149,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal-gce-ilb.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal_gce_ilb_longclustername/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_gce_ilb_longclustername/data/aws_s3_object_cluster-completed.spec_content index 5412ad5698df7..e700c2cd2eb8e 100644 --- a/tests/integration/update_cluster/minimal_gce_ilb_longclustername/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_gce_ilb_longclustername/data/aws_s3_object_cluster-completed.spec_content @@ -153,7 +153,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.23.0 - masterInternalName: api.internal.minimal-gce-with-a-very-very-very-very-very-long-name.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal_gce_longclustername/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_gce_longclustername/data/aws_s3_object_cluster-completed.spec_content index b8bb066129832..eeb5dd90db43b 100644 --- a/tests/integration/update_cluster/minimal_gce_longclustername/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_gce_longclustername/data/aws_s3_object_cluster-completed.spec_content @@ -149,7 +149,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.23.0 - masterInternalName: api.internal.minimal-gce-with-a-very-very-very-very-very-long-name.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal_gce_private/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_gce_private/data/aws_s3_object_cluster-completed.spec_content index 56451f55aa350..e66ca9415062a 100644 --- a/tests/integration/update_cluster/minimal_gce_private/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_gce_private/data/aws_s3_object_cluster-completed.spec_content @@ -145,7 +145,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal-gce-private.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal_gossip/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_gossip/data/aws_s3_object_cluster-completed.spec_content index 074436d243f39..48134f86fecb5 100644 --- a/tests/integration/update_cluster/minimal_gossip/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_gossip/data/aws_s3_object_cluster-completed.spec_content @@ -141,7 +141,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.k8s.local masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal_gossip/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal_gossip/in-v1alpha2.yaml index 694524ea99c45..5215cc539eccb 100644 --- a/tests/integration/update_cluster/minimal_gossip/in-v1alpha2.yaml +++ 
b/tests/integration/update_cluster/minimal_gossip/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.k8s.local masterPublicName: api.minimal.k8s.local networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal_gossip_irsa/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_gossip_irsa/data/aws_s3_object_cluster-completed.spec_content index 161d153d2e80d..afba8a5ecf927 100644 --- a/tests/integration/update_cluster/minimal_gossip_irsa/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_gossip_irsa/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.k8s.local masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/minimal_gossip_irsa/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal_gossip_irsa/in-v1alpha2.yaml index a2fb0ed0e6e33..ac1deef728b83 100644 --- a/tests/integration/update_cluster/minimal_gossip_irsa/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/minimal_gossip_irsa/in-v1alpha2.yaml @@ -23,7 +23,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.k8s.local masterPublicName: api.minimal.k8s.local networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/minimal_hetzner/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal_hetzner/data/aws_s3_object_cluster-completed.spec_content index e352de58029ea..9f5fd519b4142 100644 --- a/tests/integration/update_cluster/minimal_hetzner/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/minimal_hetzner/data/aws_s3_object_cluster-completed.spec_content @@ -155,7 +155,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.25.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/mixed_instances/cloudformation.json b/tests/integration/update_cluster/mixed_instances/cloudformation.json deleted file mode 100644 index f042eac584df9..0000000000000 --- a/tests/integration/update_cluster/mixed_instances/cloudformation.json +++ /dev/null @@ -1,2077 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.mixedinstances.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1amixedinstancesexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1bmastersmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1b.masters.mixedinstances.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1bmixedinstancesexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1b.masters.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1b", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1cmastersmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - 
"AutoScalingGroupName": "master-us-test-1c.masters.mixedinstances.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1cmixedinstancesexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1c.masters.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1c", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.mixedinstances.example.com", - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1bmixedinstancesexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "MixedInstancesPolicy": { - "LaunchTemplate": { - "LaunchTemplateSpecification": { - 
"LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "Overrides": [ - { - "InstanceType": "m5.large" - }, - { - "InstanceType": "m5.xlarge" - }, - { - "InstanceType": "t2.medium" - } - ] - }, - "InstancesDistribution": { - "OnDemandPercentageAboveBaseCapacity": 5, - "SpotInstancePools": 3, - "SpotMaxPrice": "" - } - } - } - }, - "AWSEC2DHCPOptionsmixedinstancesexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaymixedinstancesexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": 
"volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1b.masters.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1b.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1b" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1b.masters.mixedinstances.example.com" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1b" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1c.masters.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1c.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1c" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1c.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1c" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaymixedinstancesexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaymixedinstancesexamplecom" - } - } - }, - 
"AWSEC2RouteTablemixedinstancesexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersmixedinstancesexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersmixedinstancesexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesmixedinstancesexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesmixedinstancesexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersmixedinstancesexamplecomingressall0to0mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersmixedinstancesexamplecomingressall0to0nodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - 
"AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingressall0to0nodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingresstcp1to2379mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingresstcp2382to4000mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingresstcp4003to65535mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingressudp1to65535mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.mixedinstances.example.com", - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "masters.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.mixedinstances.example.com", - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1amixedinstancesexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1amixedinstancesexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - } - } - }, - 
"AWSEC2SubnetRouteTableAssociationustest1bmixedinstancesexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1bmixedinstancesexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationustest1cmixedinstancesexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1cmixedinstancesexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - } - } - }, - "AWSEC2Subnetustest1amixedinstancesexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "CidrBlock": "10.0.1.0/24", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.mixedinstances.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetustest1bmixedinstancesexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "CidrBlock": "10.0.2.0/24", - "AvailabilityZone": "us-test-1b", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1b.mixedinstances.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1b", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetustest1cmixedinstancesexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "CidrBlock": "10.0.3.0/24", - "AvailabilityZone": "us-test-1c", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1c.mixedinstances.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1c", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationmixedinstancesexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsmixedinstancesexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentmixedinstancesexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - 
"Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaymixedinstancesexamplecom" - } - } - }, - "AWSEC2VPCmixedinstancesexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1betcdeventsmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1b", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1b.etcd-events.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1b/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1betcdmainmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1b", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1b.etcd-main.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1b/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1cetcdeventsmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1c", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - 
"Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1c.etcd-events.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1c/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1cetcdmainmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1c", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1c.etcd-main.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1c/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersmixedinstancesexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersmixedinstancesexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesmixedinstancesexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesmixedinstancesexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersmixedinstancesexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersmixedinstancesexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/mixedinstances.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/mixedinstances.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/mixedinstances.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - 
"Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "mixedinstances.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "mixedinstances.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - 
"elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesmixedinstancesexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesmixedinstancesexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersmixedinstancesexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.mixedinstances.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "masters.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesmixedinstancesexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.mixedinstances.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - } - } -} diff 
--git a/tests/integration/update_cluster/mixed_instances/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/mixed_instances/cloudformation.json.extracted.yaml deleted file mode 100644 index ba004d160d4e9..0000000000000 --- a/tests/integration/update_cluster/mixed_instances/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,956 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 3 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.mixedinstances.example.com - serviceAccountJWKSURI: https://api.internal.mixedinstances.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: mixedinstances.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - 
cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/mixedinstances.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: dJQEOWgaxO/wms9AXZe5pNgHt4GIczgtIgc0rnkHO+4= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! 
(${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 3 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.mixedinstances.example.com - serviceAccountJWKSURI: https://api.internal.mixedinstances.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: mixedinstances.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: 
registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/mixedinstances.example.com - InstanceGroupName: master-us-test-1b - InstanceGroupRole: Master - NodeupConfigHash: ZZvBpLK2OrfYcnc/xQFNgZwOECyfoi54GSU1wx239hg= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 3 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.mixedinstances.example.com - serviceAccountJWKSURI: 
https://api.internal.mixedinstances.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: mixedinstances.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/mixedinstances.example.com - InstanceGroupName: master-us-test-1c - InstanceGroupRole: Master - NodeupConfigHash: zQBp4ZIdhKY6soMOzwj0ECCUVsSc5eLH/QG0waKCHBA= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download 
until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - 
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.mixedinstances.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: r5UH1+vKHzFuCneSZEydvmqU1JF6tjt0nngH+/VoDV8= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/mixed_instances/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/mixed_instances/data/aws_s3_object_cluster-completed.spec_content index dddf4ca550166..04515033e3857 100644 --- a/tests/integration/update_cluster/mixed_instances/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/mixed_instances/data/aws_s3_object_cluster-completed.spec_content @@ -150,7 +150,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.mixedinstances.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/mixed_instances/in-v1alpha2.yaml b/tests/integration/update_cluster/mixed_instances/in-v1alpha2.yaml index f36a70b33839f..3ca454244cce7 100644 --- a/tests/integration/update_cluster/mixed_instances/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/mixed_instances/in-v1alpha2.yaml @@ -30,7 +30,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.mixedinstances.example.com masterPublicName: api.mixedinstances.example.com networkCIDR: 10.0.0.0/16 networking: diff --git a/tests/integration/update_cluster/mixed_instances_spot/cloudformation.json b/tests/integration/update_cluster/mixed_instances_spot/cloudformation.json deleted file mode 100644 index b9c8c28bb8f8a..0000000000000 --- a/tests/integration/update_cluster/mixed_instances_spot/cloudformation.json +++ /dev/null @@ -1,2077 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.mixedinstances.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1amixedinstancesexamplecom" - } - ], - 
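
Each deleted UserData script above bootstraps nodeup the same way: try a list of mirror URLs with curl/wget variants, verify the artifact's SHA-256 against a digest baked into the script, and retry forever (sleeping 60s between rounds) until a download validates. A rough Go equivalent of the script's `validate-hash` step — the verification that actually gates execution — using the amd64 digest from the fixture as the expected value:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// validateHash mirrors the deleted script's validate-hash: it compares the
// file's SHA-256 digest against the expected lowercase hex string.
func validateHash(path, expected string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	actual := hex.EncodeToString(h.Sum(nil))
	if actual != expected {
		return fmt.Errorf("%s corrupted: hash %s doesn't match expected %s", path, actual, expected)
	}
	return nil
}

func main() {
	// Digest copied from NODEUP_HASH_AMD64 in the deleted fixture.
	const want = "585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924"
	if err := validateHash("nodeup", want); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("nodeup hash OK")
}

(Incidentally, the deleted script's progress message `${cmd} {url}` is missing a `$`, but that only affects logging; the hash check above is what decides whether the binary is run.)
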
"Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1bmastersmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1b.masters.mixedinstances.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1bmixedinstancesexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1b.masters.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1b", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - 
"Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1cmastersmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1c.masters.mixedinstances.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1cmixedinstancesexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1c.masters.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1c", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesmixedinstancesexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.mixedinstances.example.com", - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1bmixedinstancesexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } 
- ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "MixedInstancesPolicy": { - "LaunchTemplate": { - "LaunchTemplateSpecification": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesmixedinstancesexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesmixedinstancesexamplecom", - "LatestVersionNumber" - ] - } - }, - "Overrides": [ - { - "InstanceType": "m5.large" - }, - { - "InstanceType": "m5.xlarge" - }, - { - "InstanceType": "t2.medium" - } - ] - }, - "InstancesDistribution": { - "OnDemandPercentageAboveBaseCapacity": 5, - "SpotInstancePools": 3, - "SpotMaxPrice": "0.1" - } - } - } - }, - "AWSEC2DHCPOptionsmixedinstancesexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaymixedinstancesexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - 
"Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1b.masters.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1b.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1b" - }, 
- { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1b.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1b" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1c.masters.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1c.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1c" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": 
"master-us-test-1c.masters.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1c" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.mixedinstances.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesmixedinstancesexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.mixedinstances.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaymixedinstancesexamplecom" - } - } 
- }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaymixedinstancesexamplecom" - } - } - }, - "AWSEC2RouteTablemixedinstancesexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersmixedinstancesexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersmixedinstancesexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesmixedinstancesexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesmixedinstancesexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersmixedinstancesexamplecomingressall0to0mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersmixedinstancesexamplecomingressall0to0nodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - 
"Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingressall0to0nodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingresstcp1to2379mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingresstcp2382to4000mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingresstcp4003to65535mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesmixedinstancesexamplecomingressudp1to65535mastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersmixedinstancesexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesmixedinstancesexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.mixedinstances.example.com", - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "masters.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesmixedinstancesexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.mixedinstances.example.com", - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1amixedinstancesexamplecom": { - "Type": 
"AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1amixedinstancesexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationustest1bmixedinstancesexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1bmixedinstancesexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationustest1cmixedinstancesexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1cmixedinstancesexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablemixedinstancesexamplecom" - } - } - }, - "AWSEC2Subnetustest1amixedinstancesexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "CidrBlock": "10.0.1.0/24", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.mixedinstances.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetustest1bmixedinstancesexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "CidrBlock": "10.0.2.0/24", - "AvailabilityZone": "us-test-1b", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1b.mixedinstances.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1b", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetustest1cmixedinstancesexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "CidrBlock": "10.0.3.0/24", - "AvailabilityZone": "us-test-1c", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1c.mixedinstances.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1c", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationmixedinstancesexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": 
"AWSEC2VPCmixedinstancesexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsmixedinstancesexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentmixedinstancesexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCmixedinstancesexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaymixedinstancesexamplecom" - } - } - }, - "AWSEC2VPCmixedinstancesexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1betcdeventsmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1b", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1b.etcd-events.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1b/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1betcdmainmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1b", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1b.etcd-main.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1b/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - 
"AWSEC2Volumeustest1cetcdeventsmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1c", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1c.etcd-events.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1c/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1cetcdmainmixedinstancesexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1c", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1c.etcd-main.mixedinstances.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1c/us-test-1a,us-test-1b,us-test-1c" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersmixedinstancesexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersmixedinstancesexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesmixedinstancesexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesmixedinstancesexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersmixedinstancesexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersmixedinstancesexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/mixedinstances.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/mixedinstances.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/mixedinstances.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - 
"route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "mixedinstances.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "mixedinstances.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - 
"ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "mixedinstances.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesmixedinstancesexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.mixedinstances.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesmixedinstancesexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersmixedinstancesexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.mixedinstances.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "masters.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesmixedinstancesexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.mixedinstances.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": 
"2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "mixedinstances.example.com" - }, - { - "Key": "Name", - "Value": "nodes.mixedinstances.example.com" - }, - { - "Key": "kubernetes.io/cluster/mixedinstances.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/mixed_instances_spot/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/mixed_instances_spot/cloudformation.json.extracted.yaml deleted file mode 100644 index ba004d160d4e9..0000000000000 --- a/tests/integration/update_cluster/mixed_instances_spot/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,956 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 3 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.mixedinstances.example.com - serviceAccountJWKSURI: https://api.internal.mixedinstances.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: mixedinstances.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - 
cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/mixedinstances.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: dJQEOWgaxO/wms9AXZe5pNgHt4GIczgtIgc0rnkHO+4= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatemasterustest1bmastersmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! 
(${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 3 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.mixedinstances.example.com - serviceAccountJWKSURI: https://api.internal.mixedinstances.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: mixedinstances.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: 
registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/mixedinstances.example.com - InstanceGroupName: master-us-test-1b - InstanceGroupRole: Master - NodeupConfigHash: ZZvBpLK2OrfYcnc/xQFNgZwOECyfoi54GSU1wx239hg= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatemasterustest1cmastersmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 3 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.mixedinstances.example.com - serviceAccountJWKSURI: 
https://api.internal.mixedinstances.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: mixedinstances.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/mixedinstances.example.com - InstanceGroupName: master-us-test-1c - InstanceGroupRole: Master - NodeupConfigHash: zQBp4ZIdhKY6soMOzwj0ECCUVsSc5eLH/QG0waKCHBA= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesmixedinstancesexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download 
until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - 
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
-      ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
-      SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
-      jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
-      MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
-      MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
-      tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-      -----END CERTIFICATE-----
-      -----BEGIN CERTIFICATE-----
-      MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
-      BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
-      OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
-      SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
-      WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
-      MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
-      MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
-      9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-      -----END CERTIFICATE-----
-    server: https://kops-controller.internal.mixedinstances.example.com:3988/
-  InstanceGroupName: nodes
-  InstanceGroupRole: Node
-  NodeupConfigHash: r5UH1+vKHzFuCneSZEydvmqU1JF6tjt0nngH+/VoDV8=
-
-  __EOF_KUBE_ENV
-
-  download-release
-  echo "== nodeup node config done =="
diff --git a/tests/integration/update_cluster/mixed_instances_spot/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/mixed_instances_spot/data/aws_s3_object_cluster-completed.spec_content
index dddf4ca550166..04515033e3857 100644
--- a/tests/integration/update_cluster/mixed_instances_spot/data/aws_s3_object_cluster-completed.spec_content
+++ b/tests/integration/update_cluster/mixed_instances_spot/data/aws_s3_object_cluster-completed.spec_content
@@ -150,7 +150,6 @@ spec:
   kubernetesApiAccess:
   - 0.0.0.0/0
   kubernetesVersion: 1.21.0
-  masterInternalName: api.internal.mixedinstances.example.com
   masterKubelet:
     anonymousAuth: false
     cgroupDriver: systemd
diff --git a/tests/integration/update_cluster/mixed_instances_spot/in-v1alpha2.yaml b/tests/integration/update_cluster/mixed_instances_spot/in-v1alpha2.yaml
index e45c19dedf751..f93b8fc9fb95d 100644
--- a/tests/integration/update_cluster/mixed_instances_spot/in-v1alpha2.yaml
+++ b/tests/integration/update_cluster/mixed_instances_spot/in-v1alpha2.yaml
@@ -30,7 +30,6 @@ spec:
   kubelet:
     anonymousAuth: false
   kubernetesVersion: v1.21.0
-  masterInternalName: api.internal.mixedinstances.example.com
   masterPublicName: api.mixedinstances.example.com
   networkCIDR: 10.0.0.0/16
   networking:
diff --git a/tests/integration/update_cluster/nth_sqs_resources/cloudformation.json b/tests/integration/update_cluster/nth_sqs_resources/cloudformation.json
deleted file mode 100644
index b37309d31c36a..0000000000000
--- a/tests/integration/update_cluster/nth_sqs_resources/cloudformation.json
+++ /dev/null
@@ -1,1567 +0,0 @@
-{
-  "Resources": {
-    "AWSAutoScalingAutoScalingGroupmasterustest1amastersnthsqsresourceslongclusternameexamplecom": {
-      "Type": "AWS::AutoScaling::AutoScalingGroup",
-      "Properties": {
-        "AutoScalingGroupName": "master-us-test-1a.masters.nthsqsresources.longclustername.example.com",
-        "LaunchTemplate": {
-          "LaunchTemplateId": {
-            "Ref": "AWSEC2LaunchTemplatemasterustest1amastersnthsqsresourceslongclusternameexamplecom"
-          },
-          "Version": {
-            "Fn::GetAtt": [
-              "AWSEC2LaunchTemplatemasterustest1amastersnthsqsresourceslongclusternameexamplecom",
-              "LatestVersionNumber"
-            ]
-          }
-        },
-        "MaxSize": "1",
-        "MinSize": "1",
"VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1anthsqsresourceslongclusternameexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.nthsqsresources.longclustername.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "aws-node-termination-handler/managed", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.nthsqsresources.longclustername.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesnthsqsresourceslongclusternameexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesnthsqsresourceslongclusternameexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1anthsqsresourceslongclusternameexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.nthsqsresources.longclustername.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "aws-node-termination-handler/managed", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - 
"GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingLifecycleHookmasterustest1aNTHLifecycleHook": { - "Type": "AWS::AutoScaling::LifecycleHook", - "Properties": { - "LifecycleHookName": "master-us-test-1a-NTHLifecycleHook", - "AutoScalingGroupName": { - "Ref": "AWSAutoScalingAutoScalingGroupmasterustest1amastersnthsqsresourceslongclusternameexamplecom" - }, - "DefaultResult": "CONTINUE", - "HeartbeatTimeout": 300, - "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING" - } - }, - "AWSAutoScalingLifecycleHooknodesNTHLifecycleHook": { - "Type": "AWS::AutoScaling::LifecycleHook", - "Properties": { - "LifecycleHookName": "nodes-NTHLifecycleHook", - "AutoScalingGroupName": { - "Ref": "AWSAutoScalingAutoScalingGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "DefaultResult": "CONTINUE", - "HeartbeatTimeout": 300, - "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING" - } - }, - "AWSEC2DHCPOptionsnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewaynthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.nthsqsresources.longclustername.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersnthsqsresourceslongclusternameexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.nthsqsresources.longclustername.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - }, - { - "Key": 
"aws-node-termination-handler/managed", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.nthsqsresources.longclustername.example.com" - }, - { - "Key": "aws-node-termination-handler/managed", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.nthsqsresources.longclustername.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesnthsqsresourceslongclusternameexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.nthsqsresources.longclustername.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nodes.nthsqsresources.longclustername.example.com" - }, - { - "Key": "aws-node-termination-handler/managed", - "Value": "" - }, - 
{ - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nodes.nthsqsresources.longclustername.example.com" - }, - { - "Key": "aws-node-termination-handler/managed", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablenthsqsresourceslongclusternameexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaynthsqsresourceslongclusternameexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTablenthsqsresourceslongclusternameexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewaynthsqsresourceslongclusternameexamplecom" - } - } - }, - "AWSEC2RouteTablenthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCnthsqsresourceslongclusternameexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2SecurityGroupEgressfrommastersnthsqsresourceslongclusternameexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersnthsqsresourceslongclusternameexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesnthsqsresourceslongclusternameexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesnthsqsresourceslongclusternameexamplecomegressall0to000000": { - "Type": 
"AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersnthsqsresourceslongclusternameexamplecomingressall0to0mastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersnthsqsresourceslongclusternameexamplecomingressall0to0nodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesnthsqsresourceslongclusternameexamplecomingressall0to0nodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesnthsqsresourceslongclusternameexamplecomingresstcp1to2379mastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesnthsqsresourceslongclusternameexamplecomingresstcp2382to4000mastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "SourceSecurityGroupId": { - 
"Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesnthsqsresourceslongclusternameexamplecomingresstcp4003to65535mastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesnthsqsresourceslongclusternameexamplecomingressudp1to65535mastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.nthsqsresources.longclustername.example.com", - "VpcId": { - "Ref": "AWSEC2VPCnthsqsresourceslongclusternameexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "masters.nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.nthsqsresources.longclustername.example.com", - "VpcId": { - "Ref": "AWSEC2VPCnthsqsresourceslongclusternameexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nodes.nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1anthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1anthsqsresourceslongclusternameexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTablenthsqsresourceslongclusternameexamplecom" - } - } - }, - "AWSEC2Subnetustest1anthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCnthsqsresourceslongclusternameexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.nthsqsresources.longclustername.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - }, - { - "Key": 
"kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCnthsqsresourceslongclusternameexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCnthsqsresourceslongclusternameexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsnthsqsresourceslongclusternameexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCnthsqsresourceslongclusternameexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewaynthsqsresourceslongclusternameexamplecom" - } - } - }, - "AWSEC2VPCnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.nthsqsresources.longclustername.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.nthsqsresources.longclustername.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEventsRulenthsqsresourceslongclusternameefkbaohASGLifecycle": { - "Type": "AWS::Events::Rule", - "Properties": { - "Name": "nthsqsresources.longclustername.e-fkbaoh-ASGLifecycle", - "EventPattern": { - "detail-type": [ - "EC2 Instance-terminate Lifecycle Action" - ], - "source": [ - "aws.autoscaling" - ] - }, - "Targets": [ - { - "Id": "1", - "Arn": { - "Ref": "AWSSQSQueuenthsqsresourceslongclusternameexamplecomnth" - } - } - ] - } - }, - "AWSEventsRulenthsqsresourceslongclusternameefkbaohInstanceScheduledChange": { - "Type": "AWS::Events::Rule", - "Properties": { - "Name": 
"nthsqsresources.longclustername.e-fkbaoh-InstanceScheduledChange", - "EventPattern": { - "detail": { - "eventTypeCategory": [ - "scheduledChange" - ], - "service": [ - "EC2" - ] - }, - "detail-type": [ - "AWS Health Event" - ], - "source": [ - "aws.health" - ] - }, - "Targets": [ - { - "Id": "1", - "Arn": { - "Ref": "AWSSQSQueuenthsqsresourceslongclusternameexamplecomnth" - } - } - ] - } - }, - "AWSEventsRulenthsqsresourceslongclusternameefkbaohInstanceStateChange": { - "Type": "AWS::Events::Rule", - "Properties": { - "Name": "nthsqsresources.longclustername.e-fkbaoh-InstanceStateChange", - "EventPattern": { - "detail-type": [ - "EC2 Instance State-change Notification" - ], - "source": [ - "aws.ec2" - ] - }, - "Targets": [ - { - "Id": "1", - "Arn": { - "Ref": "AWSSQSQueuenthsqsresourceslongclusternameexamplecomnth" - } - } - ] - } - }, - "AWSEventsRulenthsqsresourceslongclusternameefkbaohRebalanceRecommendation": { - "Type": "AWS::Events::Rule", - "Properties": { - "Name": "nthsqsresources.longclustername.e-fkbaoh-RebalanceRecommendation", - "EventPattern": { - "detail-type": [ - "EC2 Instance Rebalance Recommendation" - ], - "source": [ - "aws.ec2" - ] - }, - "Targets": [ - { - "Id": "1", - "Arn": { - "Ref": "AWSSQSQueuenthsqsresourceslongclusternameexamplecomnth" - } - } - ] - } - }, - "AWSEventsRulenthsqsresourceslongclusternameefkbaohSpotInterruption": { - "Type": "AWS::Events::Rule", - "Properties": { - "Name": "nthsqsresources.longclustername.e-fkbaoh-SpotInterruption", - "EventPattern": { - "detail-type": [ - "EC2 Spot Instance Interruption Warning" - ], - "source": [ - "aws.ec2" - ] - }, - "Targets": [ - { - "Id": "1", - "Arn": { - "Ref": "AWSSQSQueuenthsqsresourceslongclusternameexamplecomnth" - } - } - ] - } - }, - "AWSIAMInstanceProfilemastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.nthsqsresources.longclustername.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersnthsqsresourceslongclusternameexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.nthsqsresources.longclustername.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesnthsqsresourceslongclusternameexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.nthsqsresources.longclustername.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersnthsqsresourceslongclusternameexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/nthsqsresources.longclustername.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", 
- "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/nthsqsresources.longclustername.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "nthsqsresources.longclustername.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "nthsqsresources.longclustername.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - 
"elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom", - "sqs:DeleteMessage", - "sqs:ReceiveMessage" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:CompleteLifecycleAction", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "nthsqsresources.longclustername.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.nthsqsresources.longclustername.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesnthsqsresourceslongclusternameexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::IAM::Role", 
- "Properties": { - "RoleName": "masters.nthsqsresources.longclustername.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "masters.nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesnthsqsresourceslongclusternameexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.nthsqsresources.longclustername.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nodes.nthsqsresources.longclustername.example.com" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - }, - "AWSSQSQueuePolicynthsqsresourceslongclusternameexamplecomnthPolicy": { - "Type": "AWS::SQS::QueuePolicy", - "Properties": { - "Queues": [ - { - "Ref": "AWSSQSQueuenthsqsresourceslongclusternameexamplecomnth" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "sqs:SendMessage", - "Effect": "Allow", - "Principal": { - "Service": [ - "events.amazonaws.com", - "sqs.amazonaws.com" - ] - }, - "Resource": "arn:aws-test:sqs:us-test-1:123456789012:nthsqsresources-longclustername-example-com-nth" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSSQSQueuenthsqsresourceslongclusternameexamplecomnth": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "nthsqsresources-longclustername-example-com-nth", - "MessageRetentionPeriod": 300, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "nthsqsresources.longclustername.example.com" - }, - { - "Key": "Name", - "Value": "nthsqsresources-longclustername-example-com-nth" - }, - { - "Key": "kubernetes.io/cluster/nthsqsresources.longclustername.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/nth_sqs_resources/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/nth_sqs_resources/cloudformation.json.extracted.yaml deleted file mode 100644 index 1ab8f8b561553..0000000000000 --- a/tests/integration/update_cluster/nth_sqs_resources/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,449 +0,0 @@ -? 
Resources.AWSEC2LaunchTemplatemasterustest1amastersnthsqsresourceslongclusternameexamplecom.Properties.LaunchTemplateData.UserData -: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.nthsqsresources.longclustername.example.com - serviceAccountJWKSURI: https://api.internal.nthsqsresources.longclustername.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: nthsqsresources.longclustername.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - 
logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/nthsqsresources.longclustername.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: KfWSX4emtavW2QDKkc+Wok3rLiV+c1jzaUH2UIu6BBI= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesnthsqsresourceslongclusternameexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - 
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.nthsqsresources.longclustername.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: G8g6T2b7gcigf604l9EuzyDS4NkqKM4RMwILiPmi/2g= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_cluster-completed.spec_content index f71b83dbf0af0..900f120ca0894 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/nth_sqs_resources/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.nthsqsresources.longclustername.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/nth_sqs_resources/in-v1alpha2.yaml b/tests/integration/update_cluster/nth_sqs_resources/in-v1alpha2.yaml index 5311faf42b4d0..6ca36e437e0f2 100644 --- a/tests/integration/update_cluster/nth_sqs_resources/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/nth_sqs_resources/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.nthsqsresources.longclustername.example.com masterPublicName: api.nthsqsresources.longclustername.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/nvidia/cloudformation.json b/tests/integration/update_cluster/nvidia/cloudformation.json deleted file mode 100644 index 74233efaaa14f..0000000000000 --- a/tests/integration/update_cluster/nvidia/cloudformation.json +++ /dev/null @@ -1,1362 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": 
"Name", - "Value": "master-us-test-1a.masters.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.minimal.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesminimalexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/gpu", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsminimalexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": 
"owned" - } - ] - } - }, - "AWSEC2InternetGatewayminimalexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": 
"extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesminimalexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.minimal.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesminimalexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "g4dn.xlarge", - "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/gpu", - "Value": "1" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/gpu", - "Value": "1" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2RouteTableminimalexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - 
"AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": 
"AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupmastersminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesminimalexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.minimal.example.com", - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aminimalexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableminimalexamplecom" - } - } - }, - "AWSEC2Subnetustest1aminimalexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.minimal.example.com" - }, - { - "Key": "SubnetType", - "Value": "Public" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - 
"AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsminimalexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentminimalexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCminimalexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayminimalexamplecom" - } - } - }, - "AWSEC2VPCminimalexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainminimalexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.minimal.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMInstanceProfilemastersminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesminimalexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ] - } - }, - "AWSIAMPolicymastersminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": 
"arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - 
"elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "minimal.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesminimalexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.minimal.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesminimalexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolemastersminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.minimal.example.com", - 
"AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "masters.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesminimalexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.minimal.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "minimal.example.com" - }, - { - "Key": "Name", - "Value": "nodes.minimal.example.com" - }, - { - "Key": "kubernetes.io/cluster/minimal.example.com", - "Value": "owned" - } - ] - } - } - } -} diff --git a/tests/integration/update_cluster/nvidia/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/nvidia/cloudformation.json.extracted.yaml deleted file mode 100644 index 1e5e15afed827..0000000000000 --- a/tests/integration/update_cluster/nvidia/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,454 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. 
Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - nvidiaGPU: - enabled: true - package: nvidia-headless-515-server - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.minimal.example.com - serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: minimal.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - 
logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/minimal.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: NodfCK80TWPwvNrfMQmiCmiHnNUnGWr/JKOa8KZXfvg= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - nvidiaGPU: - enabled: true - package: nvidia-headless-515-server - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - 
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.minimal.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: tDbwabQSJFh5VMw97sOep79Jz4/d5rJqAjQ+ubVMO6M= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/nvidia/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/nvidia/data/aws_s3_object_cluster-completed.spec_content index dd83ae3288c26..57525f9c95a2f 100644 --- a/tests/integration/update_cluster/nvidia/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/nvidia/data/aws_s3_object_cluster-completed.spec_content @@ -145,7 +145,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/nvidia/in-v1alpha2.yaml b/tests/integration/update_cluster/nvidia/in-v1alpha2.yaml index 708a887742cc0..f21a43d32764a 100644 --- a/tests/integration/update_cluster/nvidia/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/nvidia/in-v1alpha2.yaml @@ -26,7 +26,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/private-shared-ip/cloudformation.json b/tests/integration/update_cluster/private-shared-ip/cloudformation.json deleted file mode 100644 index 2fe9316686125..0000000000000 --- a/tests/integration/update_cluster/private-shared-ip/cloudformation.json +++ /dev/null @@ -1,1928 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupbastionprivatesharedipexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "bastion.private-shared-ip.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatebastionprivatesharedipexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatebastionprivatesharedipexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivatesharedipexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "bastion.private-shared-ip.example.com", - "PropagateAtLaunch": true - 
}, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "TargetGroupARNs": [ - { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivatesharedipeepmph" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1amastersprivatesharedipexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.private-shared-ip.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersprivatesharedipexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersprivatesharedipexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivatesharedipexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.private-shared-ip.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - { - "Ref": "AWSElasticLoadBalancingLoadBalancerapiprivatesharedipexamplecom" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesprivatesharedipexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.private-shared-ip.example.com", - "LaunchTemplate": { - 
"LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesprivatesharedipexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesprivatesharedipexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivatesharedipexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.private-shared-ip.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2LaunchTemplatebastionprivatesharedipexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "bastion.private-shared-ip.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 32, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilebastionsprivatesharedipexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.micro", - "KeyName": "kubernetes.private-shared-ip.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "bastion.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "bastion.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - ] - } - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.private-shared-ip.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersprivatesharedipexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.private-shared-ip.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": 
"kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesprivatesharedipexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.private-shared-ip.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesprivatesharedipexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.private-shared-ip.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "nodes.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "nodes.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2NatGatewayustest1aprivatesharedipexamplecom": { - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": "eipalloc-12345678", - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivatesharedipexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivatesharedipexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": "igw-1" - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivatesharedipexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": "igw-1" - } - }, - "AWSEC2RouteTableprivatesharedipexamplecom": { - "Type": 
"AWS::EC2::RouteTable", - "Properties": { - "VpcId": "vpc-12345678", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2RouteTableprivateustest1aprivatesharedipexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": "vpc-12345678", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "private-us-test-1a.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "private-us-test-1a" - } - ] - } - }, - "AWSEC2Routeprivateustest1a00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivatesharedipexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "NatGatewayId": { - "Ref": "AWSEC2NatGatewayustest1aprivatesharedipexamplecom" - } - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivatesharedipexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivatesharedipexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivatesharedipexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivatesharedipexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivatesharedipexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivatesharedipexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivatesharedipexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivatesharedipexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { 
- "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22bastionprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443apielbprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivatesharedipexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom172204022ingresstcp22to22bastionprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivatesharedipexamplecomingresstcp22to22mastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivatesharedipexamplecomingresstcp22to22nodesprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfrommastersprivatesharedipexamplecomingressall0to0mastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersprivatesharedipexamplecomingressall0to0nodesprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivatesharedipexamplecomingressall0to0nodesprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivatesharedipexamplecomingresstcp1to2379mastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": 
"AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivatesharedipexamplecomingresstcp2382to4000mastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivatesharedipexamplecomingresstcp4003to65535mastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivatesharedipexamplecomingressudp1to65535mastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivatesharedipexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupIngresshttpselbtomaster": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivatesharedipexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivatesharedipexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressicmppmtuapielb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivatesharedipexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmppmtusshnlb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmppmtusshnlb172204022": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivatesharedipexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupapielbprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "api-elb.private-shared-ip.example.com", - "VpcId": "vpc-12345678", - "GroupDescription": "Security group for api ELB", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "api-elb.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupbastionprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "bastion.private-shared-ip.example.com", - "VpcId": "vpc-12345678", - "GroupDescription": "Security group for bastion", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": 
"bastion.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupmastersprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.private-shared-ip.example.com", - "VpcId": "vpc-12345678", - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "masters.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesprivatesharedipexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.private-shared-ip.example.com", - "VpcId": "vpc-12345678", - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "nodes.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationprivateustest1aprivatesharedipexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aprivatesharedipexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivatesharedipexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationutilityustest1aprivatesharedipexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivatesharedipexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivatesharedipexamplecom" - } - } - }, - "AWSEC2Subnetustest1aprivatesharedipexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": "vpc-12345678", - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.private-shared-ip.example.com" - }, - { - "Key": "SubnetType", - "Value": "Private" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetutilityustest1aprivatesharedipexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": "vpc-12345678", - "CidrBlock": "172.20.4.0/22", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "utility-us-test-1a.private-shared-ip.example.com" - }, - { - "Key": "SubnetType", - "Value": "Utility" - }, - { - "Key": "kops.k8s.io/instance-group/bastion", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsprivatesharedipexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", 
- "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainprivatesharedipexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.private-shared-ip.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingLoadBalancerapiprivatesharedipexamplecom": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "LoadBalancerName": "api-private-shared-ip-exa-ohatqj", - "Listeners": [ - { - "InstancePort": "443", - "InstanceProtocol": "TCP", - "LoadBalancerPort": "443", - "Protocol": "TCP" - } - ], - "SecurityGroups": [ - { - "Ref": "AWSEC2SecurityGroupapielbprivatesharedipexamplecom" - } - ], - "Subnets": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivatesharedipexamplecom" - } - ], - "HealthCheck": { - "Target": "SSL:443", - "HealthyThreshold": "2", - "UnhealthyThreshold": "2", - "Interval": "10", - "Timeout": "5" - }, - "ConnectionDrainingPolicy": { - "Enabled": true, - "Timeout": 300 - }, - "ConnectionSettings": { - "IdleTimeout": 300 - }, - "CrossZone": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "api.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2Listenerbastionprivatesharedipexamplecom22": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivatesharedipeepmph" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerbastionprivatesharedipexamplecom" - }, - "Port": 22, - "Protocol": "TCP" - } - }, - "AWSElasticLoadBalancingV2LoadBalancerbastionprivatesharedipexamplecom": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "bastion-private-shared-ip-eepmph", - "Scheme": "internet-facing", - "SubnetMappings": [ - { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivatesharedipexamplecom" - } - } - ], - "Type": "network", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "bastion.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2TargetGroupbastionprivatesharedipeepmph": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "bastion-private-shared-ip-eepmph", - "Port": 22, - "Protocol": "TCP", - "VpcId": "vpc-12345678", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - 
{ - "Key": "Name", - "Value": "bastion-private-shared-ip-eepmph" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TCP", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSIAMInstanceProfilebastionsprivatesharedipexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "bastions.private-shared-ip.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivatesharedipexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilemastersprivatesharedipexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.private-shared-ip.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivatesharedipexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesprivatesharedipexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.private-shared-ip.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivatesharedipexamplecom" - } - ] - } - }, - "AWSIAMPolicybastionsprivatesharedipexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "bastions.private-shared-ip.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivatesharedipexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:DescribeRegions", - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicymastersprivatesharedipexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.private-shared-ip.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivatesharedipexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "private-shared-ip.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/private-shared-ip.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/private-shared-ip.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/private-shared-ip.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - 
"route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "private-shared-ip.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "private-shared-ip.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "private-shared-ip.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "private-shared-ip.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - 
"elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "private-shared-ip.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "private-shared-ip.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesprivatesharedipexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.private-shared-ip.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivatesharedipexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolebastionsprivatesharedipexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "bastions.private-shared-ip.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "bastions.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolemastersprivatesharedipexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.private-shared-ip.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "masters.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - 
"AWSIAMRolenodesprivatesharedipexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.private-shared-ip.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "private-shared-ip.example.com" - }, - { - "Key": "Name", - "Value": "nodes.private-shared-ip.example.com" - }, - { - "Key": "kubernetes.io/cluster/private-shared-ip.example.com", - "Value": "owned" - } - ] - } - }, - "AWSRoute53RecordSetapiprivatesharedipexamplecom": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.private-shared-ip.example.com", - "Type": "A", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingLoadBalancerapiprivatesharedipexamplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingLoadBalancerapiprivatesharedipexamplecom", - "CanonicalHostedZoneNameID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - } - } -} diff --git a/tests/integration/update_cluster/private-shared-ip/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/private-shared-ip/cloudformation.json.extracted.yaml deleted file mode 100644 index 02d0dc69b3674..0000000000000 --- a/tests/integration/update_cluster/private-shared-ip/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,448 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersprivatesharedipexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.private-shared-ip.example.com - serviceAccountJWKSURI: 
https://api.internal.private-shared-ip.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: private-shared-ip.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/private-shared-ip.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: oHAJQFxNUCRBrZ/sFyJzgzKAqkvM7ORHpBtywUjaa+0= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesprivatesharedipexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry 
a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - 
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.private-shared-ip.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: sLUfSvz77DYy3J1mEkrUcP9H21N2NitAHvZIb9OaWMY= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/private-shared-ip/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/private-shared-ip/data/aws_s3_object_cluster-completed.spec_content index 3d9eb2e353dd7..7058f02c079b6 100644 --- a/tests/integration/update_cluster/private-shared-ip/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/private-shared-ip/data/aws_s3_object_cluster-completed.spec_content @@ -144,7 +144,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.private-shared-ip.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/private-shared-ip/in-v1alpha2.yaml b/tests/integration/update_cluster/private-shared-ip/in-v1alpha2.yaml index 101dbef6fafdb..5b0268edfb5e1 100644 --- a/tests/integration/update_cluster/private-shared-ip/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/private-shared-ip/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.private-shared-ip.example.com masterPublicName: api.private-shared-ip.example.com networkCIDR: 172.20.0.0/16 networkID: vpc-12345678 diff --git a/tests/integration/update_cluster/private-shared-subnet/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/private-shared-subnet/data/aws_s3_object_cluster-completed.spec_content index 32110e5861045..cb3735b65854e 100644 --- a/tests/integration/update_cluster/private-shared-subnet/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/private-shared-subnet/data/aws_s3_object_cluster-completed.spec_content @@ -144,7 +144,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.private-shared-subnet.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/private-shared-subnet/in-v1alpha2.yaml b/tests/integration/update_cluster/private-shared-subnet/in-v1alpha2.yaml index 
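The extracted UserData above bootstraps nodeup by cycling through mirror URLs with curl and wget variants until one download passes SHA-256 validation, sleeping 60 seconds between full passes (as an aside, the progress echo prints a literal {url}; ${url} was presumably intended). A condensed Go rendering of the same retry-until-hash-matches loop, with helper names that are mine rather than kops code:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
	"time"
)

// downloadOrBust mirrors the shell helper: try each URL in turn, keep the
// file only if its SHA-256 matches, and loop forever otherwise, exactly
// like the script.
func downloadOrBust(path, wantHash, urls string) error {
	for {
		for _, url := range strings.Split(urls, ",") {
			if err := fetch(path, url); err != nil {
				fmt.Println("download failed:", err)
				continue
			}
			if got, _ := fileSHA256(path); got == wantHash {
				return nil
			}
			fmt.Println("hash mismatch, retrying:", url)
			os.Remove(path)
		}
		time.Sleep(60 * time.Second)
	}
}

func fetch(path, url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// curl -f treats HTTP errors as failures; do the same.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("GET %s: %s", url, resp.Status)
	}
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	return err
}

func fileSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// Values for amd64 taken from the UserData above.
	err := downloadOrBust("nodeup",
		"585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924",
		"https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64")
	fmt.Println(err)
}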
758ee1e6d2b6f..1a9283a183f38 100644 --- a/tests/integration/update_cluster/private-shared-subnet/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/private-shared-subnet/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.private-shared-subnet.example.com masterPublicName: api.private-shared-subnet.example.com networkCIDR: 172.20.0.0/16 networkID: vpc-12345678 diff --git a/tests/integration/update_cluster/privatecalico/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecalico/data/aws_s3_object_cluster-completed.spec_content index e1a17a6486f4c..64c7dad05002e 100644 --- a/tests/integration/update_cluster/privatecalico/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecalico/data/aws_s3_object_cluster-completed.spec_content @@ -169,7 +169,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: 1.25.0 - masterInternalName: api.internal.privatecalico.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privatecalico/in-v1alpha2.yaml b/tests/integration/update_cluster/privatecalico/in-v1alpha2.yaml index 174f389a8f729..8b970519ae8f7 100644 --- a/tests/integration/update_cluster/privatecalico/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privatecalico/in-v1alpha2.yaml @@ -29,7 +29,6 @@ spec: - 0.0.0.0/0 - ::/0 kubernetesVersion: v1.25.0 - masterInternalName: api.internal.privatecalico.example.com masterPublicName: api.privatecalico.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privatecanal/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecanal/data/aws_s3_object_cluster-completed.spec_content index 683db473a4a51..0a169a63116a5 100644 --- a/tests/integration/update_cluster/privatecanal/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecanal/data/aws_s3_object_cluster-completed.spec_content @@ -167,7 +167,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.25.0 - masterInternalName: api.internal.privatecanal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privatecanal/in-v1alpha2.yaml b/tests/integration/update_cluster/privatecanal/in-v1alpha2.yaml index 0399fe64f5479..54c3f347327cd 100644 --- a/tests/integration/update_cluster/privatecanal/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privatecanal/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.25.0 - masterInternalName: api.internal.privatecanal.example.com masterPublicName: api.privatecanal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privatecilium/cloudformation.json b/tests/integration/update_cluster/privatecilium/cloudformation.json deleted file mode 100644 index 6747a00527a64..0000000000000 --- a/tests/integration/update_cluster/privatecilium/cloudformation.json +++ /dev/null @@ -1,2070 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupbastionprivateciliumexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "bastion.privatecilium.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatebastionprivateciliumexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - 
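The YAML hunks in this stretch all make the same change: masterInternalName disappears from the v1alpha2 inputs and the completed cluster specs. The field only ever held the derivable default, api.internal.<cluster-name>, the same host that shows up above as the serviceAccountIssuer. A minimal sketch of that convention (the helper name is illustrative, not the actual kops function):

package main

import "fmt"

// internalAPIName shows the naming convention the removed field duplicated.
func internalAPIName(clusterName string) string {
	return "api.internal." + clusterName
}

func main() {
	fmt.Println(internalAPIName("private-shared-ip.example.com"))
	// api.internal.private-shared-ip.example.com
}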
"AWSEC2LaunchTemplatebastionprivateciliumexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "TargetGroupARNs": [ - { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumexal2ms01" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1amastersprivateciliumexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.privatecilium.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivateciliumexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", 
- "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - { - "Ref": "AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesprivateciliumexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.privatecilium.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesprivateciliumexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesprivateciliumexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivateciliumexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsprivateciliumexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2EIPustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayprivateciliumexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatebastionprivateciliumexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "bastion.privatecilium.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 32, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilebastionsprivateciliumexamplecom" - } - }, - 
"ImageId": "ami-12345678", - "InstanceType": "t2.micro", - "KeyName": "kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - ] - } - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.privatecilium.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersprivateciliumexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - 
}, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesprivateciliumexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.privatecilium.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesprivateciliumexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - 
"Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2NatGatewayustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": { - "Fn::GetAtt": [ - "AWSEC2EIPustest1aprivateciliumexamplecom", - "AllocationId" - ] - }, - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumexamplecom" - } - } - }, - "AWSEC2RouteTableprivateciliumexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2RouteTableprivateustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "private-us-test-1a.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "private-us-test-1a" - } - ] - } - }, - "AWSEC2Routeprivateustest1a00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivateciliumexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "NatGatewayId": { - "Ref": "AWSEC2NatGatewayustest1aprivateciliumexamplecom" - } - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": 
{ - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22bastionprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443apielbprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom172204022ingresstcp22to22bastionprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivateciliumexamplecomingresstcp22to22mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivateciliumexamplecomingresstcp22to22nodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - 
"AWSEC2SecurityGroupIngressfrommastersprivateciliumexamplecomingressall0to0mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersprivateciliumexamplecomingressall0to0nodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingressall0to0nodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingresstcp1to2379mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingresstcp2382to4000mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingresstcp4003to65535mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingressudp1to65535mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupIngresshttpselbtomaster": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressicmppmtuapielb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - 
"AWSEC2SecurityGroupIngressicmppmtusshnlb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmppmtusshnlb172204022": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupapielbprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "api-elb.privatecilium.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for api ELB", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "api-elb.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupbastionprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "bastion.privatecilium.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for bastion", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupmastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.privatecilium.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "masters.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.privatecilium.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationprivateustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aprivateciliumexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivateciliumexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationutilityustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumexamplecom" - } - } - }, - "AWSEC2Subnetustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "CidrBlock": 
"172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privatecilium.example.com" - }, - { - "Key": "SubnetType", - "Value": "Private" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetutilityustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "CidrBlock": "172.20.4.0/22", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "utility-us-test-1a.privatecilium.example.com" - }, - { - "Key": "SubnetType", - "Value": "Utility" - }, - { - "Key": "kops.k8s.io/instance-group/bastion", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationprivateciliumexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsprivateciliumexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentprivateciliumexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumexamplecom" - } - } - }, - "AWSEC2VPCprivateciliumexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsprivateciliumexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.privatecilium.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainprivateciliumexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.privatecilium.example.com" - }, - { - "Key": 
"k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "LoadBalancerName": "api-privatecilium-example-fnt793", - "Listeners": [ - { - "InstancePort": "443", - "InstanceProtocol": "TCP", - "LoadBalancerPort": "443", - "Protocol": "TCP" - } - ], - "SecurityGroups": [ - { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - } - ], - "Subnets": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - } - ], - "HealthCheck": { - "Target": "SSL:443", - "HealthyThreshold": "2", - "UnhealthyThreshold": "2", - "Interval": "10", - "Timeout": "5" - }, - "ConnectionDrainingPolicy": { - "Enabled": true, - "Timeout": 300 - }, - "ConnectionSettings": { - "IdleTimeout": 300 - }, - "CrossZone": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "api.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2Listenerbastionprivateciliumexamplecom22": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumexal2ms01" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerbastionprivateciliumexamplecom" - }, - "Port": 22, - "Protocol": "TCP" - } - }, - "AWSElasticLoadBalancingV2LoadBalancerbastionprivateciliumexamplecom": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "bastion-privatecilium-exa-l2ms01", - "Scheme": "internet-facing", - "SubnetMappings": [ - { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - } - } - ], - "Type": "network", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumexal2ms01": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "bastion-privatecilium-exa-l2ms01", - "Port": 22, - "Protocol": "TCP", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion-privatecilium-exa-l2ms01" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TCP", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSIAMInstanceProfilebastionsprivateciliumexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "bastions.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivateciliumexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilemastersprivateciliumexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivateciliumexamplecom" - } - ] - } - }, - 
"AWSIAMInstanceProfilenodesprivateciliumexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivateciliumexamplecom" - } - ] - } - }, - "AWSIAMPolicybastionsprivateciliumexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "bastions.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivateciliumexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:DescribeRegions", - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicymastersprivateciliumexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivateciliumexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/privatecilium.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecilium.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecilium.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privatecilium.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privatecilium.example.com", - "ec2:CreateAction": 
[ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": 
"*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesprivateciliumexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivateciliumexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolebastionsprivateciliumexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "bastions.privatecilium.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastions.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolemastersprivateciliumexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.privatecilium.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "masters.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesprivateciliumexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.privatecilium.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSRoute53RecordSetapiprivateciliumexamplecom": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.privatecilium.example.com", - "Type": "A", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - 
"AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom", - "CanonicalHostedZoneNameID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - } - } -} diff --git a/tests/integration/update_cluster/privatecilium/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/privatecilium/cloudformation.json.extracted.yaml deleted file mode 100644 index 1f9d992957538..0000000000000 --- a/tests/integration/update_cluster/privatecilium/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,448 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.privatecilium.example.com - serviceAccountJWKSURI: https://api.internal.privatecilium.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: privatecilium.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: 
systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/privatecilium.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: bE/1eP3ebjiERpmAXHGaOxi6wsg/nS0FFda8mVTSjVk= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesprivateciliumexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! 
(${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - 
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.privatecilium.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: sN54AEbLjJsWbgX3bYbJ1zKa2/ez6JnG1YWBh3knm7w= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content index 08769b6b498ef..4f98c5ebf1167 100644 --- a/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium/data/aws_s3_object_cluster-completed.spec_content @@ -144,7 +144,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.privatecilium.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privatecilium/in-v1alpha2.yaml b/tests/integration/update_cluster/privatecilium/in-v1alpha2.yaml index 39340a11ea1ab..5f4408daa8db8 100644 --- a/tests/integration/update_cluster/privatecilium/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privatecilium/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.privatecilium.example.com masterPublicName: api.privatecilium.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privatecilium2/cloudformation.json b/tests/integration/update_cluster/privatecilium2/cloudformation.json deleted file mode 100644 index 70e10499ad833..0000000000000 --- a/tests/integration/update_cluster/privatecilium2/cloudformation.json +++ /dev/null @@ -1,2002 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupbastionprivateciliumexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "bastion.privatecilium.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatebastionprivateciliumexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatebastionprivateciliumexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - 
"GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "TargetGroupARNs": [ - { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumexal2ms01" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1amastersprivateciliumexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.privatecilium.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivateciliumexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - { - "Ref": "AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesprivateciliumexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.privatecilium.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesprivateciliumexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesprivateciliumexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivateciliumexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": 
"1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsprivateciliumexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2EIPustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayprivateciliumexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatebastionprivateciliumexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "bastion.privatecilium.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 32, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilebastionsprivateciliumexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.micro", - "KeyName": "kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": 
"owned" - } - ] - } - ] - } - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.privatecilium.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersprivateciliumexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesprivateciliumexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.privatecilium.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesprivateciliumexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": 
"kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2NatGatewayustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": { - "Fn::GetAtt": [ - "AWSEC2EIPustest1aprivateciliumexamplecom", - "AllocationId" - ] - }, - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumexamplecom" - } - } - }, - "AWSEC2RouteTableprivateciliumexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2RouteTableprivateustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "private-us-test-1a.privatecilium.example.com" - }, - { - "Key": 
"kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "private-us-test-1a" - } - ] - } - }, - "AWSEC2Routeprivateustest1a00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivateciliumexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "NatGatewayId": { - "Ref": "AWSEC2NatGatewayustest1aprivateciliumexamplecom" - } - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivateciliumexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivateciliumexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22bastionprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443apielbprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": 
"tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom172204022ingresstcp22to22bastionprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivateciliumexamplecomingresstcp22to22mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivateciliumexamplecomingresstcp22to22nodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfrommastersprivateciliumexamplecomingressall0to0mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersprivateciliumexamplecomingressall0to0nodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingressall0to0nodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingresstcp1to2379mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingresstcp2382to4000mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 2382, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingresstcp4003to65535mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - 
"Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumexamplecomingressudp1to65535mastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupIngresshttpselbtomaster": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressicmppmtuapielb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmppmtusshnlb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmppmtusshnlb172204022": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupapielbprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "api-elb.privatecilium.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for api ELB", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "api-elb.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupbastionprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "bastion.privatecilium.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for bastion", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupmastersprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.privatecilium.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "masters.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesprivateciliumexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.privatecilium.example.com", - 
"VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationprivateustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aprivateciliumexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivateciliumexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationutilityustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumexamplecom" - } - } - }, - "AWSEC2Subnetustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privatecilium.example.com" - }, - { - "Key": "SubnetType", - "Value": "Private" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetutilityustest1aprivateciliumexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "CidrBlock": "172.20.4.0/22", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "utility-us-test-1a.privatecilium.example.com" - }, - { - "Key": "SubnetType", - "Value": "Utility" - }, - { - "Key": "kops.k8s.io/instance-group/bastion", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationprivateciliumexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsprivateciliumexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentprivateciliumexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumexamplecom" - } - } - }, - "AWSEC2VPCprivateciliumexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "privatecilium.example.com" 
- }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsprivateciliumexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.privatecilium.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainprivateciliumexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.privatecilium.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "LoadBalancerName": "api-privatecilium-example-fnt793", - "Listeners": [ - { - "InstancePort": "443", - "InstanceProtocol": "TCP", - "LoadBalancerPort": "443", - "Protocol": "TCP" - } - ], - "SecurityGroups": [ - { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumexamplecom" - } - ], - "Subnets": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - } - ], - "HealthCheck": { - "Target": "SSL:443", - "HealthyThreshold": "2", - "UnhealthyThreshold": "2", - "Interval": "10", - "Timeout": "5" - }, - "ConnectionDrainingPolicy": { - "Enabled": true, - "Timeout": 300 - }, - "ConnectionSettings": { - "IdleTimeout": 300 - }, - "CrossZone": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "api.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2Listenerbastionprivateciliumexamplecom22": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumexal2ms01" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerbastionprivateciliumexamplecom" - }, - "Port": 22, - "Protocol": "TCP" - } - }, - "AWSElasticLoadBalancingV2LoadBalancerbastionprivateciliumexamplecom": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "bastion-privatecilium-exa-l2ms01", - "Scheme": "internet-facing", - "SubnetMappings": [ - { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumexamplecom" - } - } - ], - "Type": "network", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - 
"AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumexal2ms01": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "bastion-privatecilium-exa-l2ms01", - "Port": 22, - "Protocol": "TCP", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastion-privatecilium-exa-l2ms01" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TCP", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSIAMInstanceProfilebastionsprivateciliumexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "bastions.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivateciliumexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilemastersprivateciliumexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivateciliumexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesprivateciliumexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivateciliumexamplecom" - } - ] - } - }, - "AWSIAMPolicybastionsprivateciliumexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "bastions.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivateciliumexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:DescribeRegions", - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicymastersprivateciliumexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivateciliumexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/privatecilium.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecilium.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecilium.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - 
"route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privatecilium.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privatecilium.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcs", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - 
"elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privatecilium.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesprivateciliumexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.privatecilium.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivateciliumexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolebastionsprivateciliumexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "bastions.privatecilium.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "bastions.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolemastersprivateciliumexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.privatecilium.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "masters.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesprivateciliumexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.privatecilium.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { 
- "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privatecilium.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privatecilium.example.com" - }, - { - "Key": "kubernetes.io/cluster/privatecilium.example.com", - "Value": "owned" - } - ] - } - }, - "AWSRoute53RecordSetapiprivateciliumexamplecom": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.privatecilium.example.com", - "Type": "A", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumexamplecom", - "CanonicalHostedZoneNameID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - } - } -} diff --git a/tests/integration/update_cluster/privatecilium2/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/privatecilium2/cloudformation.json.extracted.yaml deleted file mode 100644 index 653b6e167388e..0000000000000 --- a/tests/integration/update_cluster/privatecilium2/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,472 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersprivateciliumexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - runc: - version: 1.1.4 - version: 1.6.10 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - events: - version: 3.5.4 - main: - version: 3.5.4 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: external - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-apiserver:v1.24.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.privatecilium.example.com - serviceAccountJWKSURI: https://api.internal.privatecilium.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: external - clusterCIDR: 100.96.0.0/11 - clusterName: privatecilium.example.com - configureCloudRoutes: false - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-controller-manager:v1.24.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: 
registry.k8s.io/kube-proxy:v1.24.0 - logLevel: 2 - kubeScheduler: - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - image: registry.k8s.io/kube-scheduler:v1.24.0 - leaderElection: - leaderElect: true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/privatecilium.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: 6B9ojG3Qh0EKnyRqH29vgauEqkUaEAF4m+uXp+Di3d8= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesprivateciliumexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: true - version: v1.12.0 - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - runc: - version: 1.1.4 - version: 1.6.10 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - image: registry.k8s.io/kube-proxy:v1.24.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: external - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - featureGates: - CSIMigrationAWS: "true" - InTreePluginAWSUnregister: "true" - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - protectKernelDefaults: true - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - 
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.privatecilium.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: wdoKm7fIgHCgm77Y43XNvYUPTWblXHtttGO2w/5kDGw= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content index 9ffb698e76546..82914512dbaf8 100644 --- a/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatecilium2/data/aws_s3_object_cluster-completed.spec_content @@ -170,7 +170,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.24.0 - masterInternalName: api.internal.privatecilium.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privatecilium2/in-v1alpha2.yaml b/tests/integration/update_cluster/privatecilium2/in-v1alpha2.yaml index 0d9c1f6299abc..250a68a44afd2 100644 --- a/tests/integration/update_cluster/privatecilium2/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privatecilium2/in-v1alpha2.yaml @@ -24,7 +24,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.24.0 - masterInternalName: api.internal.privatecilium.example.com masterPublicName: api.privatecilium.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privateciliumadvanced/cloudformation.json b/tests/integration/update_cluster/privateciliumadvanced/cloudformation.json deleted file mode 100644 index a29d72b03b312..0000000000000 --- a/tests/integration/update_cluster/privateciliumadvanced/cloudformation.json +++ /dev/null @@ -1,2122 +0,0 @@ -{ - "Resources": { - "AWSAutoScalingAutoScalingGroupbastionprivateciliumadvancedexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "bastion.privateciliumadvanced.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatebastionprivateciliumadvancedexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatebastionprivateciliumadvancedexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumadvancedexamplecom" - } - ], - "Tags": [ - { - "Key": 
"KubernetesCluster", - "Value": "privateciliumadvanced.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "bastion.privateciliumadvanced.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "TargetGroupARNs": [ - { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumadva0jni40" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupmasterustest1amastersprivateciliumadvancedexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "master-us-test-1a.masters.privateciliumadvanced.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumadvancedexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumadvancedexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "1", - "MinSize": "1", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivateciliumadvancedexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privateciliumadvanced.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/master", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ], - "LoadBalancerNames": [ - { - "Ref": 
"AWSElasticLoadBalancingLoadBalancerapiprivateciliumadvancedexamplecom" - } - ] - } - }, - "AWSAutoScalingAutoScalingGroupnodesprivateciliumadvancedexamplecom": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AutoScalingGroupName": "nodes.privateciliumadvanced.example.com", - "LaunchTemplate": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatenodesprivateciliumadvancedexamplecom" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatenodesprivateciliumadvancedexamplecom", - "LatestVersionNumber" - ] - } - }, - "MaxSize": "2", - "MinSize": "2", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnetustest1aprivateciliumadvancedexamplecom" - } - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "Name", - "Value": "nodes.privateciliumadvanced.example.com", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "", - "PropagateAtLaunch": true - }, - { - "Key": "k8s.io/role/node", - "Value": "1", - "PropagateAtLaunch": true - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes", - "PropagateAtLaunch": true - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": "1Minute", - "Metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ] - } - ] - } - }, - "AWSEC2DHCPOptionsprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::DHCPOptions", - "Properties": { - "DomainName": "us-test-1.compute.internal", - "DomainNameServers": [ - "AmazonProvidedDNS" - ], - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2EIPustest1aprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2InternetGatewayprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2LaunchTemplatebastionprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "bastion.privateciliumadvanced.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 32, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { 
- "Ref": "AWSIAMInstanceProfilebastionsprivateciliumadvancedexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.micro", - "KeyName": "kubernetes.privateciliumadvanced.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/bastion", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "bastion" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - ] - } - } - }, - "AWSEC2LaunchTemplatemasterustest1amastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "master-us-test-1a.masters.privateciliumadvanced.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 64, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - }, - { - "DeviceName": "/dev/sdc", - "VirtualName": "ephemeral0" - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilemastersprivateciliumadvancedexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "m3.medium", - "KeyName": "kubernetes.privateciliumadvanced.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - }, - { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "master-us-test-1a.masters.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "master" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", - "Value": "" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", - "Value": "" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "master-us-test-1a" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2LaunchTemplatenodesprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "nodes.privateciliumadvanced.example.com", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "VolumeType": "gp3", - "VolumeSize": 128, - "Iops": 3000, - "Throughput": 125, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodesprivateciliumadvancedexamplecom" - } - }, - "ImageId": "ami-12345678", - "InstanceType": "t2.medium", - "KeyName": "kubernetes.privateciliumadvanced.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": false - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": false, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Ipv6AddressCount": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - } - ] - } - ], - "TagSpecifications": [ - { - "ResourceType": "instance", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - }, 
- { - "ResourceType": "volume", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", - "Value": "node" - }, - { - "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", - "Value": "" - }, - { - "Key": "k8s.io/role/node", - "Value": "1" - }, - { - "Key": "kops.k8s.io/instancegroup", - "Value": "nodes" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - ], - "UserData": "extracted" - } - } - }, - "AWSEC2NatGatewayustest1aprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": { - "Fn::GetAtt": [ - "AWSEC2EIPustest1aprivateciliumadvancedexamplecom", - "AllocationId" - ] - }, - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumadvancedexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Route0": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumadvancedexamplecom" - }, - "DestinationIpv6CidrBlock": "::/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumadvancedexamplecom" - } - } - }, - "AWSEC2Route00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumadvancedexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "GatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumadvancedexamplecom" - } - } - }, - "AWSEC2RouteTableprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "public" - } - ] - } - }, - "AWSEC2RouteTableprivateustest1aprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "private-us-test-1a.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/kops/role", - "Value": "private-us-test-1a" - } - ] - } - }, - "AWSEC2Routeprivateustest1a00000": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivateciliumadvancedexamplecom" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "NatGatewayId": { - "Ref": "AWSEC2NatGatewayustest1aprivateciliumadvancedexamplecom" - } - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivateciliumadvancedexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, 
- "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromapielbprivateciliumadvancedexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivateciliumadvancedexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrombastionprivateciliumadvancedexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivateciliumadvancedexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfrommastersprivateciliumadvancedexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivateciliumadvancedexamplecomegressall0to00": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIpv6": "::/0" - } - }, - "AWSEC2SecurityGroupEgressfromnodesprivateciliumadvancedexamplecomegressall0to000000": { - "Type": "AWS::EC2::SecurityGroupEgress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22bastionprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443apielbprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumadvancedexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressfrom172204022ingresstcp22to22bastionprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivateciliumadvancedexamplecomingresstcp22to22mastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - 
"GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfrombastionprivateciliumadvancedexamplecomingresstcp22to22nodesprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfrommastersprivateciliumadvancedexamplecomingressall0to0mastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfrommastersprivateciliumadvancedexamplecomingressall0to0nodesprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumadvancedexamplecomingressall0to0nodesprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "FromPort": 0, - "ToPort": 0, - "IpProtocol": "-1" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumadvancedexamplecomingresstcp1to2379mastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "FromPort": 1, - "ToPort": 2379, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumadvancedexamplecomingresstcp2383to4000mastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "FromPort": 2383, - "ToPort": 4000, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumadvancedexamplecomingresstcp4003to65535mastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "FromPort": 4003, - "ToPort": 65535, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressfromnodesprivateciliumadvancedexamplecomingressudp1to65535mastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroupIngress", - 
"Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom" - }, - "FromPort": 1, - "ToPort": 65535, - "IpProtocol": "udp" - } - }, - "AWSEC2SecurityGroupIngresshttpselbtomaster": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom" - }, - "SourceSecurityGroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumadvancedexamplecom" - }, - "FromPort": 443, - "ToPort": 443, - "IpProtocol": "tcp" - } - }, - "AWSEC2SecurityGroupIngressicmppmtuapielb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumadvancedexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmppmtusshnlb00000": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "0.0.0.0/0" - } - }, - "AWSEC2SecurityGroupIngressicmppmtusshnlb172204022": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": { - "Ref": "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom" - }, - "FromPort": 3, - "ToPort": 4, - "IpProtocol": "icmp", - "CidrIp": "172.20.4.0/22" - } - }, - "AWSEC2SecurityGroupapielbprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "api-elb.privateciliumadvanced.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "GroupDescription": "Security group for api ELB", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "api-elb.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupbastionprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "bastion.privateciliumadvanced.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "GroupDescription": "Security group for bastion", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupmastersprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "masters.privateciliumadvanced.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "GroupDescription": "Security group for masters", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "masters.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SecurityGroupnodesprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "nodes.privateciliumadvanced.example.com", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - 
"GroupDescription": "Security group for nodes", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2SubnetRouteTableAssociationprivateustest1aprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetustest1aprivateciliumadvancedexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateustest1aprivateciliumadvancedexamplecom" - } - } - }, - "AWSEC2SubnetRouteTableAssociationutilityustest1aprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumadvancedexamplecom" - }, - "RouteTableId": { - "Ref": "AWSEC2RouteTableprivateciliumadvancedexamplecom" - } - } - }, - "AWSEC2Subnetustest1aprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "CidrBlock": "172.20.32.0/19", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.privateciliumadvanced.example.com" - }, - { - "Key": "SubnetType", - "Value": "Private" - }, - { - "Key": "kops.k8s.io/instance-group/master-us-test-1a", - "Value": "true" - }, - { - "Key": "kops.k8s.io/instance-group/nodes", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/internal-elb", - "Value": "1" - } - ] - } - }, - "AWSEC2Subnetutilityustest1aprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "CidrBlock": "172.20.4.0/22", - "AvailabilityZone": "us-test-1a", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "utility-us-test-1a.privateciliumadvanced.example.com" - }, - { - "Key": "SubnetType", - "Value": "Utility" - }, - { - "Key": "kops.k8s.io/instance-group/bastion", - "Value": "true" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - }, - { - "Key": "kubernetes.io/role/elb", - "Value": "1" - } - ] - } - }, - "AWSEC2VPCCidrBlockAmazonIPv6": { - "Type": "AWS::EC2::VPCCidrBlock", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "AmazonProvidedIpv6CidrBlock": true - } - }, - "AWSEC2VPCDHCPOptionsAssociationprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::VPCDHCPOptionsAssociation", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "DhcpOptionsId": { - "Ref": "AWSEC2DHCPOptionsprivateciliumadvancedexamplecom" - } - } - }, - "AWSEC2VPCGatewayAttachmentprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "InternetGatewayId": { - "Ref": "AWSEC2InternetGatewayprivateciliumadvancedexamplecom" - } - } - }, - "AWSEC2VPCprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "172.20.0.0/16", - "EnableDnsHostnames": true, - "EnableDnsSupport": 
true, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdciliumprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-cilium.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/etcd/cilium", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdeventsprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-events.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/etcd/events", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSEC2Volumeustest1aetcdmainprivateciliumadvancedexamplecom": { - "Type": "AWS::EC2::Volume", - "Properties": { - "AvailabilityZone": "us-test-1a", - "Size": 20, - "VolumeType": "gp3", - "Iops": 3000, - "Throughput": 125, - "Encrypted": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "us-test-1a.etcd-main.privateciliumadvanced.example.com" - }, - { - "Key": "k8s.io/etcd/main", - "Value": "us-test-1a/us-test-1a" - }, - { - "Key": "k8s.io/role/master", - "Value": "1" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumadvancedexamplecom": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "LoadBalancerName": "api-privateciliumadvanced-0cffmm", - "Listeners": [ - { - "InstancePort": "443", - "InstanceProtocol": "TCP", - "LoadBalancerPort": "443", - "Protocol": "TCP" - } - ], - "SecurityGroups": [ - { - "Ref": "AWSEC2SecurityGroupapielbprivateciliumadvancedexamplecom" - } - ], - "Subnets": [ - { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumadvancedexamplecom" - } - ], - "HealthCheck": { - "Target": "SSL:443", - "HealthyThreshold": "2", - "UnhealthyThreshold": "2", - "Interval": "10", - "Timeout": "5" - }, - "ConnectionDrainingPolicy": { - "Enabled": true, - "Timeout": 300 - }, - "ConnectionSettings": { - "IdleTimeout": 300 - }, - "CrossZone": false, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "api.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2Listenerbastionprivateciliumadvancedexamplecom22": { - "Type": 
"AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [ - { - "Type": "forward", - "TargetGroupArn": { - "Ref": "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumadva0jni40" - } - } - ], - "LoadBalancerArn": { - "Ref": "AWSElasticLoadBalancingV2LoadBalancerbastionprivateciliumadvancedexamplecom" - }, - "Port": 22, - "Protocol": "TCP" - } - }, - "AWSElasticLoadBalancingV2LoadBalancerbastionprivateciliumadvancedexamplecom": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "bastion-privateciliumadva-0jni40", - "Scheme": "internet-facing", - "SubnetMappings": [ - { - "SubnetId": { - "Ref": "AWSEC2Subnetutilityustest1aprivateciliumadvancedexamplecom" - } - } - ], - "Type": "network", - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "bastion.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSElasticLoadBalancingV2TargetGroupbastionprivateciliumadva0jni40": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "bastion-privateciliumadva-0jni40", - "Port": 22, - "Protocol": "TCP", - "VpcId": { - "Ref": "AWSEC2VPCprivateciliumadvancedexamplecom" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "bastion-privateciliumadva-0jni40" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ], - "HealthCheckProtocol": "TCP", - "HealthyThresholdCount": 2, - "UnhealthyThresholdCount": 2 - } - }, - "AWSIAMInstanceProfilebastionsprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "bastions.privateciliumadvanced.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivateciliumadvancedexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilemastersprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "masters.privateciliumadvanced.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivateciliumadvancedexamplecom" - } - ] - } - }, - "AWSIAMInstanceProfilenodesprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::InstanceProfile", - "Properties": { - "InstanceProfileName": "nodes.privateciliumadvanced.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivateciliumadvancedexamplecom" - } - ] - } - }, - "AWSIAMPolicybastionsprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "bastions.privateciliumadvanced.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolebastionsprivateciliumadvancedexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:DescribeRegions", - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMPolicymastersprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "masters.privateciliumadvanced.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolemastersprivateciliumadvancedexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": "ec2:AttachVolume", - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privateciliumadvanced.example.com", - "aws:ResourceTag/k8s.io/role/master": "1" - } - }, - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - 
"Action": [ - "s3:Get*" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/privateciliumadvanced.example.com/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privateciliumadvanced.example.com/backups/etcd/main/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privateciliumadvanced.example.com/backups/etcd/events/*" - }, - { - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:PutObject" - ], - "Effect": "Allow", - "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privateciliumadvanced.example.com/backups/etcd/cilium/*" - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-write-bucket" - ] - }, - { - "Action": [ - "route53:ChangeResourceRecordSets", - "route53:ListResourceRecordSets", - "route53:GetHostedZone" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" - ] - }, - { - "Action": [ - "route53:GetChange" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:route53:::change/*" - ] - }, - { - "Action": [ - "route53:ListHostedZones", - "route53:ListTagsForResource" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privateciliumadvanced.example.com", - "ec2:CreateAction": [ - "CreateSecurityGroup" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privateciliumadvanced.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:security-group/*" - ] - }, - { - "Action": "ec2:CreateTags", - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privateciliumadvanced.example.com", - "ec2:CreateAction": [ - "CreateVolume", - "CreateSnapshot" - ] - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Condition": { - "Null": { - "aws:RequestTag/KubernetesCluster": "true" - }, - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privateciliumadvanced.example.com" - } - }, - "Effect": "Allow", - "Resource": [ - "arn:aws-test:ec2:*:*:volume/*", - "arn:aws-test:ec2:*:*:snapshot/*" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:AssignPrivateIpAddresses", - "ec2:AttachNetworkInterface", - "ec2:AttachVolume", - 
"ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateNetworkInterface", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteNetworkInterface", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVpcPeeringConnections", - "ec2:DescribeVpcs", - "ec2:DetachNetworkInterface", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyNetworkInterfaceAttribute", - "ec2:ModifyVolume", - "ec2:UnassignPrivateIpAddresses", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:RegisterTargets", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:DescribeKey", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:RevokeSecurityGroupIngress", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DeregisterTargets", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" - ], - "Condition": { - "StringEquals": { - "aws:ResourceTag/KubernetesCluster": "privateciliumadvanced.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "ec2:CreateSecurityGroup", - "ec2:CreateSnapshot", - "ec2:CreateVolume", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Condition": { - "StringEquals": { - "aws:RequestTag/KubernetesCluster": "privateciliumadvanced.example.com" - } - }, - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ec2:CreateSecurityGroup", - "Effect": "Allow", - "Resource": "arn:aws-test:ec2:*:*:vpc/*" - } - ], - 
"Version": "2012-10-17" - } - } - }, - "AWSIAMPolicynodesprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyName": "nodes.privateciliumadvanced.example.com", - "Roles": [ - { - "Ref": "AWSIAMRolenodesprivateciliumadvancedexamplecom" - } - ], - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "s3:GetBucketLocation", - "s3:GetEncryptionConfiguration", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws-test:s3:::placeholder-read-bucket" - ] - }, - { - "Action": [ - "autoscaling:DescribeAutoScalingInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "iam:GetServerCertificate", - "iam:ListServerCertificates", - "kms:GenerateRandom" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - } - } - }, - "AWSIAMRolebastionsprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "bastions.privateciliumadvanced.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "bastions.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolemastersprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "masters.privateciliumadvanced.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "masters.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSIAMRolenodesprivateciliumadvancedexamplecom": { - "Type": "AWS::IAM::Role", - "Properties": { - "RoleName": "nodes.privateciliumadvanced.example.com", - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - }, - "Tags": [ - { - "Key": "KubernetesCluster", - "Value": "privateciliumadvanced.example.com" - }, - { - "Key": "Name", - "Value": "nodes.privateciliumadvanced.example.com" - }, - { - "Key": "kubernetes.io/cluster/privateciliumadvanced.example.com", - "Value": "owned" - } - ] - } - }, - "AWSRoute53RecordSetapiprivateciliumadvancedexamplecom": { - "Type": "AWS::Route53::RecordSet", - "Properties": { - "Name": "api.privateciliumadvanced.example.com", - "Type": "A", - "AliasTarget": { - "DNSName": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumadvancedexamplecom", - "DNSName" - ] - }, - "HostedZoneId": { - "Fn::GetAtt": [ - "AWSElasticLoadBalancingLoadBalancerapiprivateciliumadvancedexamplecom", - "CanonicalHostedZoneNameID" - ] - }, - "EvaluateTargetHealth": false - }, - "HostedZoneId": "/hostedzone/Z1AFAKE1ZON3YO" - } - } - } -} diff --git a/tests/integration/update_cluster/privateciliumadvanced/cloudformation.json.extracted.yaml 
b/tests/integration/update_cluster/privateciliumadvanced/cloudformation.json.extracted.yaml deleted file mode 100644 index 0d9affb324c16..0000000000000 --- a/tests/integration/update_cluster/privateciliumadvanced/cloudformation.json.extracted.yaml +++ /dev/null @@ -1,452 +0,0 @@ -Resources.AWSEC2LaunchTemplatemasterustest1amastersprivateciliumadvancedexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. 
==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - encryptionConfig: null - etcdClusters: - cilium: - version: 3.4.13 - events: - version: 3.4.13 - main: - version: 3.4.13 - kubeAPIServer: - allowPrivileged: true - anonymousAuth: false - apiAudiences: - - kubernetes.svc.default - apiServerCount: 1 - authorizationMode: AlwaysAllow - bindAddress: 0.0.0.0 - cloudProvider: aws - enableAdmissionPlugins: - - NamespaceLifecycle - - LimitRanger - - ServiceAccount - - DefaultStorageClass - - DefaultTolerationSeconds - - MutatingAdmissionWebhook - - ValidatingAdmissionWebhook - - NodeRestriction - - ResourceQuota - etcdServers: - - https://127.0.0.1:4001 - etcdServersOverrides: - - /events#https://127.0.0.1:4002 - image: registry.k8s.io/kube-apiserver:v1.21.0 - kubeletPreferredAddressTypes: - - InternalIP - - Hostname - - ExternalIP - logLevel: 2 - requestheaderAllowedNames: - - aggregator - requestheaderExtraHeaderPrefixes: - - X-Remote-Extra- - requestheaderGroupHeaders: - - X-Remote-Group - requestheaderUsernameHeaders: - - X-Remote-User - securePort: 443 - serviceAccountIssuer: https://api.internal.privateciliumadvanced.example.com - serviceAccountJWKSURI: https://api.internal.privateciliumadvanced.example.com/openid/v1/jwks - serviceClusterIPRange: 100.64.0.0/13 - storageBackend: etcd3 - kubeControllerManager: - allocateNodeCIDRs: true - attachDetachReconcileSyncPeriod: 1m0s - cloudProvider: aws - clusterCIDR: 100.96.0.0/11 - clusterName: privateciliumadvanced.example.com - configureCloudRoutes: false - image: registry.k8s.io/kube-controller-manager:v1.21.0 - leaderElection: - leaderElect: true - logLevel: 2 - useServiceAccountCredentials: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - enabled: false - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.21.0 - leaderElection: - leaderElect: 
true - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - masterKubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigBase: memfs://clusters.example.com/privateciliumadvanced.example.com - InstanceGroupName: master-us-test-1a - InstanceGroupRole: Master - NodeupConfigHash: fkxlCTz97ABY8V09Ym4jNPEPFpWjG4j97P9CI0hpy3g= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" -Resources.AWSEC2LaunchTemplatenodesprivateciliumadvancedexamplecom.Properties.LaunchTemplateData.UserData: | - #!/bin/bash - set -o errexit - set -o nounset - set -o pipefail - - NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 - NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 - NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 - NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 - - export AWS_REGION=us-test-1 - - - - - sysctl -w net.core.rmem_max=16777216 || true - sysctl -w net.core.wmem_max=16777216 || true - sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true - sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true - - - function ensure-install-dir() { - INSTALL_DIR="/opt/kops" - # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec - if [[ -d /var/lib/toolbox ]]; then - INSTALL_DIR="/var/lib/toolbox/kops" - fi - mkdir -p ${INSTALL_DIR}/bin - mkdir -p ${INSTALL_DIR}/conf - cd ${INSTALL_DIR} - } - - # Retry a download until we get it. args: name, sha, urls - download-or-bust() { - local -r file="$1" - local -r hash="$2" - local -r urls=( $(split-commas "$3") ) - - if [[ -f "${file}" ]]; then - if ! 
validate-hash "${file}" "${hash}"; then - rm -f "${file}" - else - return 0 - fi - fi - - while true; do - for url in "${urls[@]}"; do - commands=( - "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" - "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" - ) - for cmd in "${commands[@]}"; do - echo "Attempting download with: ${cmd} {url}" - if ! (${cmd} "${url}"); then - echo "== Download failed with ${cmd} ==" - continue - fi - if ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - rm -f "${file}" - else - echo "== Downloaded ${url} (SHA256 = ${hash}) ==" - return 0 - fi - done - done - - echo "All downloads failed; sleeping before retrying" - sleep 60 - done - } - - validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha256sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" - return 1 - fi - } - - function split-commas() { - echo $1 | tr "," "\n" - } - - function download-release() { - case "$(uname -m)" in - x86_64*|i?86_64*|amd64*) - NODEUP_URL="${NODEUP_URL_AMD64}" - NODEUP_HASH="${NODEUP_HASH_AMD64}" - ;; - aarch64*|arm64*) - NODEUP_URL="${NODEUP_URL_ARM64}" - NODEUP_HASH="${NODEUP_HASH_ARM64}" - ;; - *) - echo "Unsupported host arch: $(uname -m)" >&2 - exit 1 - ;; - esac - - cd ${INSTALL_DIR}/bin - download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" - - chmod +x nodeup - - echo "Running nodeup" - # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 - ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) - } - - #################################################################################### - - /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" - - echo "== nodeup node config starting ==" - ensure-install-dir - - cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' - cloudConfig: - awsEBSCSIDriver: - enabled: false - manageStorageClasses: true - containerRuntime: containerd - containerd: - logLevel: info - version: 1.4.13 - docker: - skipInstall: true - kubeProxy: - clusterCIDR: 100.96.0.0/11 - cpuRequest: 100m - enabled: false - image: registry.k8s.io/kube-proxy:v1.21.0 - logLevel: 2 - kubelet: - anonymousAuth: false - cgroupDriver: systemd - cgroupRoot: / - cloudProvider: aws - clusterDNS: 100.64.0.10 - clusterDomain: cluster.local - enableDebuggingHandlers: true - evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% - kubeconfigPath: /var/lib/kubelet/kubeconfig - logLevel: 2 - networkPluginName: cni - podInfraContainerImage: registry.k8s.io/pause:3.6 - podManifestPath: /etc/kubernetes/manifests - registerSchedulable: true - shutdownGracePeriod: 30s - shutdownGracePeriodCriticalPods: 10s - - __EOF_CLUSTER_SPEC - - cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' - CloudProvider: aws - ConfigServer: - CACertificates: | - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw - ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - 
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 - jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA - MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 - tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU - BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw - OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD - SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 - WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG - MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn - MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA - 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== - -----END CERTIFICATE----- - server: https://kops-controller.internal.privateciliumadvanced.example.com:3988/ - InstanceGroupName: nodes - InstanceGroupRole: Node - NodeupConfigHash: wuX1AqCvPq+rf4YEELokVnc5SB6MRE2oma/RSBgfUJM= - - __EOF_KUBE_ENV - - download-release - echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content index 3d3bb46edcf1f..6303eaf92c157 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privateciliumadvanced/data/aws_s3_object_cluster-completed.spec_content @@ -152,7 +152,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.privateciliumadvanced.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privateciliumadvanced/in-v1alpha2.yaml b/tests/integration/update_cluster/privateciliumadvanced/in-v1alpha2.yaml index 3af5dc3d918e2..80227961a330d 100644 --- a/tests/integration/update_cluster/privateciliumadvanced/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privateciliumadvanced/in-v1alpha2.yaml @@ -28,7 +28,6 @@ spec: kubeProxy: enabled: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.privateciliumadvanced.example.com masterPublicName: api.privateciliumadvanced.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privatedns1/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatedns1/data/aws_s3_object_cluster-completed.spec_content index 5fcef84f603e7..6aa26ed4f3545 100644 --- a/tests/integration/update_cluster/privatedns1/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatedns1/data/aws_s3_object_cluster-completed.spec_content @@ -147,7 +147,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.privatedns1.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privatedns1/in-v1alpha2.yaml b/tests/integration/update_cluster/privatedns1/in-v1alpha2.yaml index a9634fe964df2..818027be9c99a 100644 --- a/tests/integration/update_cluster/privatedns1/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privatedns1/in-v1alpha2.yaml @@ -26,7 +26,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - 
masterInternalName: api.internal.privatedns1.example.com masterPublicName: api.privatedns1.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privatedns2/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatedns2/data/aws_s3_object_cluster-completed.spec_content index bffb55db8ef4e..66f7900d6a994 100644 --- a/tests/integration/update_cluster/privatedns2/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatedns2/data/aws_s3_object_cluster-completed.spec_content @@ -144,7 +144,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.privatedns2.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privatedns2/in-v1alpha2.yaml b/tests/integration/update_cluster/privatedns2/in-v1alpha2.yaml index f8a47d2db35f4..e5cc31d86fcf5 100644 --- a/tests/integration/update_cluster/privatedns2/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privatedns2/in-v1alpha2.yaml @@ -23,7 +23,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.privatedns2.example.com masterPublicName: api.privatedns2.example.com networkCIDR: 172.20.0.0/16 networkID: vpc-12345678 diff --git a/tests/integration/update_cluster/privateflannel/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privateflannel/data/aws_s3_object_cluster-completed.spec_content index 1046b5fb2323e..24f70f4e941ae 100644 --- a/tests/integration/update_cluster/privateflannel/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privateflannel/data/aws_s3_object_cluster-completed.spec_content @@ -167,7 +167,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.25.0 - masterInternalName: api.internal.privateflannel.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privateflannel/in-v1alpha2.yaml b/tests/integration/update_cluster/privateflannel/in-v1alpha2.yaml index 88c5852449448..7157a54b24105 100644 --- a/tests/integration/update_cluster/privateflannel/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privateflannel/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.25.0 - masterInternalName: api.internal.privateflannel.example.com masterPublicName: api.privateflannel.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privatekopeio/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatekopeio/data/aws_s3_object_cluster-completed.spec_content index e8abc870fc892..67e34c18a6993 100644 --- a/tests/integration/update_cluster/privatekopeio/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privatekopeio/data/aws_s3_object_cluster-completed.spec_content @@ -143,7 +143,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.privatekopeio.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privatekopeio/in-v1alpha2.yaml b/tests/integration/update_cluster/privatekopeio/in-v1alpha2.yaml index 06c995aae4c79..85c634ee2d7cd 100644 --- a/tests/integration/update_cluster/privatekopeio/in-v1alpha2.yaml +++ 
b/tests/integration/update_cluster/privatekopeio/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.privatekopeio.example.com masterPublicName: api.privatekopeio.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/privateweave/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privateweave/data/aws_s3_object_cluster-completed.spec_content index cbe1268b7aacb..bf5bd825fe3f2 100644 --- a/tests/integration/update_cluster/privateweave/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/privateweave/data/aws_s3_object_cluster-completed.spec_content @@ -144,7 +144,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.privateweave.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/privateweave/in-v1alpha2.yaml b/tests/integration/update_cluster/privateweave/in-v1alpha2.yaml index 5e56a310c021d..6f368de2fc8e1 100644 --- a/tests/integration/update_cluster/privateweave/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/privateweave/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.privateweave.example.com masterPublicName: api.privateweave.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/public-jwks-apiserver/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/public-jwks-apiserver/data/aws_s3_object_cluster-completed.spec_content index aa0853aeb1570..0aca166f0ace0 100644 --- a/tests/integration/update_cluster/public-jwks-apiserver/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/public-jwks-apiserver/data/aws_s3_object_cluster-completed.spec_content @@ -168,7 +168,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.24.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/public-jwks-apiserver/in-v1alpha2.yaml b/tests/integration/update_cluster/public-jwks-apiserver/in-v1alpha2.yaml index c1e38f235bb4a..5b6965626adbf 100644 --- a/tests/integration/update_cluster/public-jwks-apiserver/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/public-jwks-apiserver/in-v1alpha2.yaml @@ -26,7 +26,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.24.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/tests/integration/update_cluster/shared_subnet/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/shared_subnet/data/aws_s3_object_cluster-completed.spec_content index 43827f88f729f..221f75953d9c1 100644 --- a/tests/integration/update_cluster/shared_subnet/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/shared_subnet/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.sharedsubnet.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/shared_subnet/in-v1alpha2.yaml 
b/tests/integration/update_cluster/shared_subnet/in-v1alpha2.yaml index 50e668d4d8d8b..63585136284dd 100644 --- a/tests/integration/update_cluster/shared_subnet/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/shared_subnet/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.sharedsubnet.example.com masterPublicName: api.sharedsubnet.example.com networkCIDR: 172.20.0.0/16 networkID: vpc-12345678 diff --git a/tests/integration/update_cluster/shared_vpc/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/shared_vpc/data/aws_s3_object_cluster-completed.spec_content index de7942a0b1aeb..92172e7fa192f 100644 --- a/tests/integration/update_cluster/shared_vpc/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/shared_vpc/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.sharedvpc.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/shared_vpc/in-v1alpha2.yaml b/tests/integration/update_cluster/shared_vpc/in-v1alpha2.yaml index 815022836d45b..2e1ef14082271 100644 --- a/tests/integration/update_cluster/shared_vpc/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/shared_vpc/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.sharedvpc.example.com masterPublicName: api.sharedvpc.example.com networkCIDR: 172.20.0.0/16 networkID: vpc-12345678 diff --git a/tests/integration/update_cluster/unmanaged/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/unmanaged/data/aws_s3_object_cluster-completed.spec_content index 28cc5cd16ca71..28ca340b30c62 100644 --- a/tests/integration/update_cluster/unmanaged/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/unmanaged/data/aws_s3_object_cluster-completed.spec_content @@ -144,7 +144,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.unmanaged.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git a/tests/integration/update_cluster/unmanaged/in-v1alpha2.yaml b/tests/integration/update_cluster/unmanaged/in-v1alpha2.yaml index a0c28f0f1fd78..07d2a6d7a83c6 100644 --- a/tests/integration/update_cluster/unmanaged/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/unmanaged/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.unmanaged.example.com masterPublicName: api.unmanaged.example.com networkID: vpc-12345678 networkCIDR: 172.20.0.0/16 diff --git a/tests/integration/update_cluster/vfs-said/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/vfs-said/data/aws_s3_object_cluster-completed.spec_content index eee49dded70c0..70bb94cddcfe4 100644 --- a/tests/integration/update_cluster/vfs-said/data/aws_s3_object_cluster-completed.spec_content +++ b/tests/integration/update_cluster/vfs-said/data/aws_s3_object_cluster-completed.spec_content @@ -142,7 +142,6 @@ spec: kubernetesApiAccess: - 0.0.0.0/0 kubernetesVersion: 1.21.0 - masterInternalName: api.internal.minimal.example.com masterKubelet: anonymousAuth: false cgroupDriver: systemd diff --git 
a/tests/integration/update_cluster/vfs-said/in-v1alpha2.yaml b/tests/integration/update_cluster/vfs-said/in-v1alpha2.yaml index 01ff6a83f62a0..625daee5c3006 100644 --- a/tests/integration/update_cluster/vfs-said/in-v1alpha2.yaml +++ b/tests/integration/update_cluster/vfs-said/in-v1alpha2.yaml @@ -22,7 +22,6 @@ spec: kubelet: anonymousAuth: false kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/upup/models/cloudup/resources/addons/karpenter.sh/k8s-1.19.yaml.template b/upup/models/cloudup/resources/addons/karpenter.sh/k8s-1.19.yaml.template index a55ecfbe52b85..51233419e3ff6 100644 --- a/upup/models/cloudup/resources/addons/karpenter.sh/k8s-1.19.yaml.template +++ b/upup/models/cloudup/resources/addons/karpenter.sh/k8s-1.19.yaml.template @@ -914,7 +914,7 @@ spec: - name: CLUSTER_NAME value: {{ ClusterName }} - name: CLUSTER_ENDPOINT - value: https://{{ .MasterInternalName }} + value: https://{{ APIInternalName }} - name: CONFIG_LOGGING_NAME value: "karpenter-config-logging" - name: KARPENTER_SERVICE @@ -1022,7 +1022,7 @@ spec: - name: KUBERNETES_MIN_VERSION value: "1.19.0-0" - name: CLUSTER_ENDPOINT - value: https://{{ .MasterInternalName }} + value: https://{{ APIInternalName }} - name: CONFIG_LOGGING_NAME value: "karpenter-config-logging" - name: KARPENTER_SERVICE diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.11.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.11.yaml.template index 1e574e676a9e5..b085df485c2e8 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.11.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.11.yaml.template @@ -46,7 +46,7 @@ data: etcd-config: |- --- endpoints: - - https://{{ $.MasterInternalName }}:4003 + - https://{{ APIInternalName }}:4003 trusted-ca-file: '/var/lib/etcd-secrets/etcd-ca.crt' key-file: '/var/lib/etcd-secrets/etcd-client-cilium.key' @@ -681,7 +681,7 @@ spec: name: cilium-config optional: true - name: KUBERNETES_SERVICE_HOST - value: "{{ $.MasterInternalName }}" + value: "{{ APIInternalName }}" - name: KUBERNETES_SERVICE_PORT value: "443" {{ with .EnablePolicy }} @@ -948,7 +948,7 @@ spec: name: cilium-config optional: true - name: KUBERNETES_SERVICE_HOST - value: "{{ $.MasterInternalName }}" + value: "{{ APIInternalName }}" - name: KUBERNETES_SERVICE_PORT value: "443" image: "quay.io/cilium/operator:{{ .Version }}" diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template index 95153e2db355e..798f0ce89be92 100644 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template @@ -10,7 +10,7 @@ metadata: name: kubernetes-services-endpoint namespace: kube-system data: - KUBERNETES_SERVICE_HOST: "{{ .MasterInternalName }}" + KUBERNETES_SERVICE_HOST: "{{ APIInternalName }}" KUBERNETES_SERVICE_PORT: "443" {{- end }} diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.22.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.22.yaml.template index b2c8f51238938..fbf16f9dc0c7a 100644 --- 
a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.22.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.22.yaml.template @@ -10,7 +10,7 @@ metadata: name: kubernetes-services-endpoint namespace: kube-system data: - KUBERNETES_SERVICE_HOST: "{{ .MasterInternalName }}" + KUBERNETES_SERVICE_HOST: "{{ APIInternalName }}" KUBERNETES_SERVICE_PORT: "443" {{- end }} diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.25.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.25.yaml.template index 4b4bf32c9e9d0..00e7c4a58101c 100644 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.25.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.25.yaml.template @@ -9,7 +9,7 @@ metadata: name: kubernetes-services-endpoint namespace: kube-system data: - KUBERNETES_SERVICE_HOST: "{{ .MasterInternalName }}" + KUBERNETES_SERVICE_HOST: "{{ APIInternalName }}" KUBERNETES_SERVICE_PORT: "443" {{- end }} --- diff --git a/upup/pkg/fi/cloudup/apply_cluster.go b/upup/pkg/fi/cloudup/apply_cluster.go index 0ec75b13d3d36..6eeed2342d615 100644 --- a/upup/pkg/fi/cloudup/apply_cluster.go +++ b/upup/pkg/fi/cloudup/apply_cluster.go @@ -362,7 +362,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error { } encryptionConfigSecretHash := "" - if fi.BoolValue(c.Cluster.Spec.EncryptionConfig) { + if fi.ValueOf(c.Cluster.Spec.EncryptionConfig) { secret, err := secretStore.FindSecret("encryptionconfig") if err != nil { return fmt.Errorf("could not load encryptionconfig secret: %v", err) @@ -598,7 +598,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error { } nth := c.Cluster.Spec.NodeTerminationHandler - if nth != nil && fi.BoolValue(nth.Enabled) && fi.BoolValue(nth.EnableSQSTerminationDraining) { + if nth != nil && fi.ValueOf(nth.Enabled) && fi.ValueOf(nth.EnableSQSTerminationDraining) { l.Builders = append(l.Builders, &awsmodel.NodeTerminationHandlerBuilder{ AWSModelContext: awsModelContext, Lifecycle: clusterLifecycle, @@ -1266,7 +1266,7 @@ func newNodeUpConfigBuilder(cluster *kops.Cluster, assetBuilder *assets.AssetBui if isMaster { for _, etcdCluster := range cluster.Spec.EtcdClusters { for _, member := range etcdCluster.Members { - instanceGroup := fi.StringValue(member.InstanceGroup) + instanceGroup := fi.ValueOf(member.InstanceGroup) etcdManifest := fmt.Sprintf("manifests/etcd/%s-%s.yaml", etcdCluster.Name, instanceGroup) etcdManifests[instanceGroup] = append(etcdManifests[instanceGroup], configBase.Join(etcdManifest).Path()) } @@ -1435,7 +1435,7 @@ func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, apiserverAddit bootConfig.ConfigServer = configServer delete(config.CAs, fi.CertificateIDCA) } else { - bootConfig.ConfigBase = fi.String(n.configBase.Path()) + bootConfig.ConfigBase = fi.PtrTo(n.configBase.Path()) } for _, manifest := range n.assetBuilder.StaticManifests { diff --git a/upup/pkg/fi/cloudup/awstasks/autoscalinggroup.go b/upup/pkg/fi/cloudup/awstasks/autoscalinggroup.go index 7b41fe5c2d789..daf2ded4540c3 100644 --- a/upup/pkg/fi/cloudup/awstasks/autoscalinggroup.go +++ b/upup/pkg/fi/cloudup/awstasks/autoscalinggroup.go @@ -106,7 +106,7 @@ func (e *AutoscalingGroup) CompareWithID() *string { func (e *AutoscalingGroup) Find(c *fi.Context) (*AutoscalingGroup, error) { cloud := c.Cloud.(awsup.AWSCloud) - g, err := findAutoscalingGroup(cloud, fi.StringValue(e.Name)) + g, err := 
findAutoscalingGroup(cloud, fi.ValueOf(e.Name)) if err != nil { return nil, err } @@ -123,7 +123,7 @@ func (e *AutoscalingGroup) Find(c *fi.Context) (*AutoscalingGroup, error) { // Use 0 as default value when api returns nil (same as model) if g.MaxInstanceLifetime == nil { - actual.MaxInstanceLifetime = fi.Int64(0) + actual.MaxInstanceLifetime = fi.PtrTo(int64(0)) } else { actual.MaxInstanceLifetime = g.MaxInstanceLifetime } @@ -154,12 +154,12 @@ func (e *AutoscalingGroup) Find(c *fi.Context) (*AutoscalingGroup, error) { for _, lb := range e.LoadBalancers { // All external ELBs have their Shared field set to true. The API ELB does not. // Note that Shared is set by the kops model rather than AWS tags. - if !fi.BoolValue(lb.Shared) { + if !fi.ValueOf(lb.Shared) { apiLBTask = lb } } if apiLBTask != nil && len(actual.LoadBalancers) > 0 { - apiLBDesc, err := c.Cloud.(awsup.AWSCloud).FindELBByNameTag(fi.StringValue(apiLBTask.Name)) + apiLBDesc, err := c.Cloud.(awsup.AWSCloud).FindELBByNameTag(fi.ValueOf(apiLBTask.Name)) if err != nil { return nil, err } @@ -178,14 +178,14 @@ func (e *AutoscalingGroup) Find(c *fi.Context) (*AutoscalingGroup, error) { actual.TargetGroups = []*TargetGroup{} if len(g.TargetGroupARNs) > 0 { for _, tg := range g.TargetGroupARNs { - targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.StringValue(tg)) + targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.ValueOf(tg)) if err != nil { return nil, err } if targetGroupName != awsup.GetResourceName32(c.Cluster.Name, "tcp") && targetGroupName != awsup.GetResourceName32(c.Cluster.Name, "tls") { actual.TargetGroups = append(actual.TargetGroups, &TargetGroup{ARN: aws.String(*tg), Name: aws.String(targetGroupName)}) } else { - actual.TargetGroups = append(actual.TargetGroups, &TargetGroup{ARN: aws.String(*tg), Name: aws.String(fi.StringValue(g.AutoScalingGroupName) + "-" + targetGroupName)}) + actual.TargetGroups = append(actual.TargetGroups, &TargetGroup{ARN: aws.String(*tg), Name: aws.String(fi.ValueOf(g.AutoScalingGroupName) + "-" + targetGroupName)}) } } } @@ -233,7 +233,7 @@ func (e *AutoscalingGroup) Find(c *fi.Context) (*AutoscalingGroup, error) { actual.MixedSpotMaxPrice = mpd.SpotMaxPrice // MixedSpotMaxPrice must be set to "" in order to unset. if mpd.SpotMaxPrice == nil { - actual.MixedSpotMaxPrice = fi.String("") + actual.MixedSpotMaxPrice = fi.PtrTo("") } } @@ -246,7 +246,7 @@ func (e *AutoscalingGroup) Find(c *fi.Context) (*AutoscalingGroup, error) { } for _, n := range g.MixedInstancesPolicy.LaunchTemplate.Overrides { - actual.MixedInstanceOverrides = append(actual.MixedInstanceOverrides, fi.StringValue(n.InstanceType)) + actual.MixedInstanceOverrides = append(actual.MixedInstanceOverrides, fi.ValueOf(n.InstanceType)) } } } @@ -287,14 +287,14 @@ func findAutoscalingGroup(cloud awsup.AWSCloud, name string) (*autoscaling.Group // Check for "Delete in progress" (the only use .Status). We won't be able to update or create while // this is true, but filtering it out here makes the messages slightly clearer. 
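Most hunks in this file, and in the rest of the PR, are the same mechanical rename: the typed pointer constructors (fi.String, fi.Bool, fi.Int64) become fi.PtrTo, and the typed dereference helpers (fi.StringValue, fi.BoolValue, fi.Int64Value, fi.IntValue) become fi.ValueOf. A minimal sketch of what such generic helpers look like, assuming Go 1.18+ generics — the actual kops definitions may differ in detail:

package fi

// PtrTo returns a pointer to a copy of v, for any type T.
func PtrTo[T any](v T) *T {
	return &v
}

// ValueOf dereferences p, falling back to T's zero value when p is nil.
func ValueOf[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

This also explains why `fi.Int64(0)` becomes `fi.PtrTo(int64(0))` rather than `fi.PtrTo(0)`: with an untyped constant argument the type parameter would be inferred as int, so call sites that need a *int64 must convert explicitly.
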
if g.Status != nil { - klog.Warningf("Skipping AutoScalingGroup %v: %v", fi.StringValue(g.AutoScalingGroupName), fi.StringValue(g.Status)) + klog.Warningf("Skipping AutoScalingGroup %v: %v", fi.ValueOf(g.AutoScalingGroupName), fi.ValueOf(g.Status)) continue } if aws.StringValue(g.AutoScalingGroupName) == name { found = append(found, g) } else { - klog.Warningf("Got ASG with unexpected name %q", fi.StringValue(g.AutoScalingGroupName)) + klog.Warningf("Got ASG with unexpected name %q", fi.ValueOf(g.AutoScalingGroupName)) } } @@ -341,7 +341,7 @@ func (e *AutoscalingGroup) CheckChanges(a, ex, changes *AutoscalingGroup) error func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *AutoscalingGroup) error { // @step: did we find an autoscaling group? if a == nil { - klog.V(2).Infof("Creating autoscaling group with name: %s", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating autoscaling group with name: %s", fi.ValueOf(e.Name)) request := &autoscaling.CreateAutoScalingGroupInput{ AutoScalingGroupName: e.Name, @@ -349,12 +349,12 @@ func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Autos MaxSize: e.MaxSize, NewInstancesProtectedFromScaleIn: e.InstanceProtection, Tags: v.AutoscalingGroupTags(), - VPCZoneIdentifier: fi.String(strings.Join(e.AutoscalingGroupSubnets(), ",")), CapacityRebalance: e.CapacityRebalance, + VPCZoneIdentifier: fi.PtrTo(strings.Join(e.AutoscalingGroupSubnets(), ",")), } //On ASG creation 0 value is forbidden - if fi.Int64Value(e.MaxInstanceLifetime) == 0 { + if fi.ValueOf(e.MaxInstanceLifetime) == 0 { request.MaxInstanceLifetime = nil } else { request.MaxInstanceLifetime = e.MaxInstanceLifetime @@ -362,7 +362,7 @@ func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Autos for _, k := range e.LoadBalancers { if k.LoadBalancerName == nil { - lbDesc, err := t.Cloud.FindELBByNameTag(fi.StringValue(k.GetName())) + lbDesc, err := t.Cloud.FindELBByNameTag(fi.ValueOf(k.GetName())) if err != nil { return err } @@ -399,7 +399,7 @@ func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Autos p := request.MixedInstancesPolicy.LaunchTemplate for _, x := range e.MixedInstanceOverrides { p.Overrides = append(p.Overrides, &autoscaling.LaunchTemplateOverrides{ - InstanceType: fi.String(x), + InstanceType: fi.PtrTo(x), }, ) } @@ -513,7 +513,7 @@ func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Autos if changes.MixedInstanceOverrides != nil { p := request.MixedInstancesPolicy.LaunchTemplate for _, x := range changes.MixedInstanceOverrides { - p.Overrides = append(p.Overrides, &autoscaling.LaunchTemplateOverrides{InstanceType: fi.String(x)}) + p.Overrides = append(p.Overrides, &autoscaling.LaunchTemplateOverrides{InstanceType: fi.PtrTo(x)}) } changes.MixedInstanceOverrides = nil } @@ -543,7 +543,7 @@ func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Autos request.MaxInstanceLifetime = e.MaxInstanceLifetime changes.MaxInstanceLifetime = nil } else { - request.MaxInstanceLifetime = fi.Int64(0) + request.MaxInstanceLifetime = fi.PtrTo(int64(0)) } var updateTagsRequest *autoscaling.CreateOrUpdateTagsInput @@ -654,7 +654,7 @@ func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Autos klog.Warningf("cannot apply changes to AutoScalingGroup: %v", changes) } - klog.V(2).Infof("Updating autoscaling group %s", fi.StringValue(e.Name)) + klog.V(2).Infof("Updating autoscaling group %s", fi.ValueOf(e.Name)) if _, err := 
t.Cloud.Autoscaling().UpdateAutoScalingGroup(request); err != nil { return fmt.Errorf("error updating AutoscalingGroup: %v", err) @@ -749,7 +749,7 @@ func (e *AutoscalingGroup) AutoscalingGroupSubnets() []string { var list []string for _, x := range e.Subnets { - list = append(list, fi.StringValue(x.ID)) + list = append(list, fi.ValueOf(x.ID)) } return list @@ -943,9 +943,9 @@ func (_ *AutoscalingGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, c for _, k := range maps.SortedKeys(e.Tags) { v := e.Tags[k] tf.Tags = append(tf.Tags, &terraformASGTag{ - Key: fi.String(k), - Value: fi.String(v), - PropagateAtLaunch: fi.Bool(true), + Key: fi.PtrTo(k), + Value: fi.PtrTo(v), + PropagateAtLaunch: fi.PtrTo(true), }) } @@ -961,7 +961,7 @@ func (_ *AutoscalingGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, c if e.UseMixedInstancesPolicy() { // Temporary warning until https://github.com/terraform-providers/terraform-provider-aws/issues/9750 is resolved - if e.MixedSpotAllocationStrategy == fi.String("capacity-optimized") { + if e.MixedSpotAllocationStrategy == fi.PtrTo("capacity-optimized") { fmt.Print("Terraform does not currently support a capacity optimized strategy - please see https://github.com/terraform-providers/terraform-provider-aws/issues/9750") } @@ -991,7 +991,7 @@ func (_ *AutoscalingGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, c } for _, x := range e.MixedInstanceOverrides { - tf.MixedInstancesPolicy[0].LaunchTemplate[0].Override = append(tf.MixedInstancesPolicy[0].LaunchTemplate[0].Override, &terraformAutoscalingMixedInstancesPolicyLaunchTemplateOverride{InstanceType: fi.String(x)}) + tf.MixedInstancesPolicy[0].LaunchTemplate[0].Override = append(tf.MixedInstancesPolicy[0].LaunchTemplate[0].Override, &terraformAutoscalingMixedInstancesPolicyLaunchTemplateOverride{InstanceType: fi.PtrTo(x)}) } } else if e.LaunchTemplate != nil { tf.LaunchTemplate = &terraformAutoscalingLaunchTemplateSpecification{ @@ -1036,7 +1036,7 @@ func (_ *AutoscalingGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, c var processes []*string if e.SuspendProcesses != nil { for _, p := range *e.SuspendProcesses { - processes = append(processes, fi.String(p)) + processes = append(processes, fi.PtrTo(p)) } } tf.SuspendedProcesses = processes @@ -1046,7 +1046,7 @@ func (_ *AutoscalingGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, c // TerraformLink fills in the property func (e *AutoscalingGroup) TerraformLink() *terraformWriter.Literal { - return terraformWriter.LiteralProperty("aws_autoscaling_group", fi.StringValue(e.Name), "id") + return terraformWriter.LiteralProperty("aws_autoscaling_group", fi.ValueOf(e.Name), "id") } type cloudformationASGTag struct { @@ -1148,7 +1148,7 @@ func (_ *AutoscalingGroup) RenderCloudformation(t *cloudformation.Cloudformation } for _, x := range e.MixedInstanceOverrides { - cf.MixedInstancesPolicy.LaunchTemplate.Overrides = append(cf.MixedInstancesPolicy.LaunchTemplate.Overrides, &cloudformationAutoscalingLaunchTemplateOverride{InstanceType: fi.String(x)}) + cf.MixedInstancesPolicy.LaunchTemplate.Overrides = append(cf.MixedInstancesPolicy.LaunchTemplate.Overrides, &cloudformationAutoscalingLaunchTemplateOverride{InstanceType: fi.PtrTo(x)}) } } else if e.LaunchTemplate != nil { cf.LaunchTemplate = &cloudformationAutoscalingLaunchTemplateSpecification{ @@ -1166,9 +1166,9 @@ func (_ *AutoscalingGroup) RenderCloudformation(t *cloudformation.Cloudformation for _, k := range maps.SortedKeys(e.Tags) { v := e.Tags[k] cf.Tags = 
append(cf.Tags, &cloudformationASGTag{ - Key: fi.String(k), - Value: fi.String(v), - PropagateAtLaunch: fi.Bool(true), + Key: fi.PtrTo(k), + Value: fi.PtrTo(v), + PropagateAtLaunch: fi.PtrTo(true), }) } @@ -1180,10 +1180,10 @@ func (_ *AutoscalingGroup) RenderCloudformation(t *cloudformation.Cloudformation cf.TargetGroupARNs = append(cf.TargetGroupARNs, tg.CloudformationLink()) } - return t.RenderResource("AWS::AutoScaling::AutoScalingGroup", fi.StringValue(e.Name), cf) + return t.RenderResource("AWS::AutoScaling::AutoScalingGroup", fi.ValueOf(e.Name), cf) } // CloudformationLink is adds a reference func (e *AutoscalingGroup) CloudformationLink() *cloudformation.Literal { - return cloudformation.Ref("AWS::AutoScaling::AutoScalingGroup", fi.StringValue(e.Name)) + return cloudformation.Ref("AWS::AutoScaling::AutoScalingGroup", fi.ValueOf(e.Name)) } diff --git a/upup/pkg/fi/cloudup/awstasks/autoscalinggroup_test.go b/upup/pkg/fi/cloudup/awstasks/autoscalinggroup_test.go index 371dbf6e32d02..b008433f9b2c0 100644 --- a/upup/pkg/fi/cloudup/awstasks/autoscalinggroup_test.go +++ b/upup/pkg/fi/cloudup/awstasks/autoscalinggroup_test.go @@ -201,16 +201,16 @@ func TestAutoscalingGroupTerraformRender(t *testing.T) { cases := []*renderTest{ { Resource: &AutoscalingGroup{ - Name: fi.String("test"), - Granularity: fi.String("5min"), - LaunchTemplate: &LaunchTemplate{Name: fi.String("test_lc")}, - MaxSize: fi.Int64(10), + Name: fi.PtrTo("test"), + Granularity: fi.PtrTo("5min"), + LaunchTemplate: &LaunchTemplate{Name: fi.PtrTo("test_lc")}, + MaxSize: fi.PtrTo(int64(10)), Metrics: []string{"test"}, - MinSize: fi.Int64(1), + MinSize: fi.PtrTo(int64(1)), Subnets: []*Subnet{ { - Name: fi.String("test-sg"), - ID: fi.String("sg-1111"), + Name: fi.PtrTo("test-sg"), + ID: fi.PtrTo("sg-1111"), }, }, Tags: map[string]string{ @@ -259,19 +259,19 @@ terraform { }, { Resource: &AutoscalingGroup{ - Name: fi.String("test1"), - LaunchTemplate: &LaunchTemplate{Name: fi.String("test_lt")}, - MaxSize: fi.Int64(10), + Name: fi.PtrTo("test1"), + LaunchTemplate: &LaunchTemplate{Name: fi.PtrTo("test_lt")}, + MaxSize: fi.PtrTo(int64(10)), Metrics: []string{"test"}, - MinSize: fi.Int64(5), + MinSize: fi.PtrTo(int64(5)), MixedInstanceOverrides: []string{"t2.medium", "t2.large"}, - MixedOnDemandBase: fi.Int64(4), - MixedOnDemandAboveBase: fi.Int64(30), - MixedSpotAllocationStrategy: fi.String("capacity-optimized"), + MixedOnDemandBase: fi.PtrTo(int64(4)), + MixedOnDemandAboveBase: fi.PtrTo(int64(30)), + MixedSpotAllocationStrategy: fi.PtrTo("capacity-optimized"), Subnets: []*Subnet{ { - Name: fi.String("test-sg"), - ID: fi.String("sg-1111"), + Name: fi.PtrTo("test-sg"), + ID: fi.PtrTo("sg-1111"), }, }, Tags: map[string]string{ @@ -336,96 +336,3 @@ terraform { doRenderTests(t, "RenderTerraform", cases) } - -func TestAutoscalingGroupCloudformationRender(t *testing.T) { - cases := []*renderTest{ - { - Resource: &AutoscalingGroup{ - Name: fi.String("test1"), - LaunchTemplate: &LaunchTemplate{Name: fi.String("test_lt")}, - MaxSize: fi.Int64(10), - Metrics: []string{"test"}, - MinSize: fi.Int64(5), - MixedInstanceOverrides: []string{"t2.medium", "t2.large"}, - MixedOnDemandBase: fi.Int64(4), - MixedOnDemandAboveBase: fi.Int64(30), - Subnets: []*Subnet{ - { - Name: fi.String("test-sg"), - ID: fi.String("sg-1111"), - }, - }, - Tags: map[string]string{ - "test": "tag", - "cluster": "test", - }, - }, - Expected: `{ - "Resources": { - "AWSAutoScalingAutoScalingGrouptest1": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - 
"AutoScalingGroupName": "test1", - "MaxSize": "10", - "MinSize": "5", - "VPCZoneIdentifier": [ - { - "Ref": "AWSEC2Subnettestsg" - } - ], - "Tags": [ - { - "Key": "cluster", - "Value": "test", - "PropagateAtLaunch": true - }, - { - "Key": "test", - "Value": "tag", - "PropagateAtLaunch": true - } - ], - "MetricsCollection": [ - { - "Granularity": null, - "Metrics": [ - "test" - ] - } - ], - "MixedInstancesPolicy": { - "LaunchTemplate": { - "LaunchTemplateSpecification": { - "LaunchTemplateId": { - "Ref": "AWSEC2LaunchTemplatetest_lt" - }, - "Version": { - "Fn::GetAtt": [ - "AWSEC2LaunchTemplatetest_lt", - "LatestVersionNumber" - ] - } - }, - "Overrides": [ - { - "InstanceType": "t2.medium" - }, - { - "InstanceType": "t2.large" - } - ] - }, - "InstancesDistribution": { - "OnDemandBaseCapacity": 4, - "OnDemandPercentageAboveBaseCapacity": 30 - } - } - } - } - } -}`, - }, - } - - doRenderTests(t, "RenderCloudformation", cases) -} diff --git a/upup/pkg/fi/cloudup/awstasks/autoscalinglifecyclehook.go b/upup/pkg/fi/cloudup/awstasks/autoscalinglifecyclehook.go index 4b73448a2098c..6a443ac5cdbcd 100644 --- a/upup/pkg/fi/cloudup/awstasks/autoscalinglifecyclehook.go +++ b/upup/pkg/fi/cloudup/awstasks/autoscalinglifecyclehook.go @@ -66,7 +66,7 @@ func (h *AutoscalingLifecycleHook) Find(c *fi.Context) (*AutoscalingLifecycleHoo return nil, fmt.Errorf("error listing ASG Lifecycle Hooks: %v", err) } if response == nil || len(response.LifecycleHooks) == 0 { - if !fi.BoolValue(h.Enabled) { + if !fi.ValueOf(h.Enabled) { return h, nil } @@ -86,7 +86,7 @@ func (h *AutoscalingLifecycleHook) Find(c *fi.Context) (*AutoscalingLifecycleHoo DefaultResult: hook.DefaultResult, HeartbeatTimeout: hook.HeartbeatTimeout, LifecycleTransition: hook.LifecycleTransition, - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), } return actual, nil @@ -111,7 +111,7 @@ func (_ *AutoscalingLifecycleHook) CheckChanges(a, e, changes *AutoscalingLifecy func (*AutoscalingLifecycleHook) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *AutoscalingLifecycleHook) error { if changes != nil { - if fi.BoolValue(e.Enabled) { + if fi.ValueOf(e.Enabled) { request := &autoscaling.PutLifecycleHookInput{ AutoScalingGroupName: e.AutoscalingGroup.Name, DefaultResult: e.DefaultResult, @@ -147,7 +147,7 @@ type terraformASGLifecycleHook struct { } func (_ *AutoscalingLifecycleHook) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *AutoscalingLifecycleHook) error { - if !fi.BoolValue(e.Enabled) { + if !fi.ValueOf(e.Enabled) { return nil } tf := &terraformASGLifecycleHook{ @@ -170,7 +170,7 @@ type cloudformationASGLifecycleHook struct { } func (_ *AutoscalingLifecycleHook) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *AutoscalingLifecycleHook) error { - if !fi.BoolValue(e.Enabled) { + if !fi.ValueOf(e.Enabled) { return nil } tf := &cloudformationASGLifecycleHook{ diff --git a/upup/pkg/fi/cloudup/awstasks/block_device_mappings.go b/upup/pkg/fi/cloudup/awstasks/block_device_mappings.go index 83e3f25288424..f9d441b023abe 100644 --- a/upup/pkg/fi/cloudup/awstasks/block_device_mappings.go +++ b/upup/pkg/fi/cloudup/awstasks/block_device_mappings.go @@ -78,14 +78,14 @@ func (i *BlockDeviceMapping) ToEC2(deviceName string) *ec2.BlockDeviceMapping { VolumeSize: i.EbsVolumeSize, VolumeType: i.EbsVolumeType, } - switch fi.StringValue(i.EbsVolumeType) { + switch fi.ValueOf(i.EbsVolumeType) { case ec2.VolumeTypeGp3: o.Ebs.Throughput = i.EbsVolumeThroughput fallthrough case ec2.VolumeTypeIo1, ec2.VolumeTypeIo2: o.Ebs.Iops = 
i.EbsVolumeIops } - if fi.BoolValue(o.Ebs.Encrypted) { + if fi.ValueOf(o.Ebs.Encrypted) { o.Ebs.KmsKeyId = i.EbsKmsKey } } @@ -105,7 +105,7 @@ func BlockDeviceMappingFromAutoscaling(i *autoscaling.BlockDeviceMapping) (strin o.EbsVolumeSize = i.Ebs.VolumeSize o.EbsVolumeType = i.Ebs.VolumeType - if fi.StringValue(o.EbsVolumeType) == ec2.VolumeTypeIo1 || fi.StringValue(o.EbsVolumeType) == ec2.VolumeTypeIo2 { + if fi.ValueOf(o.EbsVolumeType) == ec2.VolumeTypeIo1 || fi.ValueOf(o.EbsVolumeType) == ec2.VolumeTypeIo2 { o.EbsVolumeIops = i.Ebs.Iops } } @@ -126,7 +126,7 @@ func (i *BlockDeviceMapping) ToAutoscaling(deviceName string) *autoscaling.Block VolumeSize: i.EbsVolumeSize, VolumeType: i.EbsVolumeType, } - if fi.StringValue(o.Ebs.VolumeType) == ec2.VolumeTypeIo1 || fi.StringValue(o.Ebs.VolumeType) == ec2.VolumeTypeIo2 { + if fi.ValueOf(o.Ebs.VolumeType) == ec2.VolumeTypeIo1 || fi.ValueOf(o.Ebs.VolumeType) == ec2.VolumeTypeIo2 { o.Ebs.Iops = i.EbsVolumeIops } } @@ -167,14 +167,14 @@ func (i *BlockDeviceMapping) ToLaunchTemplateBootDeviceRequest(deviceName string VolumeType: i.EbsVolumeType, } } - switch fi.StringValue(i.EbsVolumeType) { + switch fi.ValueOf(i.EbsVolumeType) { case ec2.VolumeTypeGp3: o.Ebs.Throughput = i.EbsVolumeThroughput fallthrough case ec2.VolumeTypeIo1, ec2.VolumeTypeIo2: o.Ebs.Iops = i.EbsVolumeIops } - if fi.BoolValue(i.EbsEncrypted) { + if fi.ValueOf(i.EbsEncrypted) { o.Ebs.KmsKeyId = i.EbsKmsKey } diff --git a/upup/pkg/fi/cloudup/awstasks/classic_load_balancer.go b/upup/pkg/fi/cloudup/awstasks/classic_load_balancer.go index 433c5ab180961..e630bf2c1f9c0 100644 --- a/upup/pkg/fi/cloudup/awstasks/classic_load_balancer.go +++ b/upup/pkg/fi/cloudup/awstasks/classic_load_balancer.go @@ -213,7 +213,7 @@ func (e *ClassicLoadBalancer) getHostedZoneId() *string { func (e *ClassicLoadBalancer) Find(c *fi.Context) (*ClassicLoadBalancer, error) { cloud := c.Cloud.(awsup.AWSCloud) - lb, err := cloud.FindELBByNameTag(fi.StringValue(e.Name)) + lb, err := cloud.FindELBByNameTag(fi.ValueOf(e.Name)) if err != nil { return nil, err } @@ -328,7 +328,7 @@ func (e *ClassicLoadBalancer) Find(c *fi.Context) (*ClassicLoadBalancer, error) // We allow for the LoadBalancerName to be wrong: // 1. We don't want to force a rename of the ELB, because that is a destructive operation // 2. 
We were creating ELBs with insufficiently qualified names previously - if fi.StringValue(e.LoadBalancerName) != fi.StringValue(actual.LoadBalancerName) { + if fi.ValueOf(e.LoadBalancerName) != fi.ValueOf(actual.LoadBalancerName) { klog.V(2).Infof("Reusing existing load balancer with name: %q", aws.StringValue(actual.LoadBalancerName)) e.LoadBalancerName = actual.LoadBalancerName } @@ -349,7 +349,7 @@ func (e *ClassicLoadBalancer) IsForAPIServer() bool { func (e *ClassicLoadBalancer) FindAddresses(context *fi.Context) ([]string, error) { cloud := context.Cloud.(awsup.AWSCloud) - lb, err := cloud.FindELBByNameTag(fi.StringValue(e.Name)) + lb, err := cloud.FindELBByNameTag(fi.ValueOf(e.Name)) if err != nil { return nil, err } @@ -357,7 +357,7 @@ func (e *ClassicLoadBalancer) FindAddresses(context *fi.Context) ([]string, erro return nil, nil } - lbDnsName := fi.StringValue(lb.DNSName) + lbDnsName := fi.ValueOf(lb.DNSName) if lbDnsName == "" { return nil, nil } @@ -369,7 +369,7 @@ func (e *ClassicLoadBalancer) Run(c *fi.Context) error { } func (_ *ClassicLoadBalancer) ShouldCreate(a, e, changes *ClassicLoadBalancer) (bool, error) { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { return false, nil } return true, nil @@ -384,11 +384,11 @@ func (e *ClassicLoadBalancer) Normalize(c *fi.Context) error { func (s *ClassicLoadBalancer) CheckChanges(a, e, changes *ClassicLoadBalancer) error { if a == nil { - if fi.StringValue(e.Name) == "" { + if fi.ValueOf(e.Name) == "" { return fi.RequiredField("Name") } - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if !shared { if len(e.SecurityGroups) == 0 { return fi.RequiredField("SecurityGroups") @@ -425,7 +425,7 @@ func (s *ClassicLoadBalancer) CheckChanges(a, e, changes *ClassicLoadBalancer) e } func (_ *ClassicLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *ClassicLoadBalancer) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { return nil } @@ -480,17 +480,17 @@ func (_ *ClassicLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Cl } e.HostedZoneId = lb.CanonicalHostedZoneNameID } else { - loadBalancerName = fi.StringValue(a.LoadBalancerName) + loadBalancerName = fi.ValueOf(a.LoadBalancerName) if changes.Subnets != nil { var expectedSubnets []string for _, s := range e.Subnets { - expectedSubnets = append(expectedSubnets, fi.StringValue(s.ID)) + expectedSubnets = append(expectedSubnets, fi.ValueOf(s.ID)) } var actualSubnets []string for _, s := range a.Subnets { - actualSubnets = append(actualSubnets, fi.StringValue(s.ID)) + actualSubnets = append(actualSubnets, fi.ValueOf(s.ID)) } oldSubnetIDs := slice.GetUniqueStrings(expectedSubnets, actualSubnets) @@ -608,7 +608,7 @@ type OrderLoadBalancersByName []*ClassicLoadBalancer func (a OrderLoadBalancersByName) Len() int { return len(a) } func (a OrderLoadBalancersByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a OrderLoadBalancersByName) Less(i, j int) bool { - return fi.StringValue(a[i].Name) < fi.StringValue(a[j].Name) + return fi.ValueOf(a[i].Name) < fi.ValueOf(a[j].Name) } type terraformLoadBalancer struct { @@ -648,7 +648,7 @@ type terraformLoadBalancerHealthCheck struct { } func (_ *ClassicLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ClassicLoadBalancer) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { return nil } @@ -662,8 +662,8 @@ func (_ *ClassicLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e tf := 
&terraformLoadBalancer{ LoadBalancerName: e.LoadBalancerName, } - if fi.StringValue(e.Scheme) == "internal" { - tf.Internal = fi.Bool(true) + if fi.ValueOf(e.Scheme) == "internal" { + tf.Internal = fi.PtrTo(true) } for _, subnet := range e.Subnets { @@ -711,7 +711,7 @@ func (_ *ClassicLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e } } - if e.AccessLog != nil && fi.BoolValue(e.AccessLog.Enabled) { + if e.AccessLog != nil && fi.ValueOf(e.AccessLog.Enabled) { tf.AccessLog = &terraformLoadBalancerAccessLog{ EmitInterval: e.AccessLog.EmitInterval, Enabled: e.AccessLog.Enabled, @@ -743,7 +743,7 @@ func (_ *ClassicLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e } func (e *ClassicLoadBalancer) TerraformLink(params ...string) *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.LoadBalancerName == nil { klog.Fatalf("Name must be set, if LB is shared: %s", e) @@ -807,7 +807,7 @@ func (_ *ClassicLoadBalancer) RenderCloudformation(t *cloudformation.Cloudformat // If this resource has a public IP address and is also in a VPC that is defined in the same template, // you must use the DependsOn attribute to declare a dependency on the VPC-gateway attachment. - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { return nil } @@ -850,7 +850,7 @@ func (_ *ClassicLoadBalancer) RenderCloudformation(t *cloudformation.Cloudformat } } - if e.AccessLog != nil && fi.BoolValue(e.AccessLog.Enabled) { + if e.AccessLog != nil && fi.ValueOf(e.AccessLog.Enabled) { tf.AccessLog = &cloudformationClassicLoadBalancerAccessLog{ EmitInterval: e.AccessLog.EmitInterval, Enabled: e.AccessLog.Enabled, @@ -887,7 +887,7 @@ func (_ *ClassicLoadBalancer) RenderCloudformation(t *cloudformation.Cloudformat } func (e *ClassicLoadBalancer) CloudformationLink() *cloudformation.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.LoadBalancerName == nil { klog.Fatalf("Name must be set, if LB is shared: %s", e) diff --git a/upup/pkg/fi/cloudup/awstasks/classic_loadbalancer_attributes.go b/upup/pkg/fi/cloudup/awstasks/classic_loadbalancer_attributes.go index 920f0ad5fe104..adf8ef570680f 100644 --- a/upup/pkg/fi/cloudup/awstasks/classic_loadbalancer_attributes.go +++ b/upup/pkg/fi/cloudup/awstasks/classic_loadbalancer_attributes.go @@ -110,7 +110,7 @@ func (_ *ClassicLoadBalancer) modifyLoadBalancerAttributes(t *awsup.AWSAPITarget return nil } - loadBalancerName := fi.StringValue(e.LoadBalancerName) + loadBalancerName := fi.ValueOf(e.LoadBalancerName) request := &elb.ModifyLoadBalancerAttributesInput{} request.LoadBalancerName = e.LoadBalancerName @@ -119,22 +119,22 @@ func (_ *ClassicLoadBalancer) modifyLoadBalancerAttributes(t *awsup.AWSAPITarget // Setting mandatory attributes to default values if empty request.LoadBalancerAttributes.AccessLog = &elb.AccessLog{} if e.AccessLog == nil || e.AccessLog.Enabled == nil { - request.LoadBalancerAttributes.AccessLog.Enabled = fi.Bool(false) + request.LoadBalancerAttributes.AccessLog.Enabled = fi.PtrTo(false) } request.LoadBalancerAttributes.ConnectionDraining = &elb.ConnectionDraining{} if e.ConnectionDraining == nil || e.ConnectionDraining.Enabled == nil { - request.LoadBalancerAttributes.ConnectionDraining.Enabled = fi.Bool(false) + request.LoadBalancerAttributes.ConnectionDraining.Enabled = fi.PtrTo(false) } if e.ConnectionDraining == nil || e.ConnectionDraining.Timeout == nil { - request.LoadBalancerAttributes.ConnectionDraining.Timeout 
= fi.Int64(300) + request.LoadBalancerAttributes.ConnectionDraining.Timeout = fi.PtrTo(int64(300)) } request.LoadBalancerAttributes.ConnectionSettings = &elb.ConnectionSettings{} if e.ConnectionSettings == nil || e.ConnectionSettings.IdleTimeout == nil { - request.LoadBalancerAttributes.ConnectionSettings.IdleTimeout = fi.Int64(60) + request.LoadBalancerAttributes.ConnectionSettings.IdleTimeout = fi.PtrTo(int64(60)) } request.LoadBalancerAttributes.CrossZoneLoadBalancing = &elb.CrossZoneLoadBalancing{} if e.CrossZoneLoadBalancing == nil || e.CrossZoneLoadBalancing.Enabled == nil { - request.LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled = fi.Bool(false) + request.LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled = fi.PtrTo(false) } else { request.LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled = e.CrossZoneLoadBalancing.Enabled } diff --git a/upup/pkg/fi/cloudup/awstasks/dnsname.go b/upup/pkg/fi/cloudup/awstasks/dnsname.go index ea3154a9693e4..48032b50fe21b 100644 --- a/upup/pkg/fi/cloudup/awstasks/dnsname.go +++ b/upup/pkg/fi/cloudup/awstasks/dnsname.go @@ -56,17 +56,17 @@ func (e *DNSName) Find(c *fi.Context) (*DNSName, error) { cloud := c.Cloud.(awsup.AWSCloud) if e.Zone == nil || e.Zone.ZoneID == nil { - klog.V(4).Infof("Zone / ZoneID not found for %s, skipping Find", fi.StringValue(e.ResourceName)) + klog.V(4).Infof("Zone / ZoneID not found for %s, skipping Find", fi.ValueOf(e.ResourceName)) return nil, nil } - findName := fi.StringValue(e.ResourceName) + findName := fi.ValueOf(e.ResourceName) if findName == "" { return nil, nil } findName = strings.TrimSuffix(findName, ".") - findType := fi.StringValue(e.ResourceType) + findType := fi.ValueOf(e.ResourceType) if findType == "" { return nil, nil } @@ -161,10 +161,10 @@ func findDNSTargetNLB(cloud awsup.AWSCloud, aliasTarget *route53.AliasTarget, dn tags := tagMap[loadBalancerArn] nameTag, _ := awsup.FindELBV2Tag(tags, "Name") if nameTag == "" { - return nil, fmt.Errorf("Found NLB %q linked to DNS name %q, but it did not have a Name tag", loadBalancerName, fi.StringValue(targetDNSName)) + return nil, fmt.Errorf("Found NLB %q linked to DNS name %q, but it did not have a Name tag", loadBalancerName, fi.ValueOf(targetDNSName)) } nameTag = strings.Replace(nameTag, ".", "-", -1) - return &NetworkLoadBalancer{Name: fi.String(nameTag)}, nil + return &NetworkLoadBalancer{Name: fi.PtrTo(nameTag)}, nil } return nil, nil } @@ -183,9 +183,9 @@ func findDNSTargetELB(cloud awsup.AWSCloud, aliasTarget *route53.AliasTarget, dn tags := tagMap[loadBalancerName] nameTag, _ := awsup.FindELBTag(tags, "Name") if nameTag == "" { - return nil, fmt.Errorf("Found ELB %q linked to DNS name %q, but it did not have a Name tag", loadBalancerName, fi.StringValue(targetDNSName)) + return nil, fmt.Errorf("Found ELB %q linked to DNS name %q, but it did not have a Name tag", loadBalancerName, fi.ValueOf(targetDNSName)) } - return &ClassicLoadBalancer{Name: fi.String(nameTag)}, nil + return &ClassicLoadBalancer{Name: fi.PtrTo(nameTag)}, nil } return nil, nil } @@ -196,13 +196,13 @@ func (e *DNSName) Run(c *fi.Context) error { func (s *DNSName) CheckChanges(a, e, changes *DNSName) error { if a == nil { - if fi.StringValue(e.Name) == "" { + if fi.ValueOf(e.Name) == "" { return fi.RequiredField("Name") } - if fi.StringValue(e.ResourceName) == "" { + if fi.ValueOf(e.ResourceName) == "" { return fi.RequiredField("ResourceName") } - if fi.StringValue(e.ResourceType) == "" { + if fi.ValueOf(e.ResourceType) == "" { return fi.RequiredField("ResourceType") } if 
e.Zone == nil { diff --git a/upup/pkg/fi/cloudup/awstasks/dnszone.go b/upup/pkg/fi/cloudup/awstasks/dnszone.go index 8b3f5d2edb52e..dc4a4784193c1 100644 --- a/upup/pkg/fi/cloudup/awstasks/dnszone.go +++ b/upup/pkg/fi/cloudup/awstasks/dnszone.go @@ -67,10 +67,10 @@ func (e *DNSZone) Find(c *fi.Context) (*DNSZone, error) { actual := &DNSZone{} actual.Name = e.Name if z.HostedZone.Name != nil { - actual.DNSName = fi.String(strings.TrimSuffix(*z.HostedZone.Name, ".")) + actual.DNSName = fi.PtrTo(strings.TrimSuffix(*z.HostedZone.Name, ".")) } if z.HostedZone.Id != nil { - actual.ZoneID = fi.String(strings.TrimPrefix(*z.HostedZone.Id, "/hostedzone/")) + actual.ZoneID = fi.PtrTo(strings.TrimPrefix(*z.HostedZone.Id, "/hostedzone/")) } actual.Private = z.HostedZone.Config.PrivateZone @@ -120,7 +120,7 @@ func (e *DNSZone) findExisting(cloud awsup.AWSCloud) (*route53.GetHostedZoneOutp } } - findName := fi.StringValue(e.DNSName) + findName := fi.ValueOf(e.DNSName) if findName == "" { return nil, nil } @@ -138,7 +138,7 @@ func (e *DNSZone) findExisting(cloud awsup.AWSCloud) (*route53.GetHostedZoneOutp var zones []*route53.HostedZone for _, zone := range response.HostedZones { - if aws.StringValue(zone.Name) == findName && fi.BoolValue(zone.Config.PrivateZone) == fi.BoolValue(e.Private) { + if aws.StringValue(zone.Name) == findName && fi.ValueOf(zone.Config.PrivateZone) == fi.ValueOf(e.Private) { zones = append(zones, zone) } } @@ -166,7 +166,7 @@ func (e *DNSZone) Run(c *fi.Context) error { } func (s *DNSZone) CheckChanges(a, e, changes *DNSZone) error { - if fi.StringValue(e.Name) == "" { + if fi.ValueOf(e.Name) == "" { return fi.RequiredField("Name") } return nil @@ -234,7 +234,7 @@ type terraformRoute53ZoneAssociation struct { func (_ *DNSZone) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *DNSZone) error { cloud := t.Cloud.(awsup.AWSCloud) - dnsName := fi.StringValue(e.DNSName) + dnsName := fi.ValueOf(e.DNSName) // As a special case, we check for an existing zone // It is really painful to have TF create a new one... @@ -308,7 +308,7 @@ type cloudformationRoute53Zone struct { func (_ *DNSZone) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *DNSZone) error { cloud := t.Cloud.(awsup.AWSCloud) - dnsName := fi.StringValue(e.DNSName) + dnsName := fi.ValueOf(e.DNSName) // As a special case, we check for an existing zone // It is really painful to have TF create a new one... @@ -328,7 +328,7 @@ func (_ *DNSZone) RenderCloudformation(t *cloudformation.CloudformationTarget, a return nil } - if !fi.BoolValue(e.Private) { + if !fi.ValueOf(e.Private) { return fmt.Errorf("Creation of public Route53 hosted zones is not supported for cloudformation") } diff --git a/upup/pkg/fi/cloudup/awstasks/ebsvolume.go b/upup/pkg/fi/cloudup/awstasks/ebsvolume.go index 1eed81f985566..d3273519bc86b 100644 --- a/upup/pkg/fi/cloudup/awstasks/ebsvolume.go +++ b/upup/pkg/fi/cloudup/awstasks/ebsvolume.go @@ -234,7 +234,7 @@ func (e *EBSVolume) TerraformLink() *terraformWriter.Literal { // TerraformName returns the terraform-safe name, along with a boolean indicating of whether name-prefixing was needed. 
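Stepping back a few files, one autoscalinggroup.go hunk in RenderTerraform deserves flagging rather than a plain rename: `if e.MixedSpotAllocationStrategy == fi.PtrTo("capacity-optimized")` compares two *string pointers, and because fi.PtrTo returns the address of a fresh copy, the condition is always false — the old `== fi.String(...)` form had the same latent bug, so the capacity-optimized warning can never fire. A value comparison is presumably what was intended; a short self-contained demonstration (using the sketched helpers from above):

package main

import "fmt"

func PtrTo[T any](v T) *T { return &v }

func ValueOf[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

func main() {
	strategy := PtrTo("capacity-optimized")

	// Pointer comparison: always false, since PtrTo allocates anew each call.
	fmt.Println(strategy == PtrTo("capacity-optimized")) // false

	// Value comparison: behaves as the surrounding warning presumably intends.
	fmt.Println(ValueOf(strategy) == "capacity-optimized") // true
}
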
func (e *EBSVolume) TerraformName() (string, bool) { usedPrefix := false - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) if name[0] >= '0' && name[0] <= '9' { usedPrefix = true return fmt.Sprintf("ebs-%v", name), usedPrefix diff --git a/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway.go b/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway.go index b94767e9f069c..fa7795f9d2810 100644 --- a/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway.go +++ b/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway.go @@ -69,9 +69,9 @@ func (e *EgressOnlyInternetGateway) Find(c *fi.Context) (*EgressOnlyInternetGate request := &ec2.DescribeEgressOnlyInternetGatewaysInput{} - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { - if fi.StringValue(e.VPC.ID) == "" { + if fi.ValueOf(e.VPC.ID) == "" { return nil, fmt.Errorf("VPC ID is required when EgressOnlyInternetGateway is shared") } @@ -114,7 +114,7 @@ func (e *EgressOnlyInternetGateway) Find(c *fi.Context) (*EgressOnlyInternetGate } // We don't set the tags for a shared EIGW - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { actual.Tags = e.Tags } @@ -136,7 +136,7 @@ func (s *EgressOnlyInternetGateway) CheckChanges(a, e, changes *EgressOnlyIntern } func (_ *EgressOnlyInternetGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *EgressOnlyInternetGateway) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Verify the EgressOnlyInternetGateway was found and matches our required settings if a == nil { @@ -172,14 +172,14 @@ type terraformEgressOnlyInternetGateway struct { } func (_ *EgressOnlyInternetGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *EgressOnlyInternetGateway) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed // But ... 
attempt to discover the ID so TerraformLink works if e.ID == nil { request := &ec2.DescribeEgressOnlyInternetGatewaysInput{} - vpcID := fi.StringValue(e.VPC.ID) + vpcID := fi.ValueOf(e.VPC.ID) if vpcID == "" { return fmt.Errorf("VPC ID is required when EgressOnlyInternetGateway is shared") } @@ -207,7 +207,7 @@ func (_ *EgressOnlyInternetGateway) RenderTerraform(t *terraform.TerraformTarget } func (e *EgressOnlyInternetGateway) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ID == nil { klog.Fatalf("ID must be set, if EgressOnlyInternetGateway is shared: %s", e) diff --git a/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway_test.go b/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway_test.go index 93101e65fa4c0..41aabfce2266f 100644 --- a/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway_test.go +++ b/upup/pkg/fi/cloudup/awstasks/egressonlyinternetgateway_test.go @@ -69,14 +69,14 @@ func TestSharedEgressOnlyInternetGatewayDoesNotRename(t *testing.T) { Lifecycle: fi.LifecycleSync, CIDR: s("172.20.0.0/16"), Tags: map[string]string{"kubernetes.io/cluster/cluster.example.com": "shared"}, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), ID: vpc.Vpc.VpcId, } eigw1 := &EgressOnlyInternetGateway{ Name: s("eigw1"), Lifecycle: fi.LifecycleSync, VPC: vpc1, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), ID: internetGateway.EgressOnlyInternetGateway.EgressOnlyInternetGatewayId, Tags: make(map[string]string), } @@ -105,7 +105,7 @@ func TestSharedEgressOnlyInternetGatewayDoesNotRename(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(eigw1.ID) == "" { + if fi.ValueOf(eigw1.ID) == "" { t.Fatalf("ID not set after create") } diff --git a/upup/pkg/fi/cloudup/awstasks/elastic_ip.go b/upup/pkg/fi/cloudup/awstasks/elastic_ip.go index 00260ecad922c..5dc197ea17c46 100644 --- a/upup/pkg/fi/cloudup/awstasks/elastic_ip.go +++ b/upup/pkg/fi/cloudup/awstasks/elastic_ip.go @@ -265,7 +265,7 @@ func (_ *ElasticIP) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *ElasticIP) e } else { // TODO: Figure out what we can do. 
We're sort of stuck between wanting to have one code-path with // terraform, and having a bigger "window of loss" here before we create the NATGateway - klog.V(2).Infof("ElasticIP %q not tagged on subnet; risk of leaking", fi.StringValue(publicIp)) + klog.V(2).Infof("ElasticIP %q not tagged on subnet; risk of leaking", fi.ValueOf(publicIp)) } return nil @@ -277,7 +277,7 @@ type terraformElasticIP struct { } func (_ *ElasticIP) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ElasticIP) error { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { return fmt.Errorf("ID must be set, if ElasticIP is shared: %v", e) } @@ -294,7 +294,7 @@ func (_ *ElasticIP) RenderTerraform(t *terraform.TerraformTarget, a, e, changes } func (e *ElasticIP) TerraformLink() *terraformWriter.Literal { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { klog.Fatalf("ID must be set, if ElasticIP is shared: %v", e) } @@ -310,7 +310,7 @@ type cloudformationElasticIP struct { } func (_ *ElasticIP) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *ElasticIP) error { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { return fmt.Errorf("ID must be set, if ElasticIP is shared: %v", e) } @@ -332,7 +332,7 @@ func (_ *ElasticIP) RenderCloudformation(t *cloudformation.CloudformationTarget, //} func (e *ElasticIP) CloudformationAllocationID() *cloudformation.Literal { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { klog.Fatalf("ID must be set, if ElasticIP is shared: %v", e) } diff --git a/upup/pkg/fi/cloudup/awstasks/elastic_ip_test.go b/upup/pkg/fi/cloudup/awstasks/elastic_ip_test.go index e73920a943dd5..68c4484c91c95 100644 --- a/upup/pkg/fi/cloudup/awstasks/elastic_ip_test.go +++ b/upup/pkg/fi/cloudup/awstasks/elastic_ip_test.go @@ -88,7 +88,7 @@ func TestElasticIPCreate(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(eip1.ID) == "" { + if fi.ValueOf(eip1.ID) == "" { t.Fatalf("ID not set after create") } diff --git a/upup/pkg/fi/cloudup/awstasks/eventbridgerule.go b/upup/pkg/fi/cloudup/awstasks/eventbridgerule.go index a3dfc4c1eca39..52cd069775282 100644 --- a/upup/pkg/fi/cloudup/awstasks/eventbridgerule.go +++ b/upup/pkg/fi/cloudup/awstasks/eventbridgerule.go @@ -148,7 +148,7 @@ func (_ *EventBridgeRule) RenderTerraform(t *terraform.TerraformTarget, a, e, ch } func (eb *EventBridgeRule) TerraformLink() *terraformWriter.Literal { - return terraformWriter.LiteralProperty("aws_cloudwatch_event_rule", fi.StringValue(eb.Name), "id") + return terraformWriter.LiteralProperty("aws_cloudwatch_event_rule", fi.ValueOf(eb.Name), "id") } type cloudformationTarget struct { diff --git a/upup/pkg/fi/cloudup/awstasks/eventbridgetarget.go b/upup/pkg/fi/cloudup/awstasks/eventbridgetarget.go index 805ae97bef83a..146ad588bd3e8 100644 --- a/upup/pkg/fi/cloudup/awstasks/eventbridgetarget.go +++ b/upup/pkg/fi/cloudup/awstasks/eventbridgetarget.go @@ -74,7 +74,7 @@ func (eb *EventBridgeTarget) Find(c *fi.Context) (*EventBridgeTarget, error) { return nil, nil } for _, target := range response.Targets { - if fi.StringValue(target.Arn) == fi.StringValue(eb.SQSQueue.ARN) { + if fi.ValueOf(target.Arn) == fi.ValueOf(eb.SQSQueue.ARN) { actual := &EventBridgeTarget{ ID: target.Id, Name: eb.Name, diff --git a/upup/pkg/fi/cloudup/awstasks/helper.go b/upup/pkg/fi/cloudup/awstasks/helper.go index 95af0c508f2b2..c87ad86d2ec56 100644 --- a/upup/pkg/fi/cloudup/awstasks/helper.go +++ 
b/upup/pkg/fi/cloudup/awstasks/helper.go @@ -37,7 +37,7 @@ func buildEphemeralDevices(cloud awsup.AWSCloud, machineType string) (map[string blockDeviceMappings := make(map[string]*BlockDeviceMapping) for _, ed := range mt.EphemeralDevices() { - blockDeviceMappings[ed.DeviceName] = &BlockDeviceMapping{VirtualName: fi.String(ed.VirtualName)} + blockDeviceMappings[ed.DeviceName] = &BlockDeviceMapping{VirtualName: fi.PtrTo(ed.VirtualName)} } return blockDeviceMappings, nil diff --git a/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go b/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go index f01d9c06163a0..614d08cae9d5c 100644 --- a/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go +++ b/upup/pkg/fi/cloudup/awstasks/iaminstanceprofile.go @@ -101,7 +101,7 @@ func (e *IAMInstanceProfile) Run(c *fi.Context) error { func (s *IAMInstanceProfile) CheckChanges(a, e, changes *IAMInstanceProfile) error { if a != nil { - if fi.StringValue(e.Name) == "" && !fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Name) == "" && !fi.ValueOf(e.Shared) { return fi.RequiredField("Name") } } @@ -109,9 +109,9 @@ func (s *IAMInstanceProfile) CheckChanges(a, e, changes *IAMInstanceProfile) err } func (_ *IAMInstanceProfile) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *IAMInstanceProfile) error { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if a == nil { - return fmt.Errorf("instance role profile with id %q not found", fi.StringValue(e.ID)) + return fmt.Errorf("instance role profile with id %q not found", fi.ValueOf(e.ID)) } } else if a == nil { klog.V(2).Infof("Creating IAMInstanceProfile with Name:%q", *e.Name) @@ -182,8 +182,8 @@ func (_ *IAMInstanceProfile) RenderTerraform(t *terraform.TerraformTarget, a, e, } func (e *IAMInstanceProfile) TerraformLink() *terraformWriter.Literal { - if fi.BoolValue(e.Shared) { - return terraformWriter.LiteralFromStringValue(fi.StringValue(e.Name)) + if fi.ValueOf(e.Shared) { + return terraformWriter.LiteralFromStringValue(fi.ValueOf(e.Name)) } return terraformWriter.LiteralProperty("aws_iam_instance_profile", *e.Name, "id") } @@ -194,8 +194,8 @@ func (_ *IAMInstanceProfile) RenderCloudformation(t *cloudformation.Cloudformati } func (e *IAMInstanceProfile) CloudformationLink() *cloudformation.Literal { - if fi.BoolValue(e.Shared) { - return cloudformation.LiteralString(fi.StringValue(e.Name)) + if fi.ValueOf(e.Shared) { + return cloudformation.LiteralString(fi.ValueOf(e.Name)) } - return cloudformation.Ref("AWS::IAM::InstanceProfile", fi.StringValue(e.Name)) + return cloudformation.Ref("AWS::IAM::InstanceProfile", fi.ValueOf(e.Name)) } diff --git a/upup/pkg/fi/cloudup/awstasks/iamoidcprovider.go b/upup/pkg/fi/cloudup/awstasks/iamoidcprovider.go index 584b41d813550..04e593a04f745 100644 --- a/upup/pkg/fi/cloudup/awstasks/iamoidcprovider.go +++ b/upup/pkg/fi/cloudup/awstasks/iamoidcprovider.go @@ -75,7 +75,7 @@ func (e *IAMOIDCProvider) Find(c *fi.Context) (*IAMOIDCProvider, error) { actualURL = "https://" + actualURL } - if actualURL == fi.StringValue(e.URL) { + if actualURL == fi.ValueOf(e.URL) { actual := &IAMOIDCProvider{ ClientIDs: descResp.ClientIDList, @@ -139,7 +139,7 @@ func (p *IAMOIDCProvider) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *IAMOID e.arn = response.OpenIDConnectProviderArn } else { if changes.Thumbprints != nil { - klog.V(2).Infof("Updating IAMOIDCProvider Thumbprints %q", fi.StringValue(e.arn)) + klog.V(2).Infof("Updating IAMOIDCProvider Thumbprints %q", fi.ValueOf(e.arn)) request := &iam.UpdateOpenIDConnectProviderThumbprintInput{} 
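A pattern that recurs through the internet-gateway, Elastic IP, and IAM hunks above is `shared := fi.ValueOf(e.Shared)`: Shared is declared as a *bool precisely so that "not specified" stays distinguishable from an explicit false, and ValueOf lets every guard treat nil as false without a separate nil check. A minimal sketch with a hypothetical task type standing in for the awstasks types:

package main

import "fmt"

func PtrTo[T any](v T) *T { return &v }

func ValueOf[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

// task stands in for the awstasks types above; only Shared matters here.
type task struct {
	Shared *bool
}

func render(t task) string {
	// ValueOf treats a nil Shared as false, so an unset field and an
	// explicit false both take the kops-owned path below.
	if ValueOf(t.Shared) {
		return "shared: verify the pre-existing resource; never rename or delete it"
	}
	return "owned: create or update the resource as needed"
}

func main() {
	fmt.Println(render(task{}))                    // owned (Shared unset)
	fmt.Println(render(task{Shared: PtrTo(true)})) // shared (pre-existing)
}
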
request.OpenIDConnectProviderArn = a.arn diff --git a/upup/pkg/fi/cloudup/awstasks/iamrole.go b/upup/pkg/fi/cloudup/awstasks/iamrole.go index d68a89c3171a7..5ef71aaec3e7b 100644 --- a/upup/pkg/fi/cloudup/awstasks/iamrole.go +++ b/upup/pkg/fi/cloudup/awstasks/iamrole.go @@ -360,7 +360,7 @@ func (_ *IAMRole) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *I tf.PermissionsBoundary = e.PermissionsBoundary } - if fi.StringValue(e.ExportWithID) != "" { + if fi.ValueOf(e.ExportWithID) != "" { t.AddOutputVariable(*e.ExportWithID+"_role_arn", terraformWriter.LiteralProperty("aws_iam_role", *e.Name, "arn")) t.AddOutputVariable(*e.ExportWithID+"_role_name", e.TerraformLink()) } diff --git a/upup/pkg/fi/cloudup/awstasks/instance.go b/upup/pkg/fi/cloudup/awstasks/instance.go index 16f9ca6bb575d..3196d64255a78 100644 --- a/upup/pkg/fi/cloudup/awstasks/instance.go +++ b/upup/pkg/fi/cloudup/awstasks/instance.go @@ -66,7 +66,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { cloud := c.Cloud.(awsup.AWSCloud) var request *ec2.DescribeInstancesInput - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { var instanceIds []*string instanceIds = append(instanceIds, e.ID) request = &ec2.DescribeInstancesInput{ @@ -200,7 +200,7 @@ func (e *Instance) Run(c *fi.Context) error { func (_ *Instance) CheckChanges(a, e, changes *Instance) error { if a != nil { - if !fi.BoolValue(e.Shared) && e.Name == nil { + if !fi.ValueOf(e.Shared) && e.Name == nil { return fi.RequiredField("Name") } } @@ -210,19 +210,19 @@ func (_ *Instance) CheckChanges(a, e, changes *Instance) error { func (_ *Instance) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Instance) error { if a == nil { - if fi.BoolValue(e.Shared) { - return fmt.Errorf("NAT EC2 Instance %q not found", fi.StringValue(e.ID)) + if fi.ValueOf(e.Shared) { + return fmt.Errorf("NAT EC2 Instance %q not found", fi.ValueOf(e.ID)) } if e.ImageID == nil { return fi.RequiredField("ImageID") } - image, err := t.Cloud.ResolveImage(fi.StringValue(e.ImageID)) + image, err := t.Cloud.ResolveImage(fi.ValueOf(e.ImageID)) if err != nil { return err } - klog.V(2).Infof("Creating Instance with Name:%q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating Instance with Name:%q", fi.ValueOf(e.Name)) request := &ec2.RunInstancesInput{ ImageId: image.ImageId, InstanceType: e.InstanceType, @@ -250,7 +250,7 @@ func (_ *Instance) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Instance) err // Build up the actual block device mappings // TODO: Support RootVolumeType & RootVolumeSize (see launchconfiguration) - blockDeviceMappings, err := buildEphemeralDevices(t.Cloud, fi.StringValue(e.InstanceType)) + blockDeviceMappings, err := buildEphemeralDevices(t.Cloud, fi.ValueOf(e.InstanceType)) if err != nil { return err } @@ -297,7 +297,7 @@ func (_ *Instance) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Instance) err } func (e *Instance) TerraformLink() *terraformWriter.Literal { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { klog.Fatalf("ID must be set, if NAT Instance is shared: %s", e) } diff --git a/upup/pkg/fi/cloudup/awstasks/instancerequirements.go b/upup/pkg/fi/cloudup/awstasks/instancerequirements.go index 2e8fbc1d7c235..ceb53157d15f8 100644 --- a/upup/pkg/fi/cloudup/awstasks/instancerequirements.go +++ b/upup/pkg/fi/cloudup/awstasks/instancerequirements.go @@ -67,7 +67,7 @@ func overridesFromInstanceRequirements(ir *InstanceRequirements) *autoscaling.La Max: ir.MemoryMax, Min: ir.MemoryMin, }, - BurstablePerformance: 
fi.String("included"), + BurstablePerformance: fi.PtrTo("included"), }, } } diff --git a/upup/pkg/fi/cloudup/awstasks/internetgateway.go b/upup/pkg/fi/cloudup/awstasks/internetgateway.go index e326042187a89..9419f62e74438 100644 --- a/upup/pkg/fi/cloudup/awstasks/internetgateway.go +++ b/upup/pkg/fi/cloudup/awstasks/internetgateway.go @@ -69,9 +69,9 @@ func (e *InternetGateway) Find(c *fi.Context) (*InternetGateway, error) { request := &ec2.DescribeInternetGatewaysInput{} - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { - if fi.StringValue(e.VPC.ID) == "" { + if fi.ValueOf(e.VPC.ID) == "" { return nil, fmt.Errorf("VPC ID is required when InternetGateway is shared") } @@ -114,7 +114,7 @@ func (e *InternetGateway) Find(c *fi.Context) (*InternetGateway, error) { } // We don't set the tags for a shared IGW - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { actual.Tags = e.Tags } @@ -137,7 +137,7 @@ func (s *InternetGateway) CheckChanges(a, e, changes *InternetGateway) error { } func (_ *InternetGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *InternetGateway) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Verify the InternetGateway was found and matches our required settings if a == nil { @@ -185,14 +185,14 @@ type terraformInternetGateway struct { } func (_ *InternetGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *InternetGateway) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed // But ... attempt to discover the ID so TerraformLink works if e.ID == nil { request := &ec2.DescribeInternetGatewaysInput{} - vpcID := fi.StringValue(e.VPC.ID) + vpcID := fi.ValueOf(e.VPC.ID) if vpcID == "" { return fmt.Errorf("VPC ID is required when InternetGateway is shared") } @@ -220,7 +220,7 @@ func (_ *InternetGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, ch } func (e *InternetGateway) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ID == nil { klog.Fatalf("ID must be set, if InternetGateway is shared: %s", e) @@ -243,14 +243,14 @@ type cloudformationVpcGatewayAttachment struct { } func (_ *InternetGateway) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *InternetGateway) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not cloudformation owned / managed // But ... 
attempt to discover the ID so CloudformationLink works if e.ID == nil { request := &ec2.DescribeInternetGatewaysInput{} - vpcID := fi.StringValue(e.VPC.ID) + vpcID := fi.ValueOf(e.VPC.ID) if vpcID == "" { return fmt.Errorf("VPC ID is required when InternetGateway is shared") } @@ -296,7 +296,7 @@ func (_ *InternetGateway) RenderCloudformation(t *cloudformation.CloudformationT } func (e *InternetGateway) CloudformationLink() *cloudformation.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ID == nil { klog.Fatalf("ID must be set, if InternetGateway is shared: %s", e) diff --git a/upup/pkg/fi/cloudup/awstasks/internetgateway_test.go b/upup/pkg/fi/cloudup/awstasks/internetgateway_test.go index 3261a395ab118..bbfeb21816434 100644 --- a/upup/pkg/fi/cloudup/awstasks/internetgateway_test.go +++ b/upup/pkg/fi/cloudup/awstasks/internetgateway_test.go @@ -85,14 +85,14 @@ func TestSharedInternetGatewayDoesNotRename(t *testing.T) { Lifecycle: fi.LifecycleSync, CIDR: s("172.20.0.0/16"), Tags: map[string]string{"kubernetes.io/cluster/cluster.example.com": "shared"}, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), ID: vpc.Vpc.VpcId, } igw1 := &InternetGateway{ Name: s("igw1"), Lifecycle: fi.LifecycleSync, VPC: vpc1, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), ID: internetGateway.InternetGateway.InternetGatewayId, Tags: make(map[string]string), } @@ -121,7 +121,7 @@ func TestSharedInternetGatewayDoesNotRename(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(igw1.ID) == "" { + if fi.ValueOf(igw1.ID) == "" { t.Fatalf("ID not set after create") } diff --git a/upup/pkg/fi/cloudup/awstasks/launchtemplate.go b/upup/pkg/fi/cloudup/awstasks/launchtemplate.go index ba33be6cbf5a6..86dc13c07a1c3 100644 --- a/upup/pkg/fi/cloudup/awstasks/launchtemplate.go +++ b/upup/pkg/fi/cloudup/awstasks/launchtemplate.go @@ -104,7 +104,7 @@ func (t *LaunchTemplate) CompareWithID() *string { // buildRootDevice is responsible for retrieving a boot device mapping from the image name func (t *LaunchTemplate) buildRootDevice(cloud awsup.AWSCloud) (map[string]*BlockDeviceMapping, error) { - image := fi.StringValue(t.ImageID) + image := fi.ValueOf(t.ImageID) if image == "" { return map[string]*BlockDeviceMapping{}, nil } diff --git a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_api.go b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_api.go index 9e94223065fdf..c91a2eeb2c38d 100644 --- a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_api.go +++ b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_api.go @@ -33,14 +33,14 @@ import ( // RenderAWS is responsible for performing creating / updating the launch template func (t *LaunchTemplate) RenderAWS(c *awsup.AWSAPITarget, a, e, changes *LaunchTemplate) error { // @step: resolve the image id to an AMI for us - image, err := c.Cloud.ResolveImage(fi.StringValue(t.ImageID)) + image, err := c.Cloud.ResolveImage(fi.ValueOf(t.ImageID)) if err != nil { return err } // @step: lets build the launch template data data := &ec2.RequestLaunchTemplateData{ - DisableApiTermination: fi.Bool(false), + DisableApiTermination: fi.PtrTo(false), EbsOptimized: t.RootVolumeOptimization, ImageId: image.ImageId, InstanceType: t.InstanceType, @@ -53,7 +53,7 @@ func (t *LaunchTemplate) RenderAWS(c *awsup.AWSAPITarget, a, e, changes *LaunchT { AssociatePublicIpAddress: t.AssociatePublicIP, DeleteOnTermination: aws.Bool(true), - DeviceIndex: fi.Int64(0), + DeviceIndex: fi.PtrTo(int64(0)), Ipv6AddressCount: 
t.IPv6AddressCount, }, }, @@ -64,7 +64,7 @@ func (t *LaunchTemplate) RenderAWS(c *awsup.AWSAPITarget, a, e, changes *LaunchT if err != nil { return fmt.Errorf("failed to build root device: %w", err) } - ephemeralDevices, err := buildEphemeralDevices(c.Cloud, fi.StringValue(t.InstanceType)) + ephemeralDevices, err := buildEphemeralDevices(c.Cloud, fi.ValueOf(t.InstanceType)) if err != nil { return fmt.Errorf("failed to build ephemeral devices: %w", err) } @@ -91,7 +91,7 @@ func (t *LaunchTemplate) RenderAWS(c *awsup.AWSAPITarget, a, e, changes *LaunchT data.Placement = &ec2.LaunchTemplatePlacementRequest{Tenancy: t.Tenancy} } // @step: set the instance monitoring - data.Monitoring = &ec2.LaunchTemplatesMonitoringRequest{Enabled: fi.Bool(false)} + data.Monitoring = &ec2.LaunchTemplatesMonitoringRequest{Enabled: fi.PtrTo(false)} if t.InstanceMonitoring != nil { data.Monitoring = &ec2.LaunchTemplatesMonitoringRequest{Enabled: t.InstanceMonitoring} } @@ -128,18 +128,18 @@ func (t *LaunchTemplate) RenderAWS(c *awsup.AWSAPITarget, a, e, changes *LaunchT data.UserData = aws.String(base64.StdEncoding.EncodeToString(d)) } // @step: add market options - if fi.StringValue(t.SpotPrice) != "" { + if fi.ValueOf(t.SpotPrice) != "" { s := &ec2.LaunchTemplateSpotMarketOptionsRequest{ BlockDurationMinutes: t.SpotDurationInMinutes, InstanceInterruptionBehavior: t.InstanceInterruptionBehavior, MaxPrice: t.SpotPrice, } data.InstanceMarketOptions = &ec2.LaunchTemplateInstanceMarketOptionsRequest{ - MarketType: fi.String("spot"), + MarketType: fi.PtrTo("spot"), SpotOptions: s, } } - if fi.StringValue(t.CPUCredits) != "" { + if fi.ValueOf(t.CPUCredits) != "" { data.CreditSpecification = &ec2.CreditSpecificationRequest{ CpuCredits: t.CPUCredits, } @@ -158,7 +158,7 @@ func (t *LaunchTemplate) RenderAWS(c *awsup.AWSAPITarget, a, e, changes *LaunchT } output, err := c.Cloud.EC2().CreateLaunchTemplate(input) if err != nil || output.LaunchTemplate == nil { - return fmt.Errorf("error creating LaunchTemplate %q: %v", fi.StringValue(t.Name), err) + return fmt.Errorf("error creating LaunchTemplate %q: %v", fi.ValueOf(t.Name), err) } e.ID = output.LaunchTemplate.LaunchTemplateId } else { @@ -179,7 +179,7 @@ func (t *LaunchTemplate) RenderAWS(c *awsup.AWSAPITarget, a, e, changes *LaunchT } } if changes.Tags != nil { - err = c.UpdateTags(fi.StringValue(a.ID), e.Tags) + err = c.UpdateTags(fi.ValueOf(a.ID), e.Tags) if err != nil { return fmt.Errorf("error updating LaunchTemplate tags: %v", err) } @@ -207,13 +207,13 @@ func (t *LaunchTemplate) Find(c *fi.Context) (*LaunchTemplate, error) { return nil, nil } - klog.V(3).Infof("found existing LaunchTemplate: %s", fi.StringValue(lt.LaunchTemplateName)) + klog.V(3).Infof("found existing LaunchTemplate: %s", fi.ValueOf(lt.LaunchTemplateName)) actual := &LaunchTemplate{ - AssociatePublicIP: fi.Bool(false), + AssociatePublicIP: fi.PtrTo(false), ID: lt.LaunchTemplateId, ImageID: lt.LaunchTemplateData.ImageId, - InstanceMonitoring: fi.Bool(false), + InstanceMonitoring: fi.PtrTo(false), InstanceType: lt.LaunchTemplateData.InstanceType, Lifecycle: t.Lifecycle, Name: t.Name, @@ -223,7 +223,7 @@ func (t *LaunchTemplate) Find(c *fi.Context) (*LaunchTemplate, error) { // @step: check if any of the interfaces are public facing for _, x := range lt.LaunchTemplateData.NetworkInterfaces { if aws.BoolValue(x.AssociatePublicIpAddress) { - actual.AssociatePublicIP = fi.Bool(true) + actual.AssociatePublicIP = fi.PtrTo(true) } for _, id := range x.Groups { actual.SecurityGroups = 
append(actual.SecurityGroups, &SecurityGroup{ID: id}) @@ -232,7 +232,7 @@ func (t *LaunchTemplate) Find(c *fi.Context) (*LaunchTemplate, error) { } // In older Kops versions, security groups were added to LaunchTemplateData.SecurityGroupIds for _, id := range lt.LaunchTemplateData.SecurityGroupIds { - actual.SecurityGroups = append(actual.SecurityGroups, &SecurityGroup{ID: fi.String("legacy-" + *id)}) + actual.SecurityGroups = append(actual.SecurityGroups, &SecurityGroup{ID: fi.PtrTo("legacy-" + *id)}) } sort.Sort(OrderSecurityGroupsById(actual.SecurityGroups)) @@ -269,7 +269,7 @@ func (t *LaunchTemplate) Find(c *fi.Context) (*LaunchTemplate, error) { // @step: get the image is order to find out the root device name as using the index // is not variable, under conditions they move - image, err := cloud.ResolveImage(fi.StringValue(t.ImageID)) + image, err := cloud.ResolveImage(fi.ValueOf(t.ImageID)) if err != nil { return nil, err } @@ -279,7 +279,7 @@ func (t *LaunchTemplate) Find(c *fi.Context) (*LaunchTemplate, error) { if b.Ebs == nil { continue } - if b.DeviceName != nil && fi.StringValue(b.DeviceName) == fi.StringValue(image.RootDeviceName) { + if b.DeviceName != nil && fi.ValueOf(b.DeviceName) == fi.ValueOf(image.RootDeviceName) { actual.RootVolumeSize = b.Ebs.VolumeSize actual.RootVolumeType = b.Ebs.VolumeType actual.RootVolumeIops = b.Ebs.Iops @@ -288,7 +288,7 @@ func (t *LaunchTemplate) Find(c *fi.Context) (*LaunchTemplate, error) { if b.Ebs.KmsKeyId != nil { actual.RootVolumeKmsKey = b.Ebs.KmsKeyId } else { - actual.RootVolumeKmsKey = fi.String("") + actual.RootVolumeKmsKey = fi.PtrTo("") } } else { _, d := BlockDeviceMappingFromLaunchTemplateBootDeviceRequest(b) @@ -412,7 +412,7 @@ func (d *deleteLaunchTemplate) TaskName() string { // Item returns the launch template name func (d *deleteLaunchTemplate) Item() string { - return fi.StringValue(d.lc.LaunchTemplateName) + return fi.ValueOf(d.lc.LaunchTemplateName) } func (d *deleteLaunchTemplate) Delete(t fi.Target) error { diff --git a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_cloudformation.go b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_cloudformation.go index a5099940b56da..5f609245f169f 100644 --- a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_cloudformation.go +++ b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_cloudformation.go @@ -166,12 +166,12 @@ type cloudformationLaunchTemplate struct { // CloudformationLink returns the cloudformation link for us func (t *LaunchTemplate) CloudformationLink() *cloudformation.Literal { - return cloudformation.Ref("AWS::EC2::LaunchTemplate", fi.StringValue(t.Name)) + return cloudformation.Ref("AWS::EC2::LaunchTemplate", fi.ValueOf(t.Name)) } // CloudformationLink returns the cloudformation version. 
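// NOTE: the mechanical change running through this PR replaces kops's
// type-specific pointer helpers (fi.String, fi.Bool, fi.Int64, and the
// matching fi.StringValue/fi.BoolValue/fi.Int64Value/fi.IntValue accessors)
// with two generic helpers, fi.PtrTo and fi.ValueOf. A minimal sketch of how
// such helpers are typically defined -- the names come from this diff, but the
// bodies here are an assumption (the real definitions live in upup/pkg/fi):
//
//	// PtrTo returns a pointer to a copy of v, for any type T.
//	func PtrTo[T any](v T) *T {
//		return &v
//	}
//
//	// ValueOf dereferences p, returning T's zero value when p is nil.
//	func ValueOf[T any](p *T) T {
//		if p == nil {
//			var zero T
//			return zero
//		}
//		return *p
//	}
//
// One visible consequence: the pointer's type is now inferred from the
// argument, so the old fi.Int64(0) becomes fi.PtrTo(int64(0)) -- a bare
// fi.PtrTo(0) would produce an *int, not the *int64 the AWS SDK fields expect.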
func (t *LaunchTemplate) CloudformationVersion() *cloudformation.Literal { - return cloudformation.GetAtt("AWS::EC2::LaunchTemplate", fi.StringValue(t.Name), "LatestVersionNumber") + return cloudformation.GetAtt("AWS::EC2::LaunchTemplate", fi.ValueOf(t.Name), "LatestVersionNumber") } // RenderCloudformation is responsible for rendering the cloudformation json @@ -182,7 +182,7 @@ func (t *LaunchTemplate) RenderCloudformation(target *cloudformation.Cloudformat var image *string if e.ImageID != nil { - im, err := cloud.ResolveImage(fi.StringValue(e.ImageID)) + im, err := cloud.ResolveImage(fi.ValueOf(e.ImageID)) if err != nil { return err } @@ -200,14 +200,14 @@ func (t *LaunchTemplate) RenderCloudformation(target *cloudformation.Cloudformat NetworkInterfaces: []*cloudformationLaunchTemplateNetworkInterface{ { AssociatePublicIPAddress: e.AssociatePublicIP, - DeleteOnTermination: fi.Bool(true), - DeviceIndex: fi.Int(0), + DeleteOnTermination: fi.PtrTo(true), + DeviceIndex: fi.PtrTo(0), Ipv6AddressCount: e.IPv6AddressCount, }, }, } - if fi.StringValue(e.SpotPrice) != "" { + if fi.ValueOf(e.SpotPrice) != "" { marketSpotOptions := cloudformationLaunchTemplateMarketOptionsSpotOptions{MaxPrice: e.SpotPrice} if e.SpotDurationInMinutes != nil { marketSpotOptions.BlockDurationMinutes = e.SpotDurationInMinutes @@ -215,17 +215,17 @@ func (t *LaunchTemplate) RenderCloudformation(target *cloudformation.Cloudformat if e.InstanceInterruptionBehavior != nil { marketSpotOptions.InstanceInterruptionBehavior = e.InstanceInterruptionBehavior } - launchTemplateData.MarketOptions = &cloudformationLaunchTemplateMarketOptions{MarketType: fi.String("spot"), SpotOptions: &marketSpotOptions} + launchTemplateData.MarketOptions = &cloudformationLaunchTemplateMarketOptions{MarketType: fi.PtrTo("spot"), SpotOptions: &marketSpotOptions} } - if fi.StringValue(e.CPUCredits) != "" { + if fi.ValueOf(e.CPUCredits) != "" { launchTemplateData.CreditSpecification = &cloudformationLaunchTemplateCreditSpecification{ CPUCredits: e.CPUCredits, } } cf := &cloudformationLaunchTemplate{ - LaunchTemplateName: fi.String(fi.StringValue(e.Name)), + LaunchTemplateName: fi.PtrTo(fi.ValueOf(e.Name)), LaunchTemplateData: launchTemplateData, } data := cf.LaunchTemplateData @@ -266,9 +266,9 @@ func (t *LaunchTemplate) RenderCloudformation(target *cloudformation.Cloudformat } for name, x := range devices { data.BlockDeviceMappings = append(data.BlockDeviceMappings, &cloudformationLaunchTemplateBlockDevice{ - DeviceName: fi.String(name), + DeviceName: fi.PtrTo(name), EBS: &cloudformationLaunchTemplateBlockDeviceEBS{ - DeleteOnTermination: fi.Bool(true), + DeleteOnTermination: fi.PtrTo(true), IOPS: x.EbsVolumeIops, Throughput: x.EbsVolumeThroughput, VolumeSize: x.EbsVolumeSize, @@ -280,9 +280,9 @@ func (t *LaunchTemplate) RenderCloudformation(target *cloudformation.Cloudformat } for name, x := range additionals { data.BlockDeviceMappings = append(data.BlockDeviceMappings, &cloudformationLaunchTemplateBlockDevice{ - DeviceName: fi.String(name), + DeviceName: fi.PtrTo(name), EBS: &cloudformationLaunchTemplateBlockDeviceEBS{ - DeleteOnTermination: fi.Bool(true), + DeleteOnTermination: fi.PtrTo(true), IOPS: x.EbsVolumeIops, VolumeSize: x.EbsVolumeSize, Throughput: x.EbsVolumeThroughput, @@ -293,28 +293,28 @@ func (t *LaunchTemplate) RenderCloudformation(target *cloudformation.Cloudformat }) } - devices, err = buildEphemeralDevices(cloud, fi.StringValue(e.InstanceType)) + devices, err = buildEphemeralDevices(cloud, fi.ValueOf(e.InstanceType)) if err != nil 
{ return err } for n, x := range devices { data.BlockDeviceMappings = append(data.BlockDeviceMappings, &cloudformationLaunchTemplateBlockDevice{ VirtualName: x.VirtualName, - DeviceName: fi.String(n), + DeviceName: fi.PtrTo(n), }) } if e.Tags != nil { tags := buildCloudformationTags(t.Tags) data.TagSpecifications = append(data.TagSpecifications, &cloudformationLaunchTemplateTagSpecification{ - ResourceType: fi.String("instance"), + ResourceType: fi.PtrTo("instance"), Tags: tags, }) data.TagSpecifications = append(data.TagSpecifications, &cloudformationLaunchTemplateTagSpecification{ - ResourceType: fi.String("volume"), + ResourceType: fi.PtrTo("volume"), Tags: tags, }) } - return target.RenderResource("AWS::EC2::LaunchTemplate", fi.StringValue(e.Name), cf) + return target.RenderResource("AWS::EC2::LaunchTemplate", fi.ValueOf(e.Name), cf) } diff --git a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_cloudformation_test.go b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_cloudformation_test.go deleted file mode 100644 index ff41a7ac4fd2b..0000000000000 --- a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_cloudformation_test.go +++ /dev/null @@ -1,204 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package awstasks - -import ( - "testing" - - "k8s.io/kops/upup/pkg/fi" -) - -func TestLaunchTemplateCloudformationRender(t *testing.T) { - cases := []*renderTest{ - { - Resource: &LaunchTemplate{ - Name: fi.String("test"), - AssociatePublicIP: fi.Bool(true), - IAMInstanceProfile: &IAMInstanceProfile{ - Name: fi.String("nodes"), - }, - ID: fi.String("test-11"), - InstanceMonitoring: fi.Bool(true), - InstanceType: fi.String("t2.medium"), - RootVolumeOptimization: fi.Bool(true), - RootVolumeIops: fi.Int64(100), - RootVolumeSize: fi.Int64(64), - SpotPrice: fi.String("10"), - SpotDurationInMinutes: fi.Int64(120), - InstanceInterruptionBehavior: fi.String("hibernate"), - SSHKey: &SSHKey{ - Name: fi.String("mykey"), - }, - SecurityGroups: []*SecurityGroup{ - {Name: fi.String("nodes-1"), ID: fi.String("1111")}, - {Name: fi.String("nodes-2"), ID: fi.String("2222")}, - }, - Tenancy: fi.String("dedicated"), - HTTPTokens: fi.String("required"), - HTTPPutResponseHopLimit: fi.Int64(1), - }, - Expected: `{ - "Resources": { - "AWSEC2LaunchTemplatetest": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "test", - "LaunchTemplateData": { - "EbsOptimized": true, - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodes" - } - }, - "InstanceType": "t2.medium", - "KeyName": "mykey", - "InstanceMarketOptions": { - "MarketType": "spot", - "SpotOptions": { - "BlockDurationMinutes": 120, - "InstanceInterruptionBehavior": "hibernate", - "MaxPrice": "10" - } - }, - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "required" - }, - "Monitoring": { - "Enabled": true - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Groups": [ - { - "Ref": 
"AWSEC2SecurityGroupnodes1" - }, - { - "Ref": "AWSEC2SecurityGroupnodes2" - } - ] - } - ], - "Placement": [ - { - "Tenancy": "dedicated" - } - ] - } - } - } - } -}`, - }, - { - Resource: &LaunchTemplate{ - Name: fi.String("test"), - AssociatePublicIP: fi.Bool(true), - BlockDeviceMappings: []*BlockDeviceMapping{ - { - DeviceName: fi.String("/dev/xvdd"), - EbsVolumeType: fi.String("gp2"), - EbsVolumeSize: fi.Int64(100), - EbsDeleteOnTermination: fi.Bool(true), - EbsEncrypted: fi.Bool(true), - }, - }, - IAMInstanceProfile: &IAMInstanceProfile{ - Name: fi.String("nodes"), - }, - ID: fi.String("test-11"), - InstanceMonitoring: fi.Bool(true), - InstanceType: fi.String("t2.medium"), - RootVolumeOptimization: fi.Bool(true), - RootVolumeIops: fi.Int64(100), - RootVolumeSize: fi.Int64(64), - SSHKey: &SSHKey{ - Name: fi.String("mykey"), - }, - SecurityGroups: []*SecurityGroup{ - {Name: fi.String("nodes-1"), ID: fi.String("1111")}, - {Name: fi.String("nodes-2"), ID: fi.String("2222")}, - }, - Tenancy: fi.String("dedicated"), - HTTPTokens: fi.String("optional"), - HTTPPutResponseHopLimit: fi.Int64(1), - }, - Expected: `{ - "Resources": { - "AWSEC2LaunchTemplatetest": { - "Type": "AWS::EC2::LaunchTemplate", - "Properties": { - "LaunchTemplateName": "test", - "LaunchTemplateData": { - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvdd", - "Ebs": { - "VolumeType": "gp2", - "VolumeSize": 100, - "DeleteOnTermination": true, - "Encrypted": true - } - } - ], - "EbsOptimized": true, - "IamInstanceProfile": { - "Name": { - "Ref": "AWSIAMInstanceProfilenodes" - } - }, - "InstanceType": "t2.medium", - "KeyName": "mykey", - "MetadataOptions": { - "HttpPutResponseHopLimit": 1, - "HttpTokens": "optional" - }, - "Monitoring": { - "Enabled": true - }, - "NetworkInterfaces": [ - { - "AssociatePublicIpAddress": true, - "DeleteOnTermination": true, - "DeviceIndex": 0, - "Groups": [ - { - "Ref": "AWSEC2SecurityGroupnodes1" - }, - { - "Ref": "AWSEC2SecurityGroupnodes2" - } - ] - } - ], - "Placement": [ - { - "Tenancy": "dedicated" - } - ] - } - } - } - } -}`, - }, - } - doRenderTests(t, "RenderCloudformation", cases) -} diff --git a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform.go b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform.go index db8c836680137..a446c44f6abe3 100644 --- a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform.go +++ b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform.go @@ -167,12 +167,12 @@ type terraformLaunchTemplate struct { // TerraformLink returns the terraform reference func (t *LaunchTemplate) TerraformLink() *terraformWriter.Literal { - return terraformWriter.LiteralProperty("aws_launch_template", fi.StringValue(t.Name), "id") + return terraformWriter.LiteralProperty("aws_launch_template", fi.ValueOf(t.Name), "id") } // VersionLink returns the terraform version reference func (t *LaunchTemplate) VersionLink() *terraformWriter.Literal { - return terraformWriter.LiteralProperty("aws_launch_template", fi.StringValue(t.Name), "latest_version") + return terraformWriter.LiteralProperty("aws_launch_template", fi.ValueOf(t.Name), "latest_version") } // RenderTerraform is responsible for rendering the terraform json @@ -183,7 +183,7 @@ func (t *LaunchTemplate) RenderTerraform(target *terraform.TerraformTarget, a, e var image *string if e.ImageID != nil { - im, err := cloud.ResolveImage(fi.StringValue(e.ImageID)) + im, err := cloud.ResolveImage(fi.ValueOf(e.ImageID)) if err != nil { return err } @@ -195,10 +195,10 @@ func (t *LaunchTemplate) 
RenderTerraform(target *terraform.TerraformTarget, a, e EBSOptimized: e.RootVolumeOptimization, ImageID: image, InstanceType: e.InstanceType, - Lifecycle: &terraform.Lifecycle{CreateBeforeDestroy: fi.Bool(true)}, + Lifecycle: &terraform.Lifecycle{CreateBeforeDestroy: fi.PtrTo(true)}, MetadataOptions: &terraformLaunchTemplateInstanceMetadata{ // See issue https://github.com/hashicorp/terraform-provider-aws/issues/12564. - HTTPEndpoint: fi.String("enabled"), + HTTPEndpoint: fi.PtrTo("enabled"), HTTPTokens: e.HTTPTokens, HTTPPutResponseHopLimit: e.HTTPPutResponseHopLimit, HTTPProtocolIPv6: e.HTTPProtocolIPv6, @@ -206,13 +206,13 @@ func (t *LaunchTemplate) RenderTerraform(target *terraform.TerraformTarget, a, e NetworkInterfaces: []*terraformLaunchTemplateNetworkInterface{ { AssociatePublicIPAddress: e.AssociatePublicIP, - DeleteOnTermination: fi.Bool(true), + DeleteOnTermination: fi.PtrTo(true), Ipv6AddressCount: e.IPv6AddressCount, }, }, } - if fi.StringValue(e.SpotPrice) != "" { + if fi.ValueOf(e.SpotPrice) != "" { marketSpotOptions := terraformLaunchTemplateMarketOptionsSpotOptions{MaxPrice: e.SpotPrice} if e.SpotDurationInMinutes != nil { marketSpotOptions.BlockDurationMinutes = e.SpotDurationInMinutes @@ -222,12 +222,12 @@ func (t *LaunchTemplate) RenderTerraform(target *terraform.TerraformTarget, a, e } tf.MarketOptions = []*terraformLaunchTemplateMarketOptions{ { - MarketType: fi.String("spot"), + MarketType: fi.PtrTo("spot"), SpotOptions: []*terraformLaunchTemplateMarketOptionsSpotOptions{&marketSpotOptions}, }, } } - if fi.StringValue(e.CPUCredits) != "" { + if fi.ValueOf(e.CPUCredits) != "" { tf.CreditSpecification = &terraformLaunchTemplateCreditSpecification{ CPUCredits: e.CPUCredits, } @@ -257,7 +257,7 @@ func (t *LaunchTemplate) RenderTerraform(target *terraform.TerraformTarget, a, e return err } if d != nil { - tf.UserData, err = target.AddFileBytes("aws_launch_template", fi.StringValue(e.Name), "user_data", d, true) + tf.UserData, err = target.AddFileBytes("aws_launch_template", fi.ValueOf(e.Name), "user_data", d, true) if err != nil { return err } @@ -269,10 +269,10 @@ func (t *LaunchTemplate) RenderTerraform(target *terraform.TerraformTarget, a, e } for n, x := range devices { tf.BlockDeviceMappings = append(tf.BlockDeviceMappings, &terraformLaunchTemplateBlockDevice{ - DeviceName: fi.String(n), + DeviceName: fi.PtrTo(n), EBS: []*terraformLaunchTemplateBlockDeviceEBS{ { - DeleteOnTermination: fi.Bool(true), + DeleteOnTermination: fi.PtrTo(true), Encrypted: x.EbsEncrypted, KmsKeyID: x.EbsKmsKey, IOPS: x.EbsVolumeIops, @@ -289,10 +289,10 @@ func (t *LaunchTemplate) RenderTerraform(target *terraform.TerraformTarget, a, e } for n, x := range additionals { tf.BlockDeviceMappings = append(tf.BlockDeviceMappings, &terraformLaunchTemplateBlockDevice{ - DeviceName: fi.String(n), + DeviceName: fi.PtrTo(n), EBS: []*terraformLaunchTemplateBlockDeviceEBS{ { - DeleteOnTermination: fi.Bool(true), + DeleteOnTermination: fi.PtrTo(true), Encrypted: x.EbsEncrypted, IOPS: x.EbsVolumeIops, Throughput: x.EbsVolumeThroughput, @@ -304,28 +304,28 @@ func (t *LaunchTemplate) RenderTerraform(target *terraform.TerraformTarget, a, e }) } - devices, err = buildEphemeralDevices(cloud, fi.StringValue(e.InstanceType)) + devices, err = buildEphemeralDevices(cloud, fi.ValueOf(e.InstanceType)) if err != nil { return err } for n, x := range devices { tf.BlockDeviceMappings = append(tf.BlockDeviceMappings, &terraformLaunchTemplateBlockDevice{ VirtualName: x.VirtualName, - DeviceName: fi.String(n), + DeviceName: 
fi.PtrTo(n), }) } if e.Tags != nil { tf.TagSpecifications = append(tf.TagSpecifications, &terraformLaunchTemplateTagSpecification{ - ResourceType: fi.String("instance"), + ResourceType: fi.PtrTo("instance"), Tags: e.Tags, }) tf.TagSpecifications = append(tf.TagSpecifications, &terraformLaunchTemplateTagSpecification{ - ResourceType: fi.String("volume"), + ResourceType: fi.PtrTo("volume"), Tags: e.Tags, }) tf.Tags = e.Tags } - return target.RenderResource("aws_launch_template", fi.StringValue(e.Name), tf) + return target.RenderResource("aws_launch_template", fi.ValueOf(e.Name), tf) } diff --git a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform_test.go b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform_test.go index ec16255566fd3..ca76689cad5d1 100644 --- a/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform_test.go +++ b/upup/pkg/fi/cloudup/awstasks/launchtemplate_target_terraform_test.go @@ -26,31 +26,31 @@ func TestLaunchTemplateTerraformRender(t *testing.T) { cases := []*renderTest{ { Resource: &LaunchTemplate{ - Name: fi.String("test"), - AssociatePublicIP: fi.Bool(true), + Name: fi.PtrTo("test"), + AssociatePublicIP: fi.PtrTo(true), IAMInstanceProfile: &IAMInstanceProfile{ - Name: fi.String("nodes"), + Name: fi.PtrTo("nodes"), }, - ID: fi.String("test-11"), - InstanceMonitoring: fi.Bool(true), - InstanceType: fi.String("t2.medium"), - SpotPrice: fi.String("0.1"), - SpotDurationInMinutes: fi.Int64(60), - InstanceInterruptionBehavior: fi.String("hibernate"), - RootVolumeOptimization: fi.Bool(true), - RootVolumeIops: fi.Int64(100), - RootVolumeSize: fi.Int64(64), + ID: fi.PtrTo("test-11"), + InstanceMonitoring: fi.PtrTo(true), + InstanceType: fi.PtrTo("t2.medium"), + SpotPrice: fi.PtrTo("0.1"), + SpotDurationInMinutes: fi.PtrTo(int64(60)), + InstanceInterruptionBehavior: fi.PtrTo("hibernate"), + RootVolumeOptimization: fi.PtrTo(true), + RootVolumeIops: fi.PtrTo(int64(100)), + RootVolumeSize: fi.PtrTo(int64(64)), SSHKey: &SSHKey{ - Name: fi.String("newkey"), + Name: fi.PtrTo("newkey"), PublicKey: fi.NewStringResource("newkey"), }, SecurityGroups: []*SecurityGroup{ - {Name: fi.String("nodes-1"), ID: fi.String("1111")}, - {Name: fi.String("nodes-2"), ID: fi.String("2222")}, + {Name: fi.PtrTo("nodes-1"), ID: fi.PtrTo("1111")}, + {Name: fi.PtrTo("nodes-2"), ID: fi.PtrTo("2222")}, }, - Tenancy: fi.String("dedicated"), - HTTPTokens: fi.String("optional"), - HTTPPutResponseHopLimit: fi.Int64(1), + Tenancy: fi.PtrTo("dedicated"), + HTTPTokens: fi.PtrTo("optional"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(1)), }, Expected: `provider "aws" { region = "eu-west-2" @@ -107,36 +107,36 @@ terraform { }, { Resource: &LaunchTemplate{ - Name: fi.String("test"), - AssociatePublicIP: fi.Bool(true), + Name: fi.PtrTo("test"), + AssociatePublicIP: fi.PtrTo(true), IAMInstanceProfile: &IAMInstanceProfile{ - Name: fi.String("nodes"), + Name: fi.PtrTo("nodes"), }, BlockDeviceMappings: []*BlockDeviceMapping{ { - DeviceName: fi.String("/dev/xvdd"), - EbsVolumeType: fi.String("gp2"), - EbsVolumeSize: fi.Int64(100), - EbsDeleteOnTermination: fi.Bool(true), - EbsEncrypted: fi.Bool(true), + DeviceName: fi.PtrTo("/dev/xvdd"), + EbsVolumeType: fi.PtrTo("gp2"), + EbsVolumeSize: fi.PtrTo(int64(100)), + EbsDeleteOnTermination: fi.PtrTo(true), + EbsEncrypted: fi.PtrTo(true), }, }, - ID: fi.String("test-11"), - InstanceMonitoring: fi.Bool(true), - InstanceType: fi.String("t2.medium"), - RootVolumeOptimization: fi.Bool(true), - RootVolumeIops: fi.Int64(100), - RootVolumeSize: fi.Int64(64), + 
ID: fi.PtrTo("test-11"), + InstanceMonitoring: fi.PtrTo(true), + InstanceType: fi.PtrTo("t2.medium"), + RootVolumeOptimization: fi.PtrTo(true), + RootVolumeIops: fi.PtrTo(int64(100)), + RootVolumeSize: fi.PtrTo(int64(64)), SSHKey: &SSHKey{ - Name: fi.String("mykey"), + Name: fi.PtrTo("mykey"), }, SecurityGroups: []*SecurityGroup{ - {Name: fi.String("nodes-1"), ID: fi.String("1111")}, - {Name: fi.String("nodes-2"), ID: fi.String("2222")}, + {Name: fi.PtrTo("nodes-1"), ID: fi.PtrTo("1111")}, + {Name: fi.PtrTo("nodes-2"), ID: fi.PtrTo("2222")}, }, - Tenancy: fi.String("dedicated"), - HTTPTokens: fi.String("required"), - HTTPPutResponseHopLimit: fi.Int64(5), + Tenancy: fi.PtrTo("dedicated"), + HTTPTokens: fi.PtrTo("required"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(5)), }, Expected: `provider "aws" { region = "eu-west-2" diff --git a/upup/pkg/fi/cloudup/awstasks/natgateway.go b/upup/pkg/fi/cloudup/awstasks/natgateway.go index 827c4fc7899da..85c44838e8752 100644 --- a/upup/pkg/fi/cloudup/awstasks/natgateway.go +++ b/upup/pkg/fi/cloudup/awstasks/natgateway.go @@ -64,7 +64,7 @@ func (e *NatGateway) Find(c *fi.Context) (*NatGateway, error) { var ngw *ec2.NatGateway actual := &NatGateway{} - if fi.StringValue(e.ID) != "" { + if fi.ValueOf(e.ID) != "" { // We have an existing NGW, lets look up the EIP var ngwIds []*string ngwIds = append(ngwIds, e.ID) @@ -79,7 +79,7 @@ func (e *NatGateway) Find(c *fi.Context) (*NatGateway, error) { } if len(response.NatGateways) != 1 { - return nil, fmt.Errorf("found %d Nat Gateways with ID %q, expected 1", len(response.NatGateways), fi.StringValue(e.ID)) + return nil, fmt.Errorf("found %d Nat Gateways with ID %q, expected 1", len(response.NatGateways), fi.ValueOf(e.ID)) } ngw = response.NatGateways[0] @@ -233,7 +233,7 @@ func findNatGatewayFromRouteTable(cloud awsup.AWSCloud, routeTable *RouteTable) return nil, err } - if raws.HasOwnedTag(ec2.ResourceTypeNatgateway+":"+fi.StringValue(natGatewayID), gw.Tags, clusterName) { + if raws.HasOwnedTag(ec2.ResourceTypeNatgateway+":"+fi.ValueOf(natGatewayID), gw.Tags, clusterName) { filteredNatGateways = append(filteredNatGateways, gw) } } @@ -256,7 +256,7 @@ func findNatGatewayFromRouteTable(cloud awsup.AWSCloud, routeTable *RouteTable) func (s *NatGateway) CheckChanges(a, e, changes *NatGateway) error { // New if a == nil { - if !fi.BoolValue(e.Shared) { + if !fi.ValueOf(e.Shared) { if e.ElasticIP == nil { return fi.RequiredField("ElasticIP") } @@ -274,11 +274,11 @@ func (s *NatGateway) CheckChanges(a, e, changes *NatGateway) error { if changes.ElasticIP != nil { eID := "" if e.ElasticIP != nil { - eID = fi.StringValue(e.ElasticIP.ID) + eID = fi.ValueOf(e.ElasticIP.ID) } aID := "" if a.ElasticIP != nil { - aID = fi.StringValue(a.ElasticIP.ID) + aID = fi.ValueOf(a.ElasticIP.ID) } return fi.FieldIsImmutable(eID, aID, field.NewPath("ElasticIP")) } @@ -302,8 +302,8 @@ func (_ *NatGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *NatGateway) var id *string if a == nil { - if fi.BoolValue(e.Shared) { - return fmt.Errorf("NAT gateway %q not found", fi.StringValue(e.ID)) + if fi.ValueOf(e.Shared) { + return fmt.Errorf("NAT gateway %q not found", fi.ValueOf(e.ID)) } klog.V(2).Infof("Creating Nat Gateway") @@ -348,12 +348,12 @@ func (_ *NatGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *NatGateway) // This is better than just a tag that's shared because this lets us create a whitelist of these NGWs // without doing a bunch more work in `kutil/delete_cluster.go` - if fi.BoolValue(e.Shared) { + if 
fi.ValueOf(e.Shared) { if e.AssociatedRouteTable == nil { return fmt.Errorf("AssociatedRouteTable not provided") } - klog.V(2).Infof("tagging route table %s to track shared NGW", fi.StringValue(e.AssociatedRouteTable.ID)) - err = t.AddAWSTags(fi.StringValue(e.AssociatedRouteTable.ID), tags) + klog.V(2).Infof("tagging route table %s to track shared NGW", fi.ValueOf(e.AssociatedRouteTable.ID)) + err = t.AddAWSTags(fi.ValueOf(e.AssociatedRouteTable.ID), tags) if err != nil { return fmt.Errorf("unable to tag route table %v", err) } @@ -369,7 +369,7 @@ type terraformNATGateway struct { } func (_ *NatGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *NatGateway) error { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { return fmt.Errorf("ID must be set, if NatGateway is shared: %s", e) } @@ -388,7 +388,7 @@ func (_ *NatGateway) RenderTerraform(t *terraform.TerraformTarget, a, e, changes } func (e *NatGateway) TerraformLink() *terraformWriter.Literal { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { klog.Fatalf("ID must be set, if NatGateway is shared: %s", e) } @@ -406,7 +406,7 @@ type cloudformationNATGateway struct { } func (_ *NatGateway) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *NatGateway) error { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { return fmt.Errorf("ID must be set, if NatGateway is shared: %s", e) } @@ -425,7 +425,7 @@ func (_ *NatGateway) RenderCloudformation(t *cloudformation.CloudformationTarget } func (e *NatGateway) CloudformationLink() *cloudformation.Literal { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if e.ID == nil { klog.Fatalf("ID must be set, if NatGateway is shared: %s", e) } diff --git a/upup/pkg/fi/cloudup/awstasks/network_load_balancer.go b/upup/pkg/fi/cloudup/awstasks/network_load_balancer.go index ea5270cdf558a..cb779fd323f5d 100644 --- a/upup/pkg/fi/cloudup/awstasks/network_load_balancer.go +++ b/upup/pkg/fi/cloudup/awstasks/network_load_balancer.go @@ -92,8 +92,8 @@ type NetworkLoadBalancerListener struct { func (e *NetworkLoadBalancerListener) mapToAWS(targetGroups []*TargetGroup, loadBalancerArn string) (*elbv2.CreateListenerInput, error) { var tgARN string for _, tg := range targetGroups { - if fi.StringValue(tg.Name) == e.TargetGroupName { - tgARN = fi.StringValue(tg.ARN) + if fi.ValueOf(tg.Name) == e.TargetGroupName { + tgARN = fi.ValueOf(tg.ARN) } } if tgARN == "" { @@ -324,11 +324,11 @@ func (e *NetworkLoadBalancer) Find(c *fi.Context) (*NetworkLoadBalancer, error) if len(l.DefaultActions) > 0 { targetGroupARN := l.DefaultActions[0].TargetGroupArn if targetGroupARN != nil { - targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.StringValue(targetGroupARN)) + targetGroupName, err := awsup.GetTargetGroupNameFromARN(fi.ValueOf(targetGroupARN)) if err != nil { return nil, err } - actual.TargetGroups = append(actual.TargetGroups, &TargetGroup{ARN: targetGroupARN, Name: fi.String(targetGroupName)}) + actual.TargetGroups = append(actual.TargetGroups, &TargetGroup{ARN: targetGroupARN, Name: fi.PtrTo(targetGroupName)}) cloud := c.Cloud.(awsup.AWSCloud) descResp, err := cloud.ELBV2().DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{ @@ -367,7 +367,7 @@ func (e *NetworkLoadBalancer) Find(c *fi.Context) (*NetworkLoadBalancer, error) if err != nil { return nil, err } - actual.CrossZoneLoadBalancing = fi.Bool(b) + actual.CrossZoneLoadBalancing = fi.PtrTo(b) case "access_logs.s3.enabled": b, err := 
strconv.ParseBool(*value) if err != nil { @@ -376,19 +376,19 @@ func (e *NetworkLoadBalancer) Find(c *fi.Context) (*NetworkLoadBalancer, error) if actual.AccessLog == nil { actual.AccessLog = &NetworkLoadBalancerAccessLog{} } - actual.AccessLog.Enabled = fi.Bool(b) + actual.AccessLog.Enabled = fi.PtrTo(b) case "access_logs.s3.bucket": if actual.AccessLog == nil { actual.AccessLog = &NetworkLoadBalancerAccessLog{} } - if fi.StringValue(value) != "" { + if fi.ValueOf(value) != "" { actual.AccessLog.S3BucketName = value } case "access_logs.s3.prefix": if actual.AccessLog == nil { actual.AccessLog = &NetworkLoadBalancerAccessLog{} } - if fi.StringValue(value) != "" { + if fi.ValueOf(value) != "" { actual.AccessLog.S3BucketPrefix = value } default: @@ -412,13 +412,13 @@ func (e *NetworkLoadBalancer) Find(c *fi.Context) (*NetworkLoadBalancer, error) } // An existing internal NLB can't be updated to dualstack. - if fi.StringValue(actual.Scheme) == elbv2.LoadBalancerSchemeEnumInternal && fi.StringValue(actual.IpAddressType) == elbv2.IpAddressTypeIpv4 { + if fi.ValueOf(actual.Scheme) == elbv2.LoadBalancerSchemeEnumInternal && fi.ValueOf(actual.IpAddressType) == elbv2.IpAddressTypeIpv4 { e.IpAddressType = actual.IpAddressType } // We allow for the LoadBalancerName to be wrong: // 1. We don't want to force a rename of the NLB, because that is a destructive operation - if fi.StringValue(e.LoadBalancerName) != fi.StringValue(actual.LoadBalancerName) { + if fi.ValueOf(e.LoadBalancerName) != fi.ValueOf(actual.LoadBalancerName) { klog.V(2).Infof("Reusing existing load balancer with name: %q", aws.StringValue(actual.LoadBalancerName)) e.LoadBalancerName = actual.LoadBalancerName } @@ -449,19 +449,19 @@ func (e *NetworkLoadBalancer) FindAddresses(context *fi.Context) ([]string, erro if err != nil { return nil, fmt.Errorf("failed to find load balancer matching %q: %w", e.Tags["Name"], err) } - if lb != nil && fi.StringValue(lb.DNSName) != "" { - addresses = append(addresses, fi.StringValue(lb.DNSName)) + if lb != nil && fi.ValueOf(lb.DNSName) != "" { + addresses = append(addresses, fi.ValueOf(lb.DNSName)) } } if cluster.UsesNoneDNS() { - nis, err := cloud.FindELBV2NetworkInterfacesByName(fi.StringValue(e.VPC.ID), fi.StringValue(e.LoadBalancerName)) + nis, err := cloud.FindELBV2NetworkInterfacesByName(fi.ValueOf(e.VPC.ID), fi.ValueOf(e.LoadBalancerName)) if err != nil { - return nil, fmt.Errorf("failed to find network interfaces matching %q: %w", fi.StringValue(e.LoadBalancerName), err) + return nil, fmt.Errorf("failed to find network interfaces matching %q: %w", fi.ValueOf(e.LoadBalancerName), err) } for _, ni := range nis { - if fi.StringValue(ni.PrivateIpAddress) != "" { - addresses = append(addresses, fi.StringValue(ni.PrivateIpAddress)) + if fi.ValueOf(ni.PrivateIpAddress) != "" { + addresses = append(addresses, fi.ValueOf(ni.PrivateIpAddress)) } } } @@ -485,7 +485,7 @@ func (e *NetworkLoadBalancer) Normalize(c *fi.Context) error { func (*NetworkLoadBalancer) CheckChanges(a, e, changes *NetworkLoadBalancer) error { if a == nil { - if fi.StringValue(e.Name) == "" { + if fi.ValueOf(e.Name) == "" { return fi.RequiredField("Name") } if len(e.SubnetMappings) == 0 { @@ -526,7 +526,7 @@ func (*NetworkLoadBalancer) CheckChanges(a, e, changes *NetworkLoadBalancer) err if !ok { return fmt.Errorf("network load balancers do not support detaching subnets") } - if fi.StringValue(eIP) != fi.StringValue(s.PrivateIPv4Address) || fi.StringValue(eIP) != fi.StringValue(s.AllocationID) { + if fi.ValueOf(eIP) != 
fi.ValueOf(s.PrivateIPv4Address) || fi.ValueOf(eIP) != fi.ValueOf(s.AllocationID) { return fmt.Errorf("network load balancers do not support modifying address settings") } } @@ -585,14 +585,14 @@ func (_ *NetworkLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Ne e.DNSName = lb.DNSName e.HostedZoneId = lb.CanonicalHostedZoneId e.VPC = &VPC{ID: lb.VpcId} - loadBalancerArn = fi.StringValue(lb.LoadBalancerArn) + loadBalancerArn = fi.ValueOf(lb.LoadBalancerArn) } // Wait for all load balancer components to be created (including network interfaces needed for NoneDNS). // Limiting this to clusters using NoneDNS because load balancer creation is quite slow. for _, tg := range e.TargetGroups { - if strings.HasPrefix(fi.StringValue(tg.Name), "kops-controller") { + if strings.HasPrefix(fi.ValueOf(tg.Name), "kops-controller") { klog.Infof("Waiting for load balancer %q to be created...", loadBalancerName) request := &elbv2.DescribeLoadBalancersInput{ Names: []*string{&loadBalancerName}, @@ -620,14 +620,14 @@ func (_ *NetworkLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Ne } } } else { - loadBalancerName = fi.StringValue(a.LoadBalancerName) + loadBalancerName = fi.ValueOf(a.LoadBalancerName) lb, err := findNetworkLoadBalancerByLoadBalancerName(t.Cloud, loadBalancerName) if err != nil { return fmt.Errorf("error getting load balancer by name: %v", err) } - loadBalancerArn = fi.StringValue(lb.LoadBalancerArn) + loadBalancerArn = fi.ValueOf(lb.LoadBalancerArn) if changes.IpAddressType != nil { request := &elbv2.SetIpAddressTypeInput{ @@ -655,7 +655,7 @@ func (_ *NetworkLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Ne hasChanges := false for _, s := range e.SubnetMappings { aIP, ok := actualSubnets[*s.Subnet.ID] - if !ok || (fi.StringValue(s.PrivateIPv4Address) != fi.StringValue(aIP) && fi.StringValue(s.AllocationID) != fi.StringValue(aIP)) { + if !ok || (fi.ValueOf(s.PrivateIPv4Address) != fi.ValueOf(aIP) && fi.ValueOf(s.AllocationID) != fi.ValueOf(aIP)) { hasChanges = true } awsSubnetMappings = append(awsSubnetMappings, &elbv2.SubnetMapping{ @@ -764,10 +764,10 @@ type terraformNetworkLoadBalancerListenerAction struct { func (_ *NetworkLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *NetworkLoadBalancer) error { nlbTF := &terraformNetworkLoadBalancer{ Name: *e.LoadBalancerName, - Internal: fi.StringValue(e.Scheme) == elbv2.LoadBalancerSchemeEnumInternal, + Internal: fi.ValueOf(e.Scheme) == elbv2.LoadBalancerSchemeEnumInternal, Type: elbv2.LoadBalancerTypeEnumNetwork, Tags: e.Tags, - CrossZoneLoadBalancing: fi.BoolValue(e.CrossZoneLoadBalancing), + CrossZoneLoadBalancing: fi.ValueOf(e.CrossZoneLoadBalancing), } for _, subnetMapping := range e.SubnetMappings { @@ -778,7 +778,7 @@ func (_ *NetworkLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e }) } - if e.AccessLog != nil && fi.BoolValue(e.AccessLog.Enabled) { + if e.AccessLog != nil && fi.ValueOf(e.AccessLog.Enabled) { nlbTF.AccessLog = &terraformNetworkLoadBalancerAccessLog{ Enabled: e.AccessLog.Enabled, S3BucketName: e.AccessLog.S3BucketName, @@ -981,7 +981,7 @@ func (e *NetworkLoadBalancer) FindDeletions(context *fi.Context) ([]fi.Deletion, cloud := context.Cloud.(awsup.AWSCloud) - lb, err := cloud.FindELBByNameTag(fi.StringValue(e.CLBName)) + lb, err := cloud.FindELBByNameTag(fi.ValueOf(e.CLBName)) if err != nil { return nil, err } diff --git a/upup/pkg/fi/cloudup/awstasks/networkloadbalancer_attributes.go 
b/upup/pkg/fi/cloudup/awstasks/networkloadbalancer_attributes.go index 7f6234e2741b2..bfe8eb0ee2fed 100644 --- a/upup/pkg/fi/cloudup/awstasks/networkloadbalancer_attributes.go +++ b/upup/pkg/fi/cloudup/awstasks/networkloadbalancer_attributes.go @@ -72,7 +72,7 @@ func (_ *NetworkLoadBalancer) modifyLoadBalancerAttributes(t *awsup.AWSAPITarget return nil } - loadBalancerName := fi.StringValue(e.LoadBalancerName) + loadBalancerName := fi.ValueOf(e.LoadBalancerName) request := &elbv2.ModifyLoadBalancerAttributesInput{ LoadBalancerArn: aws.String(loadBalancerArn), diff --git a/upup/pkg/fi/cloudup/awstasks/render_test.go b/upup/pkg/fi/cloudup/awstasks/render_test.go index 10bc21dcf9b24..2bd4b835a6f52 100644 --- a/upup/pkg/fi/cloudup/awstasks/render_test.go +++ b/upup/pkg/fi/cloudup/awstasks/render_test.go @@ -25,7 +25,6 @@ import ( "k8s.io/kops/pkg/diff" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/cloudup/awsup" - "k8s.io/kops/upup/pkg/fi/cloudup/cloudformation" "k8s.io/kops/upup/pkg/fi/cloudup/terraform" ) @@ -47,9 +46,6 @@ func doRenderTests(t *testing.T, method string, cases []*renderTest) { case "RenderTerraform": target = terraform.NewTerraformTarget(cloud, "test", nil, outdir, nil) filename = "kubernetes.tf" - case "RenderCloudformation": - target = cloudformation.NewCloudformationTarget(cloud, "test", outdir) - filename = "kubernetes.json" default: t.Errorf("unknown render method: %s", method) t.FailNow() diff --git a/upup/pkg/fi/cloudup/awstasks/routetableassociation.go b/upup/pkg/fi/cloudup/awstasks/routetableassociation.go index c607f590940d6..03a6d42f3ac6c 100644 --- a/upup/pkg/fi/cloudup/awstasks/routetableassociation.go +++ b/upup/pkg/fi/cloudup/awstasks/routetableassociation.go @@ -123,7 +123,7 @@ func findExistingRouteTableForSubnet(cloud awsup.AWSCloud, subnet *Subnet) (*ec2 return nil, fmt.Errorf("subnet ID not set") } - subnetID := fi.StringValue(subnet.ID) + subnetID := fi.ValueOf(subnet.ID) request := &ec2.DescribeRouteTablesInput{ Filters: []*ec2.Filter{awsup.NewEC2Filter("association.subnet-id", subnetID)}, diff --git a/upup/pkg/fi/cloudup/awstasks/securitygroup.go b/upup/pkg/fi/cloudup/awstasks/securitygroup.go index 2e15115ef7826..91f2396e160e9 100644 --- a/upup/pkg/fi/cloudup/awstasks/securitygroup.go +++ b/upup/pkg/fi/cloudup/awstasks/securitygroup.go @@ -63,7 +63,7 @@ type OrderSecurityGroupsById []*SecurityGroup func (a OrderSecurityGroupsById) Len() int { return len(a) } func (a OrderSecurityGroupsById) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a OrderSecurityGroupsById) Less(i, j int) bool { - return fi.StringValue(a[i].ID) < fi.StringValue(a[j].ID) + return fi.ValueOf(a[i].ID) < fi.ValueOf(a[j].ID) } func (e *SecurityGroup) Find(c *fi.Context) (*SecurityGroup, error) { @@ -101,10 +101,10 @@ func (e *SecurityGroup) findEc2(c *fi.Context) (*ec2.SecurityGroup, error) { cloud := c.Cloud.(awsup.AWSCloud) request := &ec2.DescribeSecurityGroupsInput{} - if fi.StringValue(e.ID) != "" { + if fi.ValueOf(e.ID) != "" { // Find by ID. request.GroupIds = []*string{e.ID} - } else if fi.StringValue(e.Name) != "" && e.VPC != nil && e.VPC.ID != nil { + } else if fi.ValueOf(e.Name) != "" && e.VPC != nil && e.VPC.ID != nil { // Find by filters (name and VPC ID). 
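// NOTE: guards like fi.ValueOf(e.ID) != "" and if fi.ValueOf(e.Shared) rely
// on ValueOf mapping a nil pointer to the zero value ("" / false / 0), the
// same contract the old fi.StringValue and fi.BoolValue helpers had, so call
// sites need no explicit nil check. A small usage sketch under that
// assumption (fi.PtrTo/fi.ValueOf as above, "sg-0123456789" is illustrative):
//
//	import "fmt"
//
//	var id *string
//	fmt.Println(fi.ValueOf(id) == "")  // true: nil yields the zero value ""
//	id = fi.PtrTo("sg-0123456789")
//	fmt.Println(fi.ValueOf(id) != "")  // true
//	shared := (*bool)(nil)
//	fmt.Println(fi.ValueOf(shared))    // false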
filters := cloud.BuildFilters(e.Name) filters = append(filters, awsup.NewEC2Filter("vpc-id", *e.VPC.ID)) @@ -136,7 +136,7 @@ func (e *SecurityGroup) Run(c *fi.Context) error { } func (_ *SecurityGroup) ShouldCreate(a, e, changes *SecurityGroup) (bool, error) { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { return false, nil } return true, nil @@ -147,7 +147,7 @@ func (_ *SecurityGroup) CheckChanges(a, e, changes *SecurityGroup) error { if changes.ID != nil { return fi.CannotChangeField("ID") } - if changes.Name != nil && !fi.BoolValue(e.Shared) { + if changes.Name != nil && !fi.ValueOf(e.Shared) { return fi.CannotChangeField("Name") } if changes.VPC != nil { @@ -158,7 +158,7 @@ func (_ *SecurityGroup) CheckChanges(a, e, changes *SecurityGroup) error { } func (_ *SecurityGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *SecurityGroup) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Do we want to do any verification of the security group? return nil @@ -193,7 +193,7 @@ type terraformSecurityGroup struct { } func (_ *SecurityGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *SecurityGroup) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed return nil @@ -210,7 +210,7 @@ func (_ *SecurityGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, chan } func (e *SecurityGroup) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed if e.ID != nil { @@ -231,7 +231,7 @@ type cloudformationSecurityGroup struct { } func (_ *SecurityGroup) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *SecurityGroup) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not cloudformation owned / managed return nil @@ -248,7 +248,7 @@ func (_ *SecurityGroup) RenderCloudformation(t *cloudformation.CloudformationTar } func (e *SecurityGroup) CloudformationLink() *cloudformation.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not cloudformation owned / managed if e.ID != nil { @@ -275,7 +275,7 @@ func (d *deleteSecurityGroupRule) Delete(t fi.Target) error { return fmt.Errorf("unexpected target type for deletion: %T", t) } - if fi.BoolValue(d.rule.IsEgress) { + if fi.ValueOf(d.rule.IsEgress) { request := &ec2.RevokeSecurityGroupEgressInput{ GroupId: d.rule.GroupId, SecurityGroupRuleIds: []*string{d.rule.SecurityGroupRuleId}, @@ -307,7 +307,7 @@ func (d *deleteSecurityGroupRule) TaskName() string { } func (d *deleteSecurityGroupRule) Item() string { - s := fi.StringValue(d.rule.GroupId) + ":" + s := fi.ValueOf(d.rule.GroupId) + ":" p := d.rule if aws.Int64Value(p.FromPort) != 0 { s += fmt.Sprintf(" port=%d", aws.Int64Value(p.FromPort)) @@ -390,7 +390,7 @@ func (e *SecurityGroup) FindDeletions(c *fi.Context) ([]fi.Deletion, error) { } if er.SourceGroup != nil && er.SourceGroup.ID == nil { - klog.V(4).Infof("Deletion skipping find of SecurityGroupRule %s, because SourceGroup was not found", fi.StringValue(er.Name)) + klog.V(4).Infof("Deletion skipping find of SecurityGroupRule %s, because SourceGroup was not found", fi.ValueOf(er.Name)) return nil, nil } diff --git a/upup/pkg/fi/cloudup/awstasks/securitygroup_test.go b/upup/pkg/fi/cloudup/awstasks/securitygroup_test.go index 479d7705c119a..301bdd30d3eac 100644 --- a/upup/pkg/fi/cloudup/awstasks/securitygroup_test.go +++ 
b/upup/pkg/fi/cloudup/awstasks/securitygroup_test.go @@ -139,7 +139,7 @@ func TestSecurityGroupCreate(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(sg1.ID) == "" { + if fi.ValueOf(sg1.ID) == "" { t.Fatalf("ID not set after create") } diff --git a/upup/pkg/fi/cloudup/awstasks/securitygrouprule.go b/upup/pkg/fi/cloudup/awstasks/securitygrouprule.go index 9366a46b94288..4126d6ee609c4 100644 --- a/upup/pkg/fi/cloudup/awstasks/securitygrouprule.go +++ b/upup/pkg/fi/cloudup/awstasks/securitygrouprule.go @@ -63,7 +63,7 @@ func (e *SecurityGroupRule) Find(c *fi.Context) (*SecurityGroupRule, error) { } if e.SourceGroup != nil && e.SourceGroup.ID == nil { - klog.V(4).Infof("Skipping find of SecurityGroupRule %s, because SourceGroup was not found", fi.StringValue(e.Name)) + klog.V(4).Infof("Skipping find of SecurityGroupRule %s, because SourceGroup was not found", fi.ValueOf(e.Name)) return nil, nil } @@ -108,11 +108,11 @@ func (e *SecurityGroupRule) Find(c *fi.Context) (*SecurityGroupRule, error) { actual.Protocol = nil } - if fi.StringValue(actual.Protocol) != "icmpv6" { - if fi.Int64Value(actual.FromPort) == int64(-1) { + if fi.ValueOf(actual.Protocol) != "icmpv6" { + if fi.ValueOf(actual.FromPort) == int64(-1) { actual.FromPort = nil } - if fi.Int64Value(actual.ToPort) == int64(-1) { + if fi.ValueOf(actual.ToPort) == int64(-1) { actual.ToPort = nil } } @@ -175,15 +175,15 @@ func (e *SecurityGroupRule) matches(rule *ec2.SecurityGroupRule) bool { return false } - if fi.StringValue(e.CIDR) != fi.StringValue(rule.CidrIpv4) { + if fi.ValueOf(e.CIDR) != fi.ValueOf(rule.CidrIpv4) { return false } - if fi.StringValue(e.IPv6CIDR) != fi.StringValue(rule.CidrIpv6) { + if fi.ValueOf(e.IPv6CIDR) != fi.ValueOf(rule.CidrIpv6) { return false } - if fi.StringValue(e.PrefixList) != fi.StringValue(rule.PrefixListId) { + if fi.ValueOf(e.PrefixList) != fi.ValueOf(rule.PrefixListId) { return false } @@ -191,7 +191,7 @@ func (e *SecurityGroupRule) matches(rule *ec2.SecurityGroupRule) bool { if e.SourceGroup == nil || rule.ReferencedGroupInfo == nil { return false } - if fi.StringValue(e.SourceGroup.ID) != fi.StringValue(rule.ReferencedGroupInfo.GroupId) { + if fi.ValueOf(e.SourceGroup.ID) != fi.ValueOf(rule.ReferencedGroupInfo.GroupId) { return false } } @@ -242,7 +242,7 @@ func (e *SecurityGroupRule) Description() string { } if e.SourceGroup != nil { - description = append(description, fmt.Sprintf("sourceGroup=%s", fi.StringValue(e.SourceGroup.ID))) + description = append(description, fmt.Sprintf("sourceGroup=%s", fi.ValueOf(e.SourceGroup.ID))) } if e.CIDR != nil { @@ -261,7 +261,7 @@ func (e *SecurityGroupRule) Description() string { } func (_ *SecurityGroupRule) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *SecurityGroupRule) error { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) if a == nil { protocol := e.Protocol @@ -304,7 +304,7 @@ func (_ *SecurityGroupRule) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Secu description := e.Description() - if fi.BoolValue(e.Egress) { + if fi.ValueOf(e.Egress) { request := &ec2.AuthorizeSecurityGroupEgressInput{ GroupId: e.SecurityGroup.ID, } @@ -356,29 +356,29 @@ type terraformSecurityGroupIngress struct { func (_ *SecurityGroupRule) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *SecurityGroupRule) error { tf := &terraformSecurityGroupIngress{ - Type: fi.String("ingress"), + Type: fi.PtrTo("ingress"), SecurityGroup: e.SecurityGroup.TerraformLink(), FromPort: e.FromPort, ToPort: e.ToPort, Protocol: 
e.Protocol, } - if fi.BoolValue(e.Egress) { - tf.Type = fi.String("egress") + if fi.ValueOf(e.Egress) { + tf.Type = fi.PtrTo("egress") } if e.Protocol == nil { - tf.Protocol = fi.String("-1") - tf.FromPort = fi.Int64(0) - tf.ToPort = fi.Int64(0) + tf.Protocol = fi.PtrTo("-1") + tf.FromPort = fi.PtrTo(int64(0)) + tf.ToPort = fi.PtrTo(int64(0)) } if tf.FromPort == nil { // FromPort is required by tf - tf.FromPort = fi.Int64(0) + tf.FromPort = fi.PtrTo(int64(0)) } if tf.ToPort == nil { // ToPort is required by tf - tf.ToPort = fi.Int64(65535) + tf.ToPort = fi.PtrTo(int64(65535)) } if e.SourceGroup != nil { @@ -413,7 +413,7 @@ type cloudformationSecurityGroupIngress struct { func (_ *SecurityGroupRule) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *SecurityGroupRule) error { cfType := "AWS::EC2::SecurityGroupIngress" - if fi.BoolValue(e.Egress) { + if fi.ValueOf(e.Egress) { cfType = "AWS::EC2::SecurityGroupEgress" } @@ -425,18 +425,18 @@ func (_ *SecurityGroupRule) RenderCloudformation(t *cloudformation.Cloudformatio } if e.Protocol == nil { - tf.Protocol = fi.String("-1") - tf.FromPort = fi.Int64(0) - tf.ToPort = fi.Int64(0) + tf.Protocol = fi.PtrTo("-1") + tf.FromPort = fi.PtrTo(int64(0)) + tf.ToPort = fi.PtrTo(int64(0)) } if tf.FromPort == nil { // FromPort is required by tf - tf.FromPort = fi.Int64(0) + tf.FromPort = fi.PtrTo(int64(0)) } if tf.ToPort == nil { // ToPort is required by tf - tf.ToPort = fi.Int64(65535) + tf.ToPort = fi.PtrTo(int64(65535)) } if e.SourceGroup != nil { diff --git a/upup/pkg/fi/cloudup/awstasks/sqs.go b/upup/pkg/fi/cloudup/awstasks/sqs.go index c5f72444eb21c..91cd47a2b1685 100644 --- a/upup/pkg/fi/cloudup/awstasks/sqs.go +++ b/upup/pkg/fi/cloudup/awstasks/sqs.go @@ -251,7 +251,7 @@ func (_ *SQS) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, return fmt.Errorf("error parsing SQS PolicyDocument: %v", err) } - cfQueueRef := cloudformation.Ref("AWS::SQS::Queue", fi.StringValue(e.Name)) + cfQueueRef := cloudformation.Ref("AWS::SQS::Queue", fi.ValueOf(e.Name)) cfQueuePolicy := &cloudformationSQSQueuePolicy{ Queues: []*cloudformation.Literal{cfQueueRef}, diff --git a/upup/pkg/fi/cloudup/awstasks/sshkey.go b/upup/pkg/fi/cloudup/awstasks/sshkey.go index 76f72b92713c9..5f8a0c08d9e96 100644 --- a/upup/pkg/fi/cloudup/awstasks/sshkey.go +++ b/upup/pkg/fi/cloudup/awstasks/sshkey.go @@ -95,18 +95,18 @@ func (e *SSHKey) find(cloud awsup.AWSCloud) (*SSHKey, error) { } // Avoid spurious changes - if fi.StringValue(k.KeyType) == ec2.KeyTypeEd25519 { + if fi.ValueOf(k.KeyType) == ec2.KeyTypeEd25519 { // Trim the trailing "=" and prefix with "SHA256:" to match the output of "ssh-keygen -lf" - fingerprint := fi.StringValue(k.KeyFingerprint) + fingerprint := fi.ValueOf(k.KeyFingerprint) fingerprint = strings.TrimRight(fingerprint, "=") fingerprint = fmt.Sprintf("SHA256:%s", fingerprint) - actual.KeyFingerprint = fi.String(fingerprint) + actual.KeyFingerprint = fi.PtrTo(fingerprint) } - if fi.StringValue(actual.KeyFingerprint) == fi.StringValue(e.KeyFingerprint) { + if fi.ValueOf(actual.KeyFingerprint) == fi.ValueOf(e.KeyFingerprint) { klog.V(2).Infof("SSH key fingerprints match; assuming public keys match") actual.PublicKey = e.PublicKey } else { - klog.V(2).Infof("Computed SSH key fingerprint mismatch: %q %q", fi.StringValue(e.KeyFingerprint), fi.StringValue(actual.KeyFingerprint)) + klog.V(2).Infof("Computed SSH key fingerprint mismatch: %q %q", fi.ValueOf(e.KeyFingerprint), fi.ValueOf(actual.KeyFingerprint)) } actual.Lifecycle = 
e.Lifecycle if actual.Shared { diff --git a/upup/pkg/fi/cloudup/awstasks/subnet.go b/upup/pkg/fi/cloudup/awstasks/subnet.go index 8db9f7e500c90..a914264250bf8 100644 --- a/upup/pkg/fi/cloudup/awstasks/subnet.go +++ b/upup/pkg/fi/cloudup/awstasks/subnet.go @@ -67,7 +67,7 @@ type OrderSubnetsById []*Subnet func (a OrderSubnetsById) Len() int { return len(a) } func (a OrderSubnetsById) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a OrderSubnetsById) Less(i, j int) bool { - return fi.StringValue(a[i].ID) < fi.StringValue(a[j].ID) + return fi.ValueOf(a[i].ID) < fi.ValueOf(a[j].ID) } func (e *Subnet) Find(c *fi.Context) (*Subnet, error) { @@ -104,12 +104,12 @@ func (e *Subnet) Find(c *fi.Context) (*Subnet, error) { break } - actual.ResourceBasedNaming = fi.Bool(aws.StringValue(subnet.PrivateDnsNameOptionsOnLaunch.HostnameType) == ec2.HostnameTypeResourceName) + actual.ResourceBasedNaming = fi.PtrTo(aws.StringValue(subnet.PrivateDnsNameOptionsOnLaunch.HostnameType) == ec2.HostnameTypeResourceName) if *actual.ResourceBasedNaming { - if fi.StringValue(actual.CIDR) != "" && !aws.BoolValue(subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsARecord) { + if fi.ValueOf(actual.CIDR) != "" && !aws.BoolValue(subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsARecord) { actual.ResourceBasedNaming = nil } - if fi.StringValue(actual.IPv6CIDR) != "" && !aws.BoolValue(subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsAAAARecord) { + if fi.ValueOf(actual.IPv6CIDR) != "" && !aws.BoolValue(subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsAAAARecord) { actual.ResourceBasedNaming = nil } } @@ -203,7 +203,7 @@ func (s *Subnet) CheckChanges(a, e, changes *Subnet) error { errors = append(errors, fi.FieldIsImmutable(e.IPv6CIDR, a.IPv6CIDR, fieldPath.Child("IPv6CIDR"))) } - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { if changes.IPv6CIDR != nil && a.IPv6CIDR == nil { errors = append(errors, field.Forbidden(fieldPath.Child("IPv6CIDR"), "field cannot be set on shared subnet")) } @@ -218,7 +218,7 @@ func (s *Subnet) CheckChanges(a, e, changes *Subnet) error { } func (_ *Subnet) ShouldCreate(a, e, changes *Subnet) (bool, error) { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { changes.ResourceBasedNaming = nil return changes.Tags != nil, nil } @@ -226,11 +226,11 @@ func (_ *Subnet) ShouldCreate(a, e, changes *Subnet) (bool, error) { } func (_ *Subnet) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Subnet) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Verify the subnet was found if a == nil { - return fmt.Errorf("subnet with id %q not found", fi.StringValue(e.ID)) + return fmt.Errorf("subnet with id %q not found", fi.ValueOf(e.ID)) } } @@ -255,7 +255,7 @@ func (_ *Subnet) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Subnet) error { } if a == nil { - klog.V(2).Infof("Creating Subnet with CIDR: %q IPv6CIDR: %q", fi.StringValue(e.CIDR), fi.StringValue(e.IPv6CIDR)) + klog.V(2).Infof("Creating Subnet with CIDR: %q IPv6CIDR: %q", fi.ValueOf(e.CIDR), fi.ValueOf(e.IPv6CIDR)) request := &ec2.CreateSubnetInput{ CidrBlock: e.CIDR, @@ -303,7 +303,7 @@ func (_ *Subnet) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Subnet) error { return fmt.Errorf("error modifying hostname type: %w", err) } - if fi.StringValue(e.CIDR) == "" { + if fi.ValueOf(e.CIDR) == "" { request = &ec2.ModifySubnetAttributeInput{ SubnetId: e.ID, EnableDns64: &ec2.AttributeBooleanValue{Value: aws.Bool(true)}, @@ -323,7 +323,7 @@ func (_ *Subnet) RenderAWS(t 
*awsup.AWSAPITarget, a, e, changes *Subnet) error { } } - if fi.StringValue(e.IPv6CIDR) != "" { + if fi.ValueOf(e.IPv6CIDR) != "" { request = &ec2.ModifySubnetAttributeInput{ SubnetId: e.ID, EnableResourceNameDnsAAAARecordOnLaunch: &ec2.AttributeBooleanValue{Value: changes.ResourceBasedNaming}, @@ -368,14 +368,14 @@ type terraformSubnet struct { } func (_ *Subnet) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Subnet) error { - if fi.StringValue(e.ShortName) != "" { - name := fi.StringValue(e.ShortName) + if fi.ValueOf(e.ShortName) != "" { + name := fi.ValueOf(e.ShortName) if err := t.AddOutputVariable("subnet_"+name+"_id", e.TerraformLink()); err != nil { return err } } - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed // We won't apply changes, but our validation (kops update) will still warn @@ -399,20 +399,20 @@ func (_ *Subnet) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Su AvailabilityZone: e.AvailabilityZone, Tags: e.Tags, } - if fi.StringValue(e.CIDR) == "" { - tf.EnableDNS64 = fi.Bool(true) - tf.IPv6Native = fi.Bool(true) + if fi.ValueOf(e.CIDR) == "" { + tf.EnableDNS64 = fi.PtrTo(true) + tf.IPv6Native = fi.PtrTo(true) } if e.ResourceBasedNaming != nil { hostnameType := ec2.HostnameTypeIpName if *e.ResourceBasedNaming { hostnameType = ec2.HostnameTypeResourceName } - tf.PrivateDNSHostnameTypeOnLaunch = fi.String(hostnameType) - if fi.StringValue(e.CIDR) != "" { + tf.PrivateDNSHostnameTypeOnLaunch = fi.PtrTo(hostnameType) + if fi.ValueOf(e.CIDR) != "" { tf.EnableResourceNameDNSARecordOnLaunch = e.ResourceBasedNaming } - if fi.StringValue(e.IPv6CIDR) != "" { + if fi.ValueOf(e.IPv6CIDR) != "" { tf.EnableResourceNameDNSAAAARecordOnLaunch = e.ResourceBasedNaming } } @@ -421,7 +421,7 @@ func (_ *Subnet) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Su } func (e *Subnet) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ID == nil { klog.Fatalf("ID must be set, if subnet is shared: %s", e) @@ -443,7 +443,7 @@ type cloudformationSubnet struct { } func (_ *Subnet) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *Subnet) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not cloudformation owned / managed // We won't apply changes, but our validation (kops update) will still warn @@ -468,7 +468,7 @@ func (_ *Subnet) RenderCloudformation(t *cloudformation.CloudformationTarget, a, } func (e *Subnet) CloudformationLink() *cloudformation.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ID == nil { klog.Fatalf("ID must be set, if subnet is shared: %s", e) diff --git a/upup/pkg/fi/cloudup/awstasks/subnet_mapping.go b/upup/pkg/fi/cloudup/awstasks/subnet_mapping.go index c29939cb4ca73..4204469936f32 100644 --- a/upup/pkg/fi/cloudup/awstasks/subnet_mapping.go +++ b/upup/pkg/fi/cloudup/awstasks/subnet_mapping.go @@ -36,14 +36,14 @@ type OrderSubnetMappingsByID []*SubnetMapping func (a OrderSubnetMappingsByID) Len() int { return len(a) } func (a OrderSubnetMappingsByID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a OrderSubnetMappingsByID) Less(i, j int) bool { - v1 := fi.StringValue(a[i].Subnet.ID) - v2 := fi.StringValue(a[j].Subnet.ID) + v1 := fi.ValueOf(a[i].Subnet.ID) + v2 := fi.ValueOf(a[j].Subnet.ID) if v1 == v2 { if a[i].PrivateIPv4Address != nil && a[j].PrivateIPv4Address != nil { - return 
fi.StringValue(a[i].PrivateIPv4Address) < fi.StringValue(a[j].PrivateIPv4Address) + return fi.ValueOf(a[i].PrivateIPv4Address) < fi.ValueOf(a[j].PrivateIPv4Address) } if a[i].AllocationID != nil && a[j].AllocationID != nil { - return fi.StringValue(a[i].AllocationID) < fi.StringValue(a[j].AllocationID) + return fi.ValueOf(a[i].AllocationID) < fi.ValueOf(a[j].AllocationID) } } return v1 < v2 @@ -55,8 +55,8 @@ type OrderSubnetMappingsByName []*SubnetMapping func (a OrderSubnetMappingsByName) Len() int { return len(a) } func (a OrderSubnetMappingsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a OrderSubnetMappingsByName) Less(i, j int) bool { - v1 := fi.StringValue(a[i].Subnet.Name) - v2 := fi.StringValue(a[j].Subnet.Name) + v1 := fi.ValueOf(a[i].Subnet.Name) + v2 := fi.ValueOf(a[j].Subnet.Name) return v1 < v2 } @@ -82,10 +82,10 @@ func subnetMappingSlicesEqualIgnoreOrder(l, r []*SubnetMapping) bool { if !ok { return false } - if fi.StringValue(s.PrivateIPv4Address) != fi.StringValue(s2.PrivateIPv4Address) { + if fi.ValueOf(s.PrivateIPv4Address) != fi.ValueOf(s2.PrivateIPv4Address) { return false } - if fi.StringValue(s.AllocationID) != fi.StringValue(s2.AllocationID) { + if fi.ValueOf(s.AllocationID) != fi.ValueOf(s2.AllocationID) { return false } } diff --git a/upup/pkg/fi/cloudup/awstasks/subnet_test.go b/upup/pkg/fi/cloudup/awstasks/subnet_test.go index 824820c496785..6b86494e4ebe4 100644 --- a/upup/pkg/fi/cloudup/awstasks/subnet_test.go +++ b/upup/pkg/fi/cloudup/awstasks/subnet_test.go @@ -81,7 +81,7 @@ func TestSubnetCreate(t *testing.T) { Lifecycle: fi.LifecycleSync, VPC: vpc1, CIDR: s("172.20.1.0/24"), - ResourceBasedNaming: fi.Bool(true), + ResourceBasedNaming: fi.PtrTo(true), Tags: map[string]string{"Name": "subnet1"}, } @@ -109,7 +109,7 @@ func TestSubnetCreate(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(subnet1.ID) == "" { + if fi.ValueOf(subnet1.ID) == "" { t.Fatalf("ID not set after create") } @@ -170,7 +170,7 @@ func TestSubnetCreateIPv6(t *testing.T) { VPC: vpc1, CIDR: s("172.20.1.0/24"), IPv6CIDR: s("2001:db8:0:1::/64"), - ResourceBasedNaming: fi.Bool(true), + ResourceBasedNaming: fi.PtrTo(true), Tags: map[string]string{"Name": "subnet1"}, } @@ -199,7 +199,7 @@ func TestSubnetCreateIPv6(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(subnet1.ID) == "" { + if fi.ValueOf(subnet1.ID) == "" { t.Fatalf("ID not set after create") } @@ -297,7 +297,7 @@ func TestSubnetCreateIPv6NetNum(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(subnet1.ID) == "" { + if fi.ValueOf(subnet1.ID) == "" { t.Fatalf("ID not set after create") } @@ -392,7 +392,7 @@ func TestSharedSubnetCreateDoesNotCreateNew(t *testing.T) { Lifecycle: fi.LifecycleSync, CIDR: s("172.20.0.0/16"), Tags: map[string]string{"kubernetes.io/cluster/cluster.example.com": "shared"}, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), ID: vpc.Vpc.VpcId, } subnet1 := &Subnet{ @@ -401,7 +401,7 @@ func TestSharedSubnetCreateDoesNotCreateNew(t *testing.T) { VPC: vpc1, CIDR: s("172.20.1.0/24"), Tags: map[string]string{"kubernetes.io/cluster/cluster.example.com": "shared"}, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), ID: subnet.Subnet.SubnetId, } @@ -429,7 +429,7 @@ func TestSharedSubnetCreateDoesNotCreateNew(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(subnet1.ID) == "" { + if fi.ValueOf(subnet1.ID) == "" { t.Fatalf("ID not set after create") } diff --git 
a/upup/pkg/fi/cloudup/awstasks/targetgroup.go b/upup/pkg/fi/cloudup/awstasks/targetgroup.go index 3dd2496a3f094..0f41470af81b7 100644 --- a/upup/pkg/fi/cloudup/awstasks/targetgroup.go +++ b/upup/pkg/fi/cloudup/awstasks/targetgroup.go @@ -69,7 +69,7 @@ func (e *TargetGroup) Find(c *fi.Context) (*TargetGroup, error) { response, err := cloud.ELBV2().DescribeTargetGroups(request) if err != nil { if aerr, ok := err.(awserr.Error); ok && aerr.Code() == elbv2.ErrCodeTargetGroupNotFoundException { - if !fi.BoolValue(e.Shared) { + if !fi.ValueOf(e.Shared) { return nil, nil } } @@ -77,7 +77,7 @@ func (e *TargetGroup) Find(c *fi.Context) (*TargetGroup, error) { } if len(response.TargetGroups) > 1 { - return nil, fmt.Errorf("found %d TargetGroups with ID %q, expected 1", len(response.TargetGroups), fi.StringValue(e.Name)) + return nil, fmt.Errorf("found %d TargetGroups with ID %q, expected 1", len(response.TargetGroups), fi.ValueOf(e.Name)) } else if len(response.TargetGroups) == 0 { return nil, nil } @@ -108,7 +108,7 @@ func (e *TargetGroup) Find(c *fi.Context) (*TargetGroup, error) { tags := make(map[string]string) for _, tagDesc := range tagsResp.TagDescriptions { for _, tag := range tagDesc.Tags { - tags[fi.StringValue(tag.Key)] = fi.StringValue(tag.Value) + tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value) } } actual.Tags = tags @@ -152,7 +152,7 @@ func (e *TargetGroup) Run(c *fi.Context) error { } func (_ *TargetGroup) ShouldCreate(a, e, changes *TargetGroup) (bool, error) { - if fi.BoolValue(e.Shared) { + if fi.ValueOf(e.Shared) { return false, nil } return true, nil @@ -163,7 +163,7 @@ func (s *TargetGroup) CheckChanges(a, e, changes *TargetGroup) error { } func (_ *TargetGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *TargetGroup) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { return nil } @@ -191,10 +191,10 @@ func (_ *TargetGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *TargetGrou } targetGroupArn := *response.TargetGroups[0].TargetGroupArn - e.ARN = fi.String(targetGroupArn) + e.ARN = fi.PtrTo(targetGroupArn) } else { if a.ARN != nil { - if err := t.AddELBV2Tags(fi.StringValue(a.ARN), e.Tags); err != nil { + if err := t.AddELBV2Tags(fi.ValueOf(a.ARN), e.Tags); err != nil { return err } } @@ -208,7 +208,7 @@ type OrderTargetGroupsByName []*TargetGroup func (a OrderTargetGroupsByName) Len() int { return len(a) } func (a OrderTargetGroupsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a OrderTargetGroupsByName) Less(i, j int) bool { - return fi.StringValue(a[i].Name) < fi.StringValue(a[j].Name) + return fi.ValueOf(a[i].Name) < fi.ValueOf(a[j].Name) } type terraformTargetGroup struct { @@ -228,7 +228,7 @@ type terraformTargetGroupHealthCheck struct { } func (_ *TargetGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *TargetGroup) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { return nil } @@ -255,7 +255,7 @@ func (_ *TargetGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change } func (e *TargetGroup) TerraformLink(params ...string) *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ARN != nil { return terraformWriter.LiteralFromStringValue(*e.ARN) @@ -279,7 +279,7 @@ type cloudformationTargetGroup struct { } func (_ *TargetGroup) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *TargetGroup) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared 
{ return nil } @@ -298,7 +298,7 @@ func (_ *TargetGroup) RenderCloudformation(t *cloudformation.CloudformationTarge } func (e *TargetGroup) CloudformationLink() *cloudformation.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ARN != nil { return cloudformation.LiteralString(*e.ARN) diff --git a/upup/pkg/fi/cloudup/awstasks/vpc.go b/upup/pkg/fi/cloudup/awstasks/vpc.go index 3f28ed774c60f..57212d168c4c2 100644 --- a/upup/pkg/fi/cloudup/awstasks/vpc.go +++ b/upup/pkg/fi/cloudup/awstasks/vpc.go @@ -72,7 +72,7 @@ func (e *VPC) Find(c *fi.Context) (*VPC, error) { request := &ec2.DescribeVpcsInput{} - if fi.StringValue(e.ID) != "" { + if fi.ValueOf(e.ID) != "" { request.VpcIds = []*string{e.ID} } else { request.Filters = cloud.BuildFilters(e.Name) @@ -173,11 +173,11 @@ func (e *VPC) Run(c *fi.Context) error { } func (_ *VPC) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *VPC) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Verify the VPC was found and matches our required settings if a == nil { - return fmt.Errorf("VPC with id %q not found", fi.StringValue(e.ID)) + return fmt.Errorf("VPC with id %q not found", fi.ValueOf(e.ID)) } if changes != nil && changes.EnableDNSSupport != nil { @@ -185,7 +185,7 @@ func (_ *VPC) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *VPC) error { klog.Warningf("VPC did not have EnableDNSSupport=true, but ignoring because of VPCSkipEnableDNSSupport feature-flag") } else { // TODO: We could easily just allow kops to fix this... - return fmt.Errorf("VPC with id %q was set to be shared, but did not have EnableDNSSupport=true.", fi.StringValue(e.ID)) + return fmt.Errorf("VPC with id %q was set to be shared, but did not have EnableDNSSupport=true.", fi.ValueOf(e.ID)) } } } @@ -234,7 +234,7 @@ func (_ *VPC) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *VPC) error { } func (e *VPC) FindDeletions(c *fi.Context) ([]fi.Deletion, error) { - if fi.IsNilOrEmpty(e.ID) || fi.BoolValue(e.Shared) { + if fi.IsNilOrEmpty(e.ID) || fi.ValueOf(e.Shared) { return nil, nil } @@ -258,13 +258,13 @@ func (e *VPC) FindDeletions(c *fi.Context) ([]fi.Deletion, error) { for _, association := range vpc.CidrBlockAssociationSet { // We'll only delete CIDR associations that are not the primary association // and that have a state of "associated" - if fi.StringValue(association.CidrBlock) == fi.StringValue(vpc.CidrBlock) || - association.CidrBlockState != nil && fi.StringValue(association.CidrBlockState.State) != ec2.VpcCidrBlockStateCodeAssociated { + if fi.ValueOf(association.CidrBlock) == fi.ValueOf(vpc.CidrBlock) || + association.CidrBlockState != nil && fi.ValueOf(association.CidrBlockState.State) != ec2.VpcCidrBlockStateCodeAssociated { continue } match := false for _, cidr := range e.AssociateExtraCIDRBlocks { - if fi.StringValue(association.CidrBlock) == cidr { + if fi.ValueOf(association.CidrBlock) == cidr { match = true break } @@ -293,7 +293,7 @@ func (_ *VPC) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *VPC) return err } - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed // We won't apply changes, but our validation (kops update) will still warn @@ -317,7 +317,7 @@ func (_ *VPC) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *VPC) } func (e *VPC) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ID == nil { klog.Fatalf("ID must be 
set, if VPC is shared: %s", e) @@ -338,7 +338,7 @@ type cloudformationVPC struct { } func (_ *VPC) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *VPC) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not cloudformation owned / managed // We won't apply changes, but our validation (kops update) will still warn @@ -356,7 +356,7 @@ func (_ *VPC) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, } func (e *VPC) CloudformationLink() *cloudformation.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.ID == nil { klog.Fatalf("ID must be set, if VPC is shared: %s", e) diff --git a/upup/pkg/fi/cloudup/awstasks/vpc_test.go b/upup/pkg/fi/cloudup/awstasks/vpc_test.go index 919ad87b6a4c7..0ec0944be731a 100644 --- a/upup/pkg/fi/cloudup/awstasks/vpc_test.go +++ b/upup/pkg/fi/cloudup/awstasks/vpc_test.go @@ -63,7 +63,7 @@ func TestVPCCreate(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(vpc1.ID) == "" { + if fi.ValueOf(vpc1.ID) == "" { t.Fatalf("ID not set after create") } @@ -73,7 +73,7 @@ func TestVPCCreate(t *testing.T) { expected := &ec2.Vpc{ CidrBlock: s("172.21.0.0/16"), - IsDefault: fi.Bool(false), + IsDefault: fi.PtrTo(false), VpcId: vpc1.ID, Tags: buildTags(map[string]string{ "Name": "vpc1", @@ -162,7 +162,7 @@ func TestSharedVPCAdditionalCIDR(t *testing.T) { Lifecycle: fi.LifecycleSync, CIDR: s("172.21.0.0/16"), Tags: map[string]string{"Name": "vpc-1"}, - Shared: fi.Bool(true), + Shared: fi.PtrTo(true), } return map[string]fi.Task{ "vpc-1": vpc1, @@ -187,7 +187,7 @@ func TestSharedVPCAdditionalCIDR(t *testing.T) { t.Fatalf("unexpected error during Run: %v", err) } - if fi.StringValue(vpc1.ID) == "" { + if fi.ValueOf(vpc1.ID) == "" { t.Fatalf("ID not set") } @@ -197,7 +197,7 @@ func TestSharedVPCAdditionalCIDR(t *testing.T) { expected := &ec2.Vpc{ CidrBlock: s("172.21.0.0/16"), - IsDefault: fi.Bool(false), + IsDefault: fi.PtrTo(false), VpcId: vpc1.ID, Tags: buildTags(map[string]string{ "Name": "vpc-1", diff --git a/upup/pkg/fi/cloudup/awstasks/warmpool.go b/upup/pkg/fi/cloudup/awstasks/warmpool.go index c9e2077b1af1e..424af506e1654 100644 --- a/upup/pkg/fi/cloudup/awstasks/warmpool.go +++ b/upup/pkg/fi/cloudup/awstasks/warmpool.go @@ -61,16 +61,16 @@ func (e *WarmPool) Find(c *fi.Context) (*WarmPool, error) { return &WarmPool{ Name: e.Name, Lifecycle: e.Lifecycle, - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), }, nil } actual := &WarmPool{ Name: e.Name, Lifecycle: e.Lifecycle, - Enabled: fi.Bool(true), + Enabled: fi.PtrTo(true), MaxSize: warmPool.WarmPoolConfiguration.MaxGroupPreparedCapacity, - MinSize: fi.Int64Value(warmPool.WarmPoolConfiguration.MinSize), + MinSize: fi.ValueOf(warmPool.WarmPoolConfiguration.MinSize), } return actual, nil } @@ -86,16 +86,16 @@ func (*WarmPool) CheckChanges(a, e, changes *WarmPool) error { func (*WarmPool) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *WarmPool) error { svc := t.Cloud.Autoscaling() if changes != nil { - if fi.BoolValue(e.Enabled) { + if fi.ValueOf(e.Enabled) { minSize := e.MinSize maxSize := e.MaxSize if maxSize == nil { - maxSize = fi.Int64(-1) + maxSize = fi.PtrTo(int64(-1)) } request := &autoscaling.PutWarmPoolInput{ AutoScalingGroupName: e.Name, MaxGroupPreparedCapacity: maxSize, - MinSize: fi.Int64(minSize), + MinSize: fi.PtrTo(minSize), } _, err := svc.PutWarmPool(request) @@ -109,7 +109,7 @@ func (*WarmPool) RenderAWS(t *awsup.AWSAPITarget, a, e, changes 
*WarmPool) error _, err := svc.DeleteWarmPool(&autoscaling.DeleteWarmPoolInput{ AutoScalingGroupName: e.Name, // We don't need to do any cleanup so, the faster the better - ForceDelete: fi.Bool(true), + ForceDelete: fi.PtrTo(true), }) if err != nil { return fmt.Errorf("error deleting warm pool: %w", err) diff --git a/upup/pkg/fi/cloudup/awsup/aws_cloud.go b/upup/pkg/fi/cloudup/awsup/aws_cloud.go index ddb450c2876aa..22e19ebd078c3 100644 --- a/upup/pkg/fi/cloudup/awsup/aws_cloud.go +++ b/upup/pkg/fi/cloudup/awsup/aws_cloud.go @@ -2279,7 +2279,7 @@ func getApiIngressStatus(c AWSCloud, cluster *kops.Cluster) ([]fi.ApiIngressStat func findDNSName(c AWSCloud, cluster *kops.Cluster) (string, error) { name := "api." + cluster.Name - if cluster.Spec.API == nil || cluster.Spec.API.LoadBalancer == nil { + if cluster.Spec.API.LoadBalancer == nil { return "", nil } if cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassClassic { @@ -2317,7 +2317,7 @@ func (c *awsCloudImplementation) DefaultInstanceType(cluster *kops.Cluster, ig * imageArch := "x86_64" if imageInfo, err := c.ResolveImage(ig.Spec.Image); err == nil { - imageArch = fi.StringValue(imageInfo.Architecture) + imageArch = fi.ValueOf(imageInfo.Architecture) } // Find the AZs the InstanceGroup targets diff --git a/upup/pkg/fi/cloudup/azuretasks/disk.go b/upup/pkg/fi/cloudup/azuretasks/disk.go index b2f4f395f6dbd..08671b5c1f844 100644 --- a/upup/pkg/fi/cloudup/azuretasks/disk.go +++ b/upup/pkg/fi/cloudup/azuretasks/disk.go @@ -109,9 +109,9 @@ func (*Disk) CheckChanges(a, e, changes *Disk) error { // RenderAzure creates or updates a Disk. func (*Disk) RenderAzure(t *azure.AzureAPITarget, a, e, changes *Disk) error { if a == nil { - klog.Infof("Creating a new Disk with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new Disk with name: %s", fi.ValueOf(e.Name)) } else { - klog.Infof("Updating a Disk with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a Disk with name: %s", fi.ValueOf(e.Name)) } name := *e.Name diff --git a/upup/pkg/fi/cloudup/azuretasks/loadbalancer.go b/upup/pkg/fi/cloudup/azuretasks/loadbalancer.go index eca70759b0835..d10282476e8fa 100644 --- a/upup/pkg/fi/cloudup/azuretasks/loadbalancer.go +++ b/upup/pkg/fi/cloudup/azuretasks/loadbalancer.go @@ -129,9 +129,9 @@ func (*LoadBalancer) CheckChanges(a, e, changes *LoadBalancer) error { // RenderAzure creates or updates a Loadbalancer. func (*LoadBalancer) RenderAzure(t *azure.AzureAPITarget, a, e, changes *LoadBalancer) error { if a == nil { - klog.Infof("Creating a new Loadbalancer with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new Loadbalancer with name: %s", fi.ValueOf(e.Name)) } else { - klog.Infof("Updating a Loadbalancer with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a Loadbalancer with name: %s", fi.ValueOf(e.Name)) } idPrefix := fmt.Sprintf("subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network", t.Cloud.SubscriptionID(), *e.ResourceGroup.Name) diff --git a/upup/pkg/fi/cloudup/azuretasks/publicipaddress.go b/upup/pkg/fi/cloudup/azuretasks/publicipaddress.go index 673efbb9296a7..a3feac0e25f40 100644 --- a/upup/pkg/fi/cloudup/azuretasks/publicipaddress.go +++ b/upup/pkg/fi/cloudup/azuretasks/publicipaddress.go @@ -106,9 +106,9 @@ func (*PublicIPAddress) CheckChanges(a, e, changes *PublicIPAddress) error { // RenderAzure creates or updates a Public IP Address. 
func (*PublicIPAddress) RenderAzure(t *azure.AzureAPITarget, a, e, changes *PublicIPAddress) error { if a == nil { - klog.Infof("Creating a new Public IP Address with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new Public IP Address with name: %s", fi.ValueOf(e.Name)) } else { - klog.Infof("Updating a Public IP Address with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a Public IP Address with name: %s", fi.ValueOf(e.Name)) } p := network.PublicIPAddress{ diff --git a/upup/pkg/fi/cloudup/azuretasks/resourcegroup.go b/upup/pkg/fi/cloudup/azuretasks/resourcegroup.go index 511272a068a0c..afda2463b2174 100644 --- a/upup/pkg/fi/cloudup/azuretasks/resourcegroup.go +++ b/upup/pkg/fi/cloudup/azuretasks/resourcegroup.go @@ -105,9 +105,9 @@ func (*ResourceGroup) CheckChanges(a, e, changes *ResourceGroup) error { // RenderAzure creates or updates a resource group. func (*ResourceGroup) RenderAzure(t *azure.AzureAPITarget, a, e, changes *ResourceGroup) error { if a == nil { - klog.Infof("Creating a new Resource Group with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new Resource Group with name: %s", fi.ValueOf(e.Name)) } else { - klog.Infof("Updating a Resource Group with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a Resource Group with name: %s", fi.ValueOf(e.Name)) } return t.Cloud.ResourceGroup().CreateOrUpdate( context.TODO(), diff --git a/upup/pkg/fi/cloudup/azuretasks/roleassignment.go b/upup/pkg/fi/cloudup/azuretasks/roleassignment.go index 2bc2693d57a5b..63f9b9d322659 100644 --- a/upup/pkg/fi/cloudup/azuretasks/roleassignment.go +++ b/upup/pkg/fi/cloudup/azuretasks/roleassignment.go @@ -124,7 +124,7 @@ func (r *RoleAssignment) Find(c *fi.Context) (*RoleAssignment, error) { Name: foundVMSS.Name, }, ID: found.ID, - RoleDefID: fi.String(filepath.Base(fi.StringValue(found.RoleDefinitionID))), + RoleDefID: fi.PtrTo(filepath.Base(fi.ValueOf(found.RoleDefinitionID))), }, nil } diff --git a/upup/pkg/fi/cloudup/azuretasks/routetable.go b/upup/pkg/fi/cloudup/azuretasks/routetable.go index b1287533f75d0..67bb82ee0bd53 100644 --- a/upup/pkg/fi/cloudup/azuretasks/routetable.go +++ b/upup/pkg/fi/cloudup/azuretasks/routetable.go @@ -105,9 +105,9 @@ func (*RouteTable) CheckChanges(a, e, changes *RouteTable) error { // RenderAzure creates or updates a Route Table. func (*RouteTable) RenderAzure(t *azure.AzureAPITarget, a, e, changes *RouteTable) error { if a == nil { - klog.Infof("Creating a new Route Table with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new Route Table with name: %s", fi.ValueOf(e.Name)) } else { - klog.Infof("Updating a Route Table with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a Route Table with name: %s", fi.ValueOf(e.Name)) } rt := network.RouteTable{ diff --git a/upup/pkg/fi/cloudup/azuretasks/subnet.go b/upup/pkg/fi/cloudup/azuretasks/subnet.go index 45d51e71a4e71..9d05d8f1dbe1d 100644 --- a/upup/pkg/fi/cloudup/azuretasks/subnet.go +++ b/upup/pkg/fi/cloudup/azuretasks/subnet.go @@ -104,9 +104,9 @@ func (*Subnet) CheckChanges(a, e, changes *Subnet) error { // RenderAzure creates or updates a subnet. 
func (*Subnet) RenderAzure(t *azure.AzureAPITarget, a, e, changes *Subnet) error { if a == nil { - klog.Infof("Creating a new Subnet with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new Subnet with name: %s", fi.ValueOf(e.Name)) } else { - klog.Infof("Updating a Subnet with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a Subnet with name: %s", fi.ValueOf(e.Name)) } // TODO(kenji): Be able to specify security groups. diff --git a/upup/pkg/fi/cloudup/azuretasks/testing.go b/upup/pkg/fi/cloudup/azuretasks/testing.go index 61eb6679590e2..c80318685dff3 100644 --- a/upup/pkg/fi/cloudup/azuretasks/testing.go +++ b/upup/pkg/fi/cloudup/azuretasks/testing.go @@ -157,7 +157,7 @@ func (c *MockAzureCloud) GetCloudGroups( // AddClusterTags add the cluster tag to the given tag map. func (c *MockAzureCloud) AddClusterTags(tags map[string]*string) { - tags[azure.TagClusterName] = fi.String(testClusterName) + tags[azure.TagClusterName] = fi.PtrTo(testClusterName) } // FindClusterStatus discovers the status of the cluster, by looking for the tagged etcd volumes @@ -389,7 +389,7 @@ func (c *MockVMScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGrou return nil, fmt.Errorf("update not supported") } parameters.Name = &vmScaleSetName - parameters.Identity.PrincipalID = fi.String(uuid.New().String()) + parameters.Identity.PrincipalID = fi.PtrTo(uuid.New().String()) c.VMSSes[vmScaleSetName] = parameters return &parameters, nil } diff --git a/upup/pkg/fi/cloudup/azuretasks/virtualnetwork.go b/upup/pkg/fi/cloudup/azuretasks/virtualnetwork.go index c636cb2e7aa46..a75bd62d7d02a 100644 --- a/upup/pkg/fi/cloudup/azuretasks/virtualnetwork.go +++ b/upup/pkg/fi/cloudup/azuretasks/virtualnetwork.go @@ -119,13 +119,13 @@ func (*VirtualNetwork) CheckChanges(a, e, changes *VirtualNetwork) error { // RenderAzure creates or updates a Virtual Network. func (*VirtualNetwork) RenderAzure(t *azure.AzureAPITarget, a, e, changes *VirtualNetwork) error { if a == nil { - klog.Infof("Creating a new Virtual Network with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new Virtual Network with name: %s", fi.ValueOf(e.Name)) } else { // Only allow tags to be updated. if changes.Tags == nil { return nil } - klog.Infof("Updating a Virtual Network with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a Virtual Network with name: %s", fi.ValueOf(e.Name)) } vnet := network.VirtualNetwork{ diff --git a/upup/pkg/fi/cloudup/azuretasks/vmscaleset.go b/upup/pkg/fi/cloudup/azuretasks/vmscaleset.go index d45810bbf47d0..8809dd641df9c 100644 --- a/upup/pkg/fi/cloudup/azuretasks/vmscaleset.go +++ b/upup/pkg/fi/cloudup/azuretasks/vmscaleset.go @@ -269,9 +269,9 @@ func (s *VMScaleSet) CheckChanges(a, e, changes *VMScaleSet) error { // RenderAzure creates or updates a VM Scale Set.
func (s *VMScaleSet) RenderAzure(t *azure.AzureAPITarget, a, e, changes *VMScaleSet) error { if a == nil { - klog.Infof("Creating a new VM Scale Set with name: %s", fi.StringValue(e.Name)) + klog.Infof("Creating a new VM Scale Set with name: %s", fi.ValueOf(e.Name)) } else { - klog.Infof("Updating a VM Scale Set with name: %s", fi.StringValue(e.Name)) + klog.Infof("Updating a VM Scale Set with name: %s", fi.ValueOf(e.Name)) } name := *e.Name diff --git a/upup/pkg/fi/cloudup/azuretasks/vmscaleset_test.go b/upup/pkg/fi/cloudup/azuretasks/vmscaleset_test.go index 6fa93ec4987c1..444ae022be05b 100644 --- a/upup/pkg/fi/cloudup/azuretasks/vmscaleset_test.go +++ b/upup/pkg/fi/cloudup/azuretasks/vmscaleset_test.go @@ -316,7 +316,7 @@ func TestVMScaleSetRun(t *testing.T) { t.Fatalf("unexpected error: %s", err) } expectedTags := map[string]*string{ - azure.TagClusterName: fi.String(testClusterName), + azure.TagClusterName: fi.PtrTo(testClusterName), } if a, e := vmss.Tags, expectedTags; !reflect.DeepEqual(a, e) { t.Errorf("unexpected tags: expected %+v, but got %+v", e, a) diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go index 43b85e7f7df94..ccbdb347f24b2 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/bootstrapchannelbuilder.go @@ -155,8 +155,8 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(manifestBytes), Lifecycle: b.Lifecycle, - Location: fi.String(manifestPath), - Name: fi.String(name), + Location: fi.PtrTo(manifestPath), + Name: fi.PtrTo(name), }) } @@ -202,8 +202,8 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(manifestBytes), Lifecycle: b.Lifecycle, - Location: fi.String(manifestPath), - Name: fi.String(name), + Location: fi.PtrTo(manifestPath), + Name: fi.PtrTo(name), }) addon := addons.Add(&a.Spec) @@ -235,9 +235,9 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { location := key + "/default.yaml" a := &channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), } name := b.Cluster.ObjectMeta.Name + "-addons-" + key @@ -262,8 +262,8 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(manifestBytes), Lifecycle: b.Lifecycle, - Location: fi.String(manifestPath), - Name: fi.String(name), + Location: fi.PtrTo(manifestPath), + Name: fi.PtrTo(name), }) addons.Add(a) @@ -294,8 +294,8 @@ func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&fitasks.ManagedFile{ Contents: fi.NewBytesResource(addonsYAML), Lifecycle: b.Lifecycle, - Location: fi.String("addons/bootstrap-channel.yaml"), - Name: fi.String(name), + Location: fi.PtrTo("addons/bootstrap-channel.yaml"), + Name: fi.PtrTo(name), }) return nil @@ -337,9 +337,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.16" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), NeedsRollingUpdate: "control-plane", Id: id, }) @@ -355,9 
+355,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -378,9 +378,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -396,9 +396,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -427,9 +427,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.8" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -447,9 +447,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.9" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -466,9 +466,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.23" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -480,9 +480,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/v" + version + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), }) } @@ -494,9 +494,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -515,9 +515,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.19" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -531,7 +531,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon // @check if the node-local-dns is enabled NodeLocalDNS := b.Cluster.Spec.KubeDNS.NodeLocalDNS - if kubeDNS.Provider == "CoreDNS" && NodeLocalDNS != nil && fi.BoolValue(NodeLocalDNS.Enabled) { + if kubeDNS.Provider == "CoreDNS" && NodeLocalDNS != nil && fi.ValueOf(NodeLocalDNS.Enabled) { { key := "nodelocaldns.addons.k8s.io" @@ -540,16 +540,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: 
fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } } } - if b.Cluster.Spec.ClusterAutoscaler != nil && fi.BoolValue(b.Cluster.Spec.ClusterAutoscaler.Enabled) { + if b.Cluster.Spec.ClusterAutoscaler != nil && fi.ValueOf(b.Cluster.Spec.ClusterAutoscaler.Enabled) { { key := "cluster-autoscaler.addons.k8s.io" @@ -558,9 +558,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.15" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -572,7 +572,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon } - if b.Cluster.Spec.MetricsServer != nil && fi.BoolValue(b.Cluster.Spec.MetricsServer.Enabled) { + if b.Cluster.Spec.MetricsServer != nil && fi.ValueOf(b.Cluster.Spec.MetricsServer.Enabled) { { key := "metrics-server.addons.k8s.io" @@ -581,17 +581,17 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.11" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-app": "metrics-server"}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, - NeedsPKI: !fi.BoolValue(b.Cluster.Spec.MetricsServer.Insecure), + NeedsPKI: !fi.ValueOf(b.Cluster.Spec.MetricsServer.Insecure), }) } } } - if b.Cluster.Spec.CertManager != nil && fi.BoolValue(b.Cluster.Spec.CertManager.Enabled) && (b.Cluster.Spec.CertManager.Managed == nil || fi.BoolValue(b.Cluster.Spec.CertManager.Managed)) { + if b.Cluster.Spec.CertManager != nil && fi.ValueOf(b.Cluster.Spec.CertManager.Enabled) && (b.Cluster.Spec.CertManager.Managed == nil || fi.ValueOf(b.Cluster.Spec.CertManager.Managed)) { { key := "certmanager.io" @@ -600,8 +600,8 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.16" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -614,7 +614,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon nth := b.Cluster.Spec.NodeTerminationHandler - if nth != nil && fi.BoolValue(nth.Enabled) { + if nth != nil && fi.ValueOf(nth.Enabled) { key := "node-termination-handler.aws" @@ -623,9 +623,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.11" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -638,7 +638,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon npd := b.Cluster.Spec.NodeProblemDetector - if npd != nil && fi.BoolValue(npd.Enabled) { + if npd != nil && fi.ValueOf(npd.Enabled) { key := "node-problem-detector.addons.k8s.io" @@ -647,9 +647,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.17" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -658,13 +658,13 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon 
nvidia := b.Cluster.Spec.Containerd.NvidiaGPU igNvidia := false for _, ig := range b.KopsModelContext.InstanceGroups { - if ig.Spec.Containerd != nil && ig.Spec.Containerd.NvidiaGPU != nil && fi.BoolValue(ig.Spec.Containerd.NvidiaGPU.Enabled) { + if ig.Spec.Containerd != nil && ig.Spec.Containerd.NvidiaGPU != nil && fi.ValueOf(ig.Spec.Containerd.NvidiaGPU.Enabled) { igNvidia = true break } } - if nvidia != nil && fi.BoolValue(nvidia.Enabled) || igNvidia { + if nvidia != nil && fi.ValueOf(nvidia.Enabled) || igNvidia { key := "nvidia.addons.k8s.io" @@ -673,15 +673,15 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.16" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } } - if b.Cluster.Spec.AWSLoadBalancerController != nil && fi.BoolValue(b.Cluster.Spec.AWSLoadBalancerController.Enabled) { + if b.Cluster.Spec.AWSLoadBalancerController != nil && fi.ValueOf(b.Cluster.Spec.AWSLoadBalancerController.Enabled) { key := "aws-load-balancer-controller.addons.k8s.io" @@ -689,9 +689,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.19" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, NeedsPKI: true, }) @@ -702,7 +702,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon } } - if b.Cluster.Spec.PodIdentityWebhook != nil && fi.BoolValue(&b.Cluster.Spec.PodIdentityWebhook.Enabled) { + if b.Cluster.Spec.PodIdentityWebhook != nil && fi.ValueOf(&b.Cluster.Spec.PodIdentityWebhook.Enabled) { key := "eks-pod-identity-webhook.addons.k8s.io" @@ -711,18 +711,18 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{ "k8s-addon": key, }, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, NeedsPKI: true, }) } } - if fi.BoolValue(b.Cluster.Spec.CloudConfig.ManageStorageClasses) { + if fi.ValueOf(b.Cluster.Spec.CloudConfig.ManageStorageClasses) { if b.Cluster.Spec.GetCloudProvider() == kops.CloudProviderAWS { key := "storage-aws.addons.k8s.io" @@ -731,14 +731,13 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } } - } if b.Cluster.Spec.GetCloudProvider() == kops.CloudProviderDO { @@ -749,9 +748,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -763,9 +762,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + 
Manifest: fi.PtrTo(location), Id: id, }) } @@ -778,9 +777,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -790,16 +789,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } } if b.Cluster.Spec.GetCloudProvider() == kops.CloudProviderGCE { - if fi.BoolValue(b.Cluster.Spec.CloudConfig.ManageStorageClasses) { + if fi.ValueOf(b.Cluster.Spec.CloudConfig.ManageStorageClasses) { key := "storage-gce.addons.k8s.io" { @@ -807,21 +806,21 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } } - if b.Cluster.Spec.CloudConfig != nil && b.Cluster.Spec.CloudConfig.GCPPDCSIDriver != nil && fi.BoolValue(b.Cluster.Spec.CloudConfig.GCPPDCSIDriver.Enabled) { + if b.Cluster.Spec.CloudConfig != nil && b.Cluster.Spec.CloudConfig.GCPPDCSIDriver != nil && fi.ValueOf(b.Cluster.Spec.CloudConfig.GCPPDCSIDriver.Enabled) { key := "gcp-pd-csi-driver.addons.k8s.io" { id := "k8s-1.23" location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, Id: id, }) @@ -837,9 +836,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -855,9 +854,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -869,8 +868,8 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.23" location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, Id: id, }) @@ -888,9 +887,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) @@ -906,9 +905,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - 
Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -922,9 +921,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -933,9 +932,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -950,9 +949,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -961,9 +960,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -972,9 +971,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -989,9 +988,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -1000,9 +999,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -1011,9 +1010,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) addon.BuildPrune = true @@ -1028,9 +1027,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -1049,9 +1048,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: 
fi.String(location), + Manifest: fi.PtrTo(location), Id: id, NeedsRollingUpdate: "all", }) @@ -1074,9 +1073,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: authenticationSelector, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -1089,9 +1088,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.12" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: authenticationSelector, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, }) } @@ -1106,8 +1105,8 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/" + id + ".yaml" addon := addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, Id: id, }) @@ -1123,8 +1122,8 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.13-ccm" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, Id: id, }) @@ -1141,8 +1140,8 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon id := "k8s-1.18" location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, Id: id, }) @@ -1152,16 +1151,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon } } if b.Cluster.Spec.CloudConfig != nil && b.Cluster.Spec.CloudConfig.AWSEBSCSIDriver != nil && - fi.BoolValue(b.Cluster.Spec.CloudConfig.AWSEBSCSIDriver.Enabled) && - (b.Cluster.Spec.CloudConfig.AWSEBSCSIDriver.Managed == nil || fi.BoolValue(b.Cluster.Spec.CloudConfig.AWSEBSCSIDriver.Managed)) { + fi.ValueOf(b.Cluster.Spec.CloudConfig.AWSEBSCSIDriver.Enabled) && + (b.Cluster.Spec.CloudConfig.AWSEBSCSIDriver.Managed == nil || fi.ValueOf(b.Cluster.Spec.CloudConfig.AWSEBSCSIDriver.Managed)) { key := "aws-ebs-csi-driver.addons.k8s.io" { id := "k8s-1.17" location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, Id: id, }) @@ -1172,30 +1171,30 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon } } - if b.Cluster.Spec.SnapshotController != nil && fi.BoolValue(b.Cluster.Spec.SnapshotController.Enabled) { + if b.Cluster.Spec.SnapshotController != nil && fi.ValueOf(b.Cluster.Spec.SnapshotController.Enabled) { key := "snapshot-controller.addons.k8s.io" { id := "k8s-1.20" location := key + "/" + id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, NeedsPKI: true, Id: id, }) } } - if b.Cluster.Spec.Karpenter != nil && fi.BoolValue(&b.Cluster.Spec.Karpenter.Enabled) { + if b.Cluster.Spec.Karpenter != nil && fi.ValueOf(&b.Cluster.Spec.Karpenter.Enabled) { key := "karpenter.sh" { id := "k8s-1.19" location := key + "/" 
+ id + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), - Manifest: fi.String(location), + Name: fi.PtrTo(key), + Manifest: fi.PtrTo(location), Selector: map[string]string{"k8s-addon": key}, Id: id, }) @@ -1211,9 +1210,9 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon location := key + "/v" + version + ".yaml" addons.Add(&channelsapi.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), }) } diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go index 940c170e01666..dbebf8d8a04b5 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go @@ -31,13 +31,13 @@ func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error { location := key + "/" + id + "-v1.11.yaml" addon := &api.AddonSpec{ - Name: fi.String(key), + Name: fi.PtrTo(key), Selector: networkingSelector(), - Manifest: fi.String(location), + Manifest: fi.PtrTo(location), Id: id, NeedsRollingUpdate: "all", } - if cilium.Hubble != nil && fi.BoolValue(cilium.Hubble.Enabled) { + if cilium.Hubble != nil && fi.ValueOf(cilium.Hubble.Enabled) { addon.NeedsPKI = true } addons.Add(addon) diff --git a/upup/pkg/fi/cloudup/containerd.go b/upup/pkg/fi/cloudup/containerd.go index 0bcf84d6c8af8..fe54f518897c6 100644 --- a/upup/pkg/fi/cloudup/containerd.go +++ b/upup/pkg/fi/cloudup/containerd.go @@ -49,18 +49,18 @@ func findContainerdAsset(c *kops.Cluster, assetBuilder *assets.AssetBuilder, arc if containerd.Packages != nil { if arch == architectures.ArchitectureAmd64 && containerd.Packages.UrlAmd64 != nil && containerd.Packages.HashAmd64 != nil { - assetUrl := fi.StringValue(containerd.Packages.UrlAmd64) - assetHash := fi.StringValue(containerd.Packages.HashAmd64) + assetUrl := fi.ValueOf(containerd.Packages.UrlAmd64) + assetHash := fi.ValueOf(containerd.Packages.HashAmd64) return findAssetsUrlHash(assetBuilder, assetUrl, assetHash) } if arch == architectures.ArchitectureArm64 && containerd.Packages.UrlArm64 != nil && containerd.Packages.HashArm64 != nil { - assetUrl := fi.StringValue(containerd.Packages.UrlArm64) - assetHash := fi.StringValue(containerd.Packages.HashArm64) + assetUrl := fi.ValueOf(containerd.Packages.UrlArm64) + assetHash := fi.ValueOf(containerd.Packages.HashArm64) return findAssetsUrlHash(assetBuilder, assetUrl, assetHash) } } - version := fi.StringValue(containerd.Version) + version := fi.ValueOf(containerd.Version) if version == "" { return nil, nil, fmt.Errorf("unable to find containerd version") } diff --git a/upup/pkg/fi/cloudup/deepvalidate_test.go b/upup/pkg/fi/cloudup/deepvalidate_test.go index 0d7148e8e79ce..eb8b3fa9811b9 100644 --- a/upup/pkg/fi/cloudup/deepvalidate_test.go +++ b/upup/pkg/fi/cloudup/deepvalidate_test.go @@ -134,8 +134,8 @@ func TestDeepValidate_EvenEtcdClusterSize(t *testing.T) { { Name: "main", Members: []kopsapi.EtcdMemberSpec{ - {Name: "us-test-1a", InstanceGroup: fi.String("us-test-1a")}, - {Name: "us-test-1b", InstanceGroup: fi.String("us-test-1b")}, + {Name: "us-test-1a", InstanceGroup: fi.PtrTo("us-test-1a")}, + {Name: "us-test-1b", InstanceGroup: fi.PtrTo("us-test-1b")}, }, }, } @@ -156,9 +156,9 @@ func TestDeepValidate_MissingEtcdMember(t *testing.T) { { Name: "main", Members: []kopsapi.EtcdMemberSpec{ - {Name: "us-test-1a", InstanceGroup: fi.String("us-test-1a")}, - {Name: 
"us-test-1b", InstanceGroup: fi.String("us-test-1b")}, - {Name: "us-test-1c", InstanceGroup: fi.String("us-test-1c")}, + {Name: "us-test-1a", InstanceGroup: fi.PtrTo("us-test-1a")}, + {Name: "us-test-1b", InstanceGroup: fi.PtrTo("us-test-1b")}, + {Name: "us-test-1c", InstanceGroup: fi.PtrTo("us-test-1c")}, }, }, } diff --git a/upup/pkg/fi/cloudup/defaults.go b/upup/pkg/fi/cloudup/defaults.go index 36e7f4c937c91..82bd56101730a 100644 --- a/upup/pkg/fi/cloudup/defaults.go +++ b/upup/pkg/fi/cloudup/defaults.go @@ -107,8 +107,8 @@ func PerformAssignments(c *kops.Cluster, cloud fi.Cloud) error { } // TODO: Unclear this should be here - it isn't too hard to change - if c.UsesPublicDNS() && c.Spec.MasterPublicName == "" && c.ObjectMeta.Name != "" { - c.Spec.MasterPublicName = "api." + c.ObjectMeta.Name + if c.UsesPublicDNS() && c.Spec.API.PublicName == "" && c.ObjectMeta.Name != "" { + c.Spec.API.PublicName = "api." + c.ObjectMeta.Name } // We only assign subnet CIDRs on AWS, OpenStack, and Azure. @@ -201,7 +201,7 @@ func assignProxy(cluster *kops.Cluster) (*kops.EgressProxySpec, error) { "127.0.0.1", "localhost", cluster.Spec.ClusterDNSDomain, // TODO we may want this for public loadbalancers - cluster.Spec.MasterPublicName, + cluster.Spec.API.PublicName, cluster.ObjectMeta.Name, firstIP, cluster.Spec.NonMasqueradeCIDR, diff --git a/upup/pkg/fi/cloudup/dns.go b/upup/pkg/fi/cloudup/dns.go index 356ebd1df290a..5436460e28ee9 100644 --- a/upup/pkg/fi/cloudup/dns.go +++ b/upup/pkg/fi/cloudup/dns.go @@ -249,25 +249,25 @@ func buildPrecreateDNSHostnames(cluster *kops.Cluster) []recordKey { internalType = rrstype.AAAA } - hasAPILoadbalancer := cluster.Spec.API != nil && cluster.Spec.API.LoadBalancer != nil + hasAPILoadbalancer := cluster.Spec.API.LoadBalancer != nil useLBForInternalAPI := hasAPILoadbalancer && cluster.Spec.API.LoadBalancer.UseForInternalAPI - if cluster.Spec.MasterPublicName != "" && !hasAPILoadbalancer { + if cluster.Spec.API.PublicName != "" && !hasAPILoadbalancer { recordKeys = append(recordKeys, recordKey{ - hostname: cluster.Spec.MasterPublicName, + hostname: cluster.Spec.API.PublicName, rrsType: rrstype.A, }) if internalType != rrstype.A { recordKeys = append(recordKeys, recordKey{ - hostname: cluster.Spec.MasterPublicName, + hostname: cluster.Spec.API.PublicName, rrsType: internalType, }) } } - if cluster.Spec.MasterInternalName != "" && !useLBForInternalAPI { + if !useLBForInternalAPI { recordKeys = append(recordKeys, recordKey{ - hostname: cluster.Spec.MasterInternalName, + hostname: cluster.APIInternalName(), rrsType: internalType, }) } diff --git a/upup/pkg/fi/cloudup/dns_test.go b/upup/pkg/fi/cloudup/dns_test.go index 6a6ce1d325cfd..b960a77558a17 100644 --- a/upup/pkg/fi/cloudup/dns_test.go +++ b/upup/pkg/fi/cloudup/dns_test.go @@ -52,7 +52,7 @@ func TestPrecreateDNSNames(t *testing.T) { { cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ - API: &kops.AccessSpec{ + API: kops.APISpec{ LoadBalancer: &kops.LoadBalancerAccessSpec{}, }, }, @@ -64,7 +64,7 @@ func TestPrecreateDNSNames(t *testing.T) { { cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ - API: &kops.AccessSpec{ + API: kops.APISpec{ LoadBalancer: &kops.LoadBalancerAccessSpec{}, }, NonMasqueradeCIDR: "::/0", @@ -77,7 +77,7 @@ func TestPrecreateDNSNames(t *testing.T) { { cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ - API: &kops.AccessSpec{ + API: kops.APISpec{ LoadBalancer: &kops.LoadBalancerAccessSpec{ UseForInternalAPI: true, }, @@ -124,8 +124,7 @@ func TestPrecreateDNSNames(t *testing.T) { cluster := g.cluster 
cluster.ObjectMeta.Name = "cluster1.example.com" - cluster.Spec.MasterPublicName = "api." + cluster.ObjectMeta.Name - cluster.Spec.MasterInternalName = "api.internal." + cluster.ObjectMeta.Name + cluster.Spec.API.PublicName = "api." + cluster.ObjectMeta.Name cluster.Spec.EtcdClusters = []kops.EtcdClusterSpec{ { Name: "main", diff --git a/upup/pkg/fi/cloudup/do/cloud.go b/upup/pkg/fi/cloudup/do/cloud.go index 0a6bc9139f30c..8dc0ba6bb24e9 100644 --- a/upup/pkg/fi/cloudup/do/cloud.go +++ b/upup/pkg/fi/cloudup/do/cloud.go @@ -522,9 +522,9 @@ func buildCloudInstanceGroup(c *doCloudImplementation, ig *kops.InstanceGroup, g HumanName: g.InstanceGroupName, InstanceGroup: ig, Raw: g, - MinSize: int(fi.Int32Value(ig.Spec.MinSize)), - TargetSize: int(fi.Int32Value(ig.Spec.MinSize)), - MaxSize: int(fi.Int32Value(ig.Spec.MaxSize)), + MinSize: int(fi.ValueOf(ig.Spec.MinSize)), + TargetSize: int(fi.ValueOf(ig.Spec.MinSize)), + MaxSize: int(fi.ValueOf(ig.Spec.MaxSize)), } for _, member := range g.Members { diff --git a/upup/pkg/fi/cloudup/docker.go b/upup/pkg/fi/cloudup/docker.go index dd83cf21147f4..979f3735fc7d4 100644 --- a/upup/pkg/fi/cloudup/docker.go +++ b/upup/pkg/fi/cloudup/docker.go @@ -49,18 +49,18 @@ func findDockerAsset(c *kops.Cluster, assetBuilder *assets.AssetBuilder, arch ar if docker.Packages != nil { if arch == architectures.ArchitectureAmd64 && docker.Packages.UrlAmd64 != nil && docker.Packages.HashAmd64 != nil { - assetUrl := fi.StringValue(docker.Packages.UrlAmd64) - assetHash := fi.StringValue(docker.Packages.HashAmd64) + assetUrl := fi.ValueOf(docker.Packages.UrlAmd64) + assetHash := fi.ValueOf(docker.Packages.HashAmd64) return findAssetsUrlHash(assetBuilder, assetUrl, assetHash) } if arch == architectures.ArchitectureArm64 && docker.Packages.UrlArm64 != nil && docker.Packages.HashArm64 != nil { - assetUrl := fi.StringValue(docker.Packages.UrlArm64) - assetHash := fi.StringValue(docker.Packages.HashArm64) + assetUrl := fi.ValueOf(docker.Packages.UrlArm64) + assetHash := fi.ValueOf(docker.Packages.HashArm64) return findAssetsUrlHash(assetBuilder, assetUrl, assetHash) } } - version := fi.StringValue(docker.Version) + version := fi.ValueOf(docker.Version) if version == "" { return nil, nil, fmt.Errorf("unable to find Docker version") } diff --git a/upup/pkg/fi/cloudup/dotasks/droplet.go b/upup/pkg/fi/cloudup/dotasks/droplet.go index e8a218d3f6459..36630dc785025 100644 --- a/upup/pkg/fi/cloudup/dotasks/droplet.go +++ b/upup/pkg/fi/cloudup/dotasks/droplet.go @@ -68,7 +68,7 @@ func (d *Droplet) Find(c *fi.Context) (*Droplet, error) { count := 0 var foundDroplet godo.Droplet for _, droplet := range droplets { - if droplet.Name == fi.StringValue(d.Name) { + if droplet.Name == fi.ValueOf(d.Name) { found = true count++ foundDroplet = droplet @@ -80,15 +80,15 @@ func (d *Droplet) Find(c *fi.Context) (*Droplet, error) { } return &Droplet{ - Name: fi.String(foundDroplet.Name), + Name: fi.PtrTo(foundDroplet.Name), Count: count, - Region: fi.String(foundDroplet.Region.Slug), - Size: fi.String(foundDroplet.Size.Slug), + Region: fi.PtrTo(foundDroplet.Region.Slug), + Size: fi.PtrTo(foundDroplet.Size.Slug), Image: d.Image, //Image should not change so we keep it as-is Tags: foundDroplet.Tags, SSHKey: d.SSHKey, // TODO: get from droplet or ignore change UserData: d.UserData, // TODO: get from droplet or ignore change - VPCUUID: fi.String(foundDroplet.VPCUUID), + VPCUUID: fi.PtrTo(foundDroplet.VPCUUID), Lifecycle: d.Lifecycle, }, nil } @@ -151,29 +151,29 @@ func (_ *Droplet) RenderDO(t *do.DOAPITarget, 
a, e, changes *Droplet) error { // associate vpcuuid to the droplet if set. vpcUUID := "" - if fi.StringValue(e.NetworkCIDR) != "" { - vpcUUID, err = t.Cloud.GetVPCUUID(fi.StringValue(e.NetworkCIDR), fi.StringValue(e.VPCName)) + if fi.ValueOf(e.NetworkCIDR) != "" { + vpcUUID, err = t.Cloud.GetVPCUUID(fi.ValueOf(e.NetworkCIDR), fi.ValueOf(e.VPCName)) if err != nil { - return fmt.Errorf("Error fetching vpcUUID from network cidr=%s", fi.StringValue(e.NetworkCIDR)) + return fmt.Errorf("Error fetching vpcUUID from network cidr=%s", fi.ValueOf(e.NetworkCIDR)) } - } else if fi.StringValue(e.VPCUUID) != "" { - vpcUUID = fi.StringValue(e.VPCUUID) + } else if fi.ValueOf(e.VPCUUID) != "" { + vpcUUID = fi.ValueOf(e.VPCUUID) } for i := 0; i < newDropletCount; i++ { _, _, err = t.Cloud.DropletsService().Create(context.TODO(), &godo.DropletCreateRequest{ - Name: fi.StringValue(e.Name), - Region: fi.StringValue(e.Region), - Size: fi.StringValue(e.Size), - Image: godo.DropletCreateImage{Slug: fi.StringValue(e.Image)}, + Name: fi.ValueOf(e.Name), + Region: fi.ValueOf(e.Region), + Size: fi.ValueOf(e.Size), + Image: godo.DropletCreateImage{Slug: fi.ValueOf(e.Image)}, Tags: e.Tags, VPCUUID: vpcUUID, UserData: userData, - SSHKeys: []godo.DropletCreateSSHKey{{Fingerprint: fi.StringValue(e.SSHKey)}}, + SSHKeys: []godo.DropletCreateSSHKey{{Fingerprint: fi.ValueOf(e.SSHKey)}}, }) if err != nil { - return fmt.Errorf("Error creating droplet with Name=%s", fi.StringValue(e.Name)) + return fmt.Errorf("Error creating droplet with Name=%s", fi.ValueOf(e.Name)) } } diff --git a/upup/pkg/fi/cloudup/dotasks/loadbalancer.go b/upup/pkg/fi/cloudup/dotasks/loadbalancer.go index 686d062326cd6..bf54d699dd2d6 100644 --- a/upup/pkg/fi/cloudup/dotasks/loadbalancer.go +++ b/upup/pkg/fi/cloudup/dotasks/loadbalancer.go @@ -64,24 +64,24 @@ func (lb *LoadBalancer) CompareWithID() *string { } func (lb *LoadBalancer) Find(c *fi.Context) (*LoadBalancer, error) { - klog.V(10).Infof("load balancer FIND - ID=%s, name=%s", fi.StringValue(lb.ID), fi.StringValue(lb.Name)) - if fi.StringValue(lb.ID) == "" { + klog.V(10).Infof("load balancer FIND - ID=%s, name=%s", fi.ValueOf(lb.ID), fi.ValueOf(lb.Name)) + if fi.ValueOf(lb.ID) == "" { // Loadbalancer = nil if not found return nil, nil } cloud := c.Cloud.(do.DOCloud) lbService := cloud.LoadBalancersService() - loadbalancer, _, err := lbService.Get(context.TODO(), fi.StringValue(lb.ID)) + loadbalancer, _, err := lbService.Get(context.TODO(), fi.ValueOf(lb.ID)) if err != nil { return nil, fmt.Errorf("load balancer service get request returned error %v", err) } return &LoadBalancer{ - Name: fi.String(loadbalancer.Name), - ID: fi.String(loadbalancer.ID), - Region: fi.String(loadbalancer.Region.Slug), - VPCUUID: fi.String(loadbalancer.VPCUUID), + Name: fi.PtrTo(loadbalancer.Name), + ID: fi.PtrTo(loadbalancer.ID), + Region: fi.PtrTo(loadbalancer.Region.Slug), + VPCUUID: fi.PtrTo(loadbalancer.VPCUUID), // Ignore system fields Lifecycle: lb.Lifecycle, @@ -149,41 +149,41 @@ func (_ *LoadBalancer) RenderDO(t *do.DOAPITarget, a, e, changes *LoadBalancer) } for _, loadbalancer := range loadBalancers { - klog.V(10).Infof("load balancer retrieved=%s, e.Name=%s", loadbalancer.Name, fi.StringValue(e.Name)) - if strings.Contains(loadbalancer.Name, fi.StringValue(e.Name)) { + klog.V(10).Infof("load balancer retrieved=%s, e.Name=%s", loadbalancer.Name, fi.ValueOf(e.Name)) + if strings.Contains(loadbalancer.Name, fi.ValueOf(e.Name)) { // load balancer already exists. 
- e.ID = fi.String(loadbalancer.ID) - e.IPAddress = fi.String(loadbalancer.IP) // This will be empty on create, but will be filled later on FindAddresses invokation. + e.ID = fi.PtrTo(loadbalancer.ID) + e.IPAddress = fi.PtrTo(loadbalancer.IP) // This will be empty on create, but will be filled later on FindAddresses invokation. return nil } } // associate vpcuuid to the loadbalancer if set vpcUUID := "" - if fi.StringValue(e.NetworkCIDR) != "" { - vpcUUID, err = t.Cloud.GetVPCUUID(fi.StringValue(e.NetworkCIDR), fi.StringValue(e.VPCName)) + if fi.ValueOf(e.NetworkCIDR) != "" { + vpcUUID, err = t.Cloud.GetVPCUUID(fi.ValueOf(e.NetworkCIDR), fi.ValueOf(e.VPCName)) if err != nil { - return fmt.Errorf("Error fetching vpcUUID from network cidr=%s", fi.StringValue(e.NetworkCIDR)) + return fmt.Errorf("Error fetching vpcUUID from network cidr=%s", fi.ValueOf(e.NetworkCIDR)) } - } else if fi.StringValue(e.VPCUUID) != "" { - vpcUUID = fi.StringValue(e.VPCUUID) + } else if fi.ValueOf(e.VPCUUID) != "" { + vpcUUID = fi.ValueOf(e.VPCUUID) } loadBalancerService := t.Cloud.LoadBalancersService() loadbalancer, _, err := loadBalancerService.Create(context.TODO(), &godo.LoadBalancerRequest{ - Name: fi.StringValue(e.Name), - Region: fi.StringValue(e.Region), - Tag: fi.StringValue(e.DropletTag), + Name: fi.ValueOf(e.Name), + Region: fi.ValueOf(e.Region), + Tag: fi.ValueOf(e.DropletTag), VPCUUID: vpcUUID, ForwardingRules: Rules, HealthCheck: HealthCheck, }) if err != nil { - return fmt.Errorf("Error creating load balancer with Name=%s, Error=%v", fi.StringValue(e.Name), err) + return fmt.Errorf("Error creating load balancer with Name=%s, Error=%v", fi.ValueOf(e.Name), err) } - e.ID = fi.String(loadbalancer.ID) - e.IPAddress = fi.String(loadbalancer.IP) // This will be empty on create, but will be filled later on FindAddresses invokation. + e.ID = fi.PtrTo(loadbalancer.ID) + e.IPAddress = fi.PtrTo(loadbalancer.IP) // This will be empty on create, but will be filled later on FindAddresses invokation. klog.V(2).Infof("load balancer for DO created with id: %s", loadbalancer.ID) return nil @@ -198,13 +198,13 @@ func (lb *LoadBalancer) FindAddresses(c *fi.Context) ([]string, error) { loadBalancerService := cloud.LoadBalancersService() address := "" - if len(fi.StringValue(lb.ID)) > 0 { + if len(fi.ValueOf(lb.ID)) > 0 { // able to retrieve ID. 
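The FindAddresses hunk beginning here is part of a pattern worth calling out: the comments note that the load balancer IP is empty at create time, so the caller polls for it with vfs.RetryWithBackoff below, and the Hetzner LoadBalancer later in this diff asserts var _ fi.HasAddress = &LoadBalancer{} against the same method signature. A sketch of the interface being satisfied, inferred from those signatures rather than copied from the upstream definition:

    // HasAddress (assumed shape): a task that eventually exposes an
    // endpoint address implements FindAddresses, and callers retry it
    // because cloud load balancers receive their IP asynchronously.
    type HasAddress interface {
        FindAddresses(c *Context) ([]string, error)
    }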
done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) { - klog.V(2).Infof("Finding IP address for load balancer ID=%s", fi.StringValue(lb.ID)) - loadBalancer, _, err := loadBalancerService.Get(context.TODO(), fi.StringValue(lb.ID)) + klog.V(2).Infof("Finding IP address for load balancer ID=%s", fi.ValueOf(lb.ID)) + loadBalancer, _, err := loadBalancerService.Get(context.TODO(), fi.ValueOf(lb.ID)) if err != nil { - klog.Errorf("Error fetching load balancer with Name=%s", fi.StringValue(lb.Name)) + klog.Errorf("Error fetching load balancer with Name=%s", fi.ValueOf(lb.Name)) return false, err } diff --git a/upup/pkg/fi/cloudup/dotasks/volume.go b/upup/pkg/fi/cloudup/dotasks/volume.go index 31781b17aa061..6ee795e1a284a 100644 --- a/upup/pkg/fi/cloudup/dotasks/volume.go +++ b/upup/pkg/fi/cloudup/dotasks/volume.go @@ -51,20 +51,20 @@ func (v *Volume) Find(c *fi.Context) (*Volume, error) { volumes, _, err := volService.ListVolumes(context.TODO(), &godo.ListVolumeParams{ Region: cloud.Region(), - Name: fi.StringValue(v.Name), + Name: fi.ValueOf(v.Name), }) if err != nil { return nil, err } for _, volume := range volumes { - if volume.Name == fi.StringValue(v.Name) { + if volume.Name == fi.ValueOf(v.Name) { return &Volume{ - Name: fi.String(volume.Name), - ID: fi.String(volume.ID), + Name: fi.PtrTo(volume.Name), + ID: fi.PtrTo(volume.ID), Lifecycle: v.Lifecycle, - SizeGB: fi.Int64(volume.SizeGigaBytes), - Region: fi.String(volume.Region.Slug), + SizeGB: fi.PtrTo(volume.SizeGigaBytes), + Region: fi.PtrTo(volume.Region.Slug), }, nil } } @@ -120,9 +120,9 @@ func (_ *Volume) RenderDO(t *do.DOAPITarget, a, e, changes *Volume) error { volService := t.Cloud.VolumeService() _, _, err := volService.CreateVolume(context.TODO(), &godo.VolumeCreateRequest{ - Name: fi.StringValue(e.Name), - Region: fi.StringValue(e.Region), - SizeGigaBytes: fi.Int64Value(e.SizeGB), + Name: fi.ValueOf(e.Name), + Region: fi.ValueOf(e.Region), + SizeGigaBytes: fi.ValueOf(e.SizeGB), Tags: tagArray, }) diff --git a/upup/pkg/fi/cloudup/dotasks/volume_test.go b/upup/pkg/fi/cloudup/dotasks/volume_test.go index 28d98e9874b27..ada11e4563104 100644 --- a/upup/pkg/fi/cloudup/dotasks/volume_test.go +++ b/upup/pkg/fi/cloudup/dotasks/volume_test.go @@ -100,15 +100,15 @@ func Test_Find(t *testing.T) { }, }, &Volume{ - Name: fi.String("test0"), - SizeGB: fi.Int64(int64(100)), - Region: fi.String("nyc1"), + Name: fi.PtrTo("test0"), + SizeGB: fi.PtrTo(int64(100)), + Region: fi.PtrTo("nyc1"), }, &Volume{ - Name: fi.String("test0"), - ID: fi.String("100"), - SizeGB: fi.Int64(int64(100)), - Region: fi.String("nyc1"), + Name: fi.PtrTo("test0"), + ID: fi.PtrTo("100"), + SizeGB: fi.PtrTo(int64(100)), + Region: fi.PtrTo("nyc1"), }, nil, }, @@ -121,9 +121,9 @@ func Test_Find(t *testing.T) { }, }, &Volume{ - Name: fi.String("test1"), - SizeGB: fi.Int64(int64(100)), - Region: fi.String("nyc1"), + Name: fi.PtrTo("test1"), + SizeGB: fi.PtrTo(int64(100)), + Region: fi.PtrTo("nyc1"), }, nil, nil, @@ -137,9 +137,9 @@ func Test_Find(t *testing.T) { }, }, &Volume{ - Name: fi.String("test1"), - SizeGB: fi.Int64(int64(100)), - Region: fi.String("nyc1"), + Name: fi.PtrTo("test1"), + SizeGB: fi.PtrTo(int64(100)), + Region: fi.PtrTo("nyc1"), }, nil, errors.New("error!"), diff --git a/upup/pkg/fi/cloudup/dotasks/vpc.go b/upup/pkg/fi/cloudup/dotasks/vpc.go index 16c617969ea71..fe69bb6d4f08d 100644 --- a/upup/pkg/fi/cloudup/dotasks/vpc.go +++ b/upup/pkg/fi/cloudup/dotasks/vpc.go @@ -51,13 +51,13 @@ func (v *VPC) Find(c *fi.Context) (*VPC, error) { } for _, 
vpc := range vpcs { - if vpc.Name == fi.StringValue(v.Name) { + if vpc.Name == fi.ValueOf(v.Name) { return &VPC{ - Name: fi.String(vpc.Name), - ID: fi.String(vpc.ID), + Name: fi.PtrTo(vpc.Name), + ID: fi.PtrTo(vpc.ID), Lifecycle: v.Lifecycle, - IPRange: fi.String(vpc.IPRange), - Region: fi.String(vpc.RegionSlug), + IPRange: fi.PtrTo(vpc.IPRange), + Region: fi.PtrTo(vpc.RegionSlug), }, nil } } @@ -99,9 +99,9 @@ func (_ *VPC) RenderDO(t *do.DOAPITarget, a, e, changes *VPC) error { vpcService := t.Cloud.VPCsService() _, _, err := vpcService.Create(context.TODO(), &godo.VPCCreateRequest{ - Name: fi.StringValue(e.Name), - RegionSlug: fi.StringValue(e.Region), - IPRange: fi.StringValue(e.IPRange), + Name: fi.ValueOf(e.Name), + RegionSlug: fi.ValueOf(e.Region), + IPRange: fi.ValueOf(e.IPRange), }) return err diff --git a/upup/pkg/fi/cloudup/gce/gcemetadata/clustername.go b/upup/pkg/fi/cloudup/gce/gcemetadata/clustername.go index 67c5fb291ad85..5452b4a7382b4 100644 --- a/upup/pkg/fi/cloudup/gce/gcemetadata/clustername.go +++ b/upup/pkg/fi/cloudup/gce/gcemetadata/clustername.go @@ -33,7 +33,7 @@ func MetadataMatchesClusterName(findClusterName string, metadata *compute.Metada } for _, item := range metadata.Items { if item.Key == MetadataKeyClusterName { - value := fi.StringValue(item.Value) + value := fi.ValueOf(item.Value) if strings.TrimSpace(value) == findClusterName { return true } else { diff --git a/upup/pkg/fi/cloudup/gce/network.go b/upup/pkg/fi/cloudup/gce/network.go index e4de46335069b..99f86c8d0c1b5 100644 --- a/upup/pkg/fi/cloudup/gce/network.go +++ b/upup/pkg/fi/cloudup/gce/network.go @@ -190,7 +190,7 @@ func performSubnetAssignments(ctx context.Context, c *kops.Cluster, cloudObj fi. needCIDR := 0 for i := range c.Spec.Subnets { subnet := &c.Spec.Subnets[i] - if subnet.ProviderID != "" { + if subnet.ID != "" { continue } if subnet.CIDR == "" { @@ -212,7 +212,7 @@ func performSubnetAssignments(ctx context.Context, c *kops.Cluster, cloudObj fi. 
for i := range c.Spec.Subnets { subnet := &c.Spec.Subnets[i] - if subnet.ProviderID != "" { + if subnet.ID != "" { continue } if subnet.CIDR != "" { diff --git a/upup/pkg/fi/cloudup/gce/tpm/gcetpmverifier/tpmverifier.go b/upup/pkg/fi/cloudup/gce/tpm/gcetpmverifier/tpmverifier.go index 53d49a0688f47..8aa603515c313 100644 --- a/upup/pkg/fi/cloudup/gce/tpm/gcetpmverifier/tpmverifier.go +++ b/upup/pkg/fi/cloudup/gce/tpm/gcetpmverifier/tpmverifier.go @@ -136,9 +136,9 @@ func (v *tpmVerifier) VerifyToken(ctx context.Context, authToken string, body [] for _, item := range instance.Metadata.Items { switch item.Key { case gce.MetadataKeyInstanceGroupName: - instanceGroupName = fi.StringValue(item.Value) + instanceGroupName = fi.ValueOf(item.Value) case gcemetadata.MetadataKeyClusterName: - clusterName = fi.StringValue(item.Value) + clusterName = fi.ValueOf(item.Value) } } diff --git a/upup/pkg/fi/cloudup/gcetasks/address.go b/upup/pkg/fi/cloudup/gcetasks/address.go index 36916fc5bd96e..d3746b3e8a88f 100644 --- a/upup/pkg/fi/cloudup/gcetasks/address.go +++ b/upup/pkg/fi/cloudup/gcetasks/address.go @@ -108,7 +108,7 @@ func (e *Address) FindAddresses(context *fi.Context) ([]string, error) { if actual == nil { return nil, nil } - return []string{fi.StringValue(actual.IPAddress)}, nil + return []string{fi.ValueOf(actual.IPAddress)}, nil } func (e *Address) Run(c *fi.Context) error { @@ -131,7 +131,7 @@ func (_ *Address) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Address) error { cloud := t.Cloud addr := &compute.Address{ Name: *e.Name, - Address: fi.StringValue(e.IPAddress), + Address: fi.ValueOf(e.IPAddress), Region: cloud.Region(), } @@ -165,7 +165,7 @@ func (_ *Address) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *A } func (e *Address) TerraformAddress() *terraformWriter.Literal { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) return terraformWriter.LiteralProperty("google_compute_address", name, "address") } diff --git a/upup/pkg/fi/cloudup/gcetasks/backend_service.go b/upup/pkg/fi/cloudup/gcetasks/backend_service.go index 54341ca557145..7f6d105947e21 100644 --- a/upup/pkg/fi/cloudup/gcetasks/backend_service.go +++ b/upup/pkg/fi/cloudup/gcetasks/backend_service.go @@ -189,7 +189,7 @@ func (_ *BackendService) RenderTerraform(t *terraform.TerraformTarget, a, e, cha } func (e *BackendService) TerraformAddress() *terraformWriter.Literal { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) return terraformWriter.LiteralProperty("google_compute_backend_service", name, "id") } diff --git a/upup/pkg/fi/cloudup/gcetasks/disk.go b/upup/pkg/fi/cloudup/gcetasks/disk.go index 114547e9560be..e61449efcfd5a 100644 --- a/upup/pkg/fi/cloudup/gcetasks/disk.go +++ b/upup/pkg/fi/cloudup/gcetasks/disk.go @@ -58,8 +58,8 @@ func (e *Disk) Find(c *fi.Context) (*Disk, error) { actual := &Disk{} actual.Name = &r.Name - actual.VolumeType = fi.String(gce.LastComponent(r.Type)) - actual.Zone = fi.String(gce.LastComponent(r.Zone)) + actual.VolumeType = fi.PtrTo(gce.LastComponent(r.Type)) + actual.Zone = fi.PtrTo(gce.LastComponent(r.Zone)) actual.SizeGB = &r.SizeGb actual.Labels = r.Labels diff --git a/upup/pkg/fi/cloudup/gcetasks/firewallrule.go b/upup/pkg/fi/cloudup/gcetasks/firewallrule.go index 7d80a57101639..56ac6215c49a5 100644 --- a/upup/pkg/fi/cloudup/gcetasks/firewallrule.go +++ b/upup/pkg/fi/cloudup/gcetasks/firewallrule.go @@ -67,7 +67,7 @@ func (e *FirewallRule) Find(c *fi.Context) (*FirewallRule, error) { actual := &FirewallRule{} actual.Name = &r.Name - actual.Network = 
&Network{Name: fi.String(lastComponent(r.Network))} + actual.Network = &Network{Name: fi.PtrTo(lastComponent(r.Network))} actual.TargetTags = r.TargetTags actual.SourceRanges = r.SourceRanges actual.SourceTags = r.SourceTags @@ -103,7 +103,7 @@ func (e *FirewallRule) Normalize(c *fi.Context) error { return fmt.Errorf("SourceRanges and SourceTags should not both be specified") } - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) // Make sure we've split the ipv4 / ipv6 addresses. // A single firewall rule can't mix ipv4 and ipv6 addresses, so we split them into two rules. diff --git a/upup/pkg/fi/cloudup/gcetasks/forwardingrule.go b/upup/pkg/fi/cloudup/gcetasks/forwardingrule.go index aaa387ee27e3f..b29b779a45b89 100644 --- a/upup/pkg/fi/cloudup/gcetasks/forwardingrule.go +++ b/upup/pkg/fi/cloudup/gcetasks/forwardingrule.go @@ -57,7 +57,7 @@ func (e *ForwardingRule) CompareWithID() *string { func (e *ForwardingRule) Find(c *fi.Context) (*ForwardingRule, error) { cloud := c.Cloud.(gce.GCECloud) - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) r, err := cloud.Compute().ForwardingRules().Get(cloud.Project(), cloud.Region(), name) if err != nil { @@ -68,7 +68,7 @@ func (e *ForwardingRule) Find(c *fi.Context) (*ForwardingRule, error) { } actual := &ForwardingRule{ - Name: fi.String(r.Name), + Name: fi.PtrTo(r.Name), IPProtocol: r.IPProtocol, } if r.PortRange != "" { @@ -80,7 +80,7 @@ func (e *ForwardingRule) Find(c *fi.Context) (*ForwardingRule, error) { if r.Target != "" { actual.TargetPool = &TargetPool{ - Name: fi.String(lastComponent(r.Target)), + Name: fi.PtrTo(lastComponent(r.Target)), } } if r.IPAddress != "" { @@ -102,14 +102,14 @@ func (e *ForwardingRule) Run(c *fi.Context) error { } func (_ *ForwardingRule) CheckChanges(a, e, changes *ForwardingRule) error { - if fi.StringValue(e.Name) == "" { + if fi.ValueOf(e.Name) == "" { return fi.RequiredField("Name") } return nil } func (_ *ForwardingRule) RenderGCE(t *gce.GCEAPITarget, a, e, changes *ForwardingRule) error { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) o := &compute.ForwardingRule{ Name: name, @@ -138,7 +138,7 @@ func (_ *ForwardingRule) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Forwardin } if e.IPAddress != nil { - o.IPAddress = fi.StringValue(e.IPAddress.IPAddress) + o.IPAddress = fi.ValueOf(e.IPAddress.IPAddress) if o.IPAddress == "" { addr, err := e.IPAddress.find(t.Cloud) if err != nil { @@ -148,7 +148,7 @@ func (_ *ForwardingRule) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Forwardin return fmt.Errorf("Address %q was not found", e.IPAddress) } - o.IPAddress = fi.StringValue(addr.IPAddress) + o.IPAddress = fi.ValueOf(addr.IPAddress) if o.IPAddress == "" { return fmt.Errorf("Address had no IP: %v", e.IPAddress) } @@ -210,7 +210,7 @@ type terraformForwardingRule struct { } func (_ *ForwardingRule) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ForwardingRule) error { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) tf := &terraformForwardingRule{ Name: name, @@ -246,7 +246,7 @@ func (_ *ForwardingRule) RenderTerraform(t *terraform.TerraformTarget, a, e, cha } func (e *ForwardingRule) TerraformLink() *terraformWriter.Literal { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) return terraformWriter.LiteralSelfLink("google_compute_forwarding_rule", name) } diff --git a/upup/pkg/fi/cloudup/gcetasks/httphealthcheck.go b/upup/pkg/fi/cloudup/gcetasks/httphealthcheck.go index 7444505b70650..d14c57858fb47 100644 --- 
a/upup/pkg/fi/cloudup/gcetasks/httphealthcheck.go +++ b/upup/pkg/fi/cloudup/gcetasks/httphealthcheck.go @@ -43,7 +43,7 @@ func (e *HTTPHealthcheck) CompareWithID() *string { func (e *HTTPHealthcheck) Find(c *fi.Context) (*HTTPHealthcheck, error) { cloud := c.Cloud.(gce.GCECloud) - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) r, err := cloud.Compute().HTTPHealthChecks().Get(cloud.Project(), name) if err != nil { if gce.IsNotFound(err) { @@ -52,8 +52,8 @@ func (e *HTTPHealthcheck) Find(c *fi.Context) (*HTTPHealthcheck, error) { return nil, fmt.Errorf("error getting HealthCheck %q: %v", name, err) } actual := &HTTPHealthcheck{ - Name: fi.String(r.Name), - Port: fi.Int64(r.Port), + Name: fi.PtrTo(r.Name), + Port: fi.PtrTo(r.Port), SelfLink: r.SelfLink, } // System fields @@ -67,7 +67,7 @@ func (e *HTTPHealthcheck) Run(c *fi.Context) error { } func (_ *HTTPHealthcheck) CheckChanges(a, e, changes *HTTPHealthcheck) error { - if fi.StringValue(e.Name) == "" { + if fi.ValueOf(e.Name) == "" { return fi.RequiredField("Name") } return nil @@ -76,8 +76,8 @@ func (_ *HTTPHealthcheck) CheckChanges(a, e, changes *HTTPHealthcheck) error { func (h *HTTPHealthcheck) RenderGCE(t *gce.GCEAPITarget, a, e, changes *HTTPHealthcheck) error { if a == nil { o := &compute.HttpHealthCheck{ - Name: fi.StringValue(e.Name), - Port: fi.Int64Value(e.Port), + Name: fi.ValueOf(e.Name), + Port: fi.ValueOf(e.Port), RequestPath: "/healthz", } diff --git a/upup/pkg/fi/cloudup/gcetasks/instance.go b/upup/pkg/fi/cloudup/gcetasks/instance.go index 69765a87a1dfc..aadddf8694fd1 100644 --- a/upup/pkg/fi/cloudup/gcetasks/instance.go +++ b/upup/pkg/fi/cloudup/gcetasks/instance.go @@ -76,8 +76,8 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { actual := &Instance{} actual.Name = &r.Name actual.Tags = append(actual.Tags, r.Tags.Items...) 
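The Preemptible handling just below is the clearest place to see why swapping the typed getters for fi.ValueOf is safe: an unset *bool must still select the non-preemptible MIGRATE scheduling branch. A tiny self-contained check of that semantics; the import path is assumed from the package these hunks modify:

    package main

    import (
        "fmt"

        "k8s.io/kops/upup/pkg/fi" // assumed import path for the helpers
    )

    func main() {
        var preemptible *bool // field left unset in the spec
        // nil reads as the zero value, exactly as fi.BoolValue did, so
        // fi.ValueOf(e.Preemptible) still selects the MIGRATE branch:
        fmt.Println(fi.ValueOf(preemptible))    // false
        fmt.Println(fi.ValueOf(fi.PtrTo(true))) // true
    }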
- actual.Zone = fi.String(lastComponent(r.Zone)) - actual.MachineType = fi.String(lastComponent(r.MachineType)) + actual.Zone = fi.PtrTo(lastComponent(r.Zone)) + actual.MachineType = fi.PtrTo(lastComponent(r.MachineType)) actual.CanIPForward = &r.CanIpForward if r.Scheduling != nil { @@ -85,7 +85,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { } if len(r.NetworkInterfaces) != 0 { ni := r.NetworkInterfaces[0] - actual.Network = &Network{Name: fi.String(lastComponent(ni.Network))} + actual.Network = &Network{Name: fi.PtrTo(lastComponent(ni.Network))} if len(ni.AccessConfigs) != 0 { ac := ni.AccessConfigs[0] if ac.NatIP != "" { @@ -126,7 +126,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { if err != nil { return nil, fmt.Errorf("error parsing source image URL: %v", err) } - actual.Image = fi.String(image) + actual.Image = fi.PtrTo(image) } else { url, err := gce.ParseGoogleCloudURL(disk.Source) if err != nil { @@ -140,7 +140,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { if r.Metadata != nil { actual.Metadata = make(map[string]fi.Resource) for _, i := range r.Metadata.Items { - actual.Metadata[i.Key] = fi.NewStringResource(fi.StringValue(i.Value)) + actual.Metadata[i.Key] = fi.NewStringResource(fi.ValueOf(i.Value)) } actual.metadataFingerprint = r.Metadata.Fingerprint } @@ -189,14 +189,14 @@ func (e *Instance) mapToGCE(project string, ipAddressResolver func(*Address) (*s zone := *e.Zone var scheduling *compute.Scheduling - if fi.BoolValue(e.Preemptible) { + if fi.ValueOf(e.Preemptible) { scheduling = &compute.Scheduling{ OnHostMaintenance: "TERMINATE", Preemptible: true, } } else { scheduling = &compute.Scheduling{ - AutomaticRestart: fi.Bool(true), + AutomaticRestart: fi.PtrTo(true), // TODO: Migrate or terminate? 
OnHostMaintenance: "MIGRATE", Preemptible: false, @@ -263,7 +263,7 @@ func (e *Instance) mapToGCE(project string, ipAddressResolver func(*Address) (*s scopes = append(scopes, s) } serviceAccounts = append(serviceAccounts, &compute.ServiceAccount{ - Email: fi.StringValue(e.ServiceAccount.Email), + Email: fi.ValueOf(e.ServiceAccount.Email), Scopes: scopes, }) } @@ -277,7 +277,7 @@ func (e *Instance) mapToGCE(project string, ipAddressResolver func(*Address) (*s } metadataItems = append(metadataItems, &compute.MetadataItems{ Key: key, - Value: fi.String(v), + Value: fi.PtrTo(v), }) } @@ -485,7 +485,7 @@ func (_ *Instance) RenderTerraform(t *terraform.TerraformTarget, a, e, changes * if i.Scheduling != nil { tf.Scheduling = &terraformScheduling{ - AutomaticRestart: fi.BoolValue(i.Scheduling.AutomaticRestart), + AutomaticRestart: fi.ValueOf(i.Scheduling.AutomaticRestart), OnHostMaintenance: i.Scheduling.OnHostMaintenance, Preemptible: i.Scheduling.Preemptible, } diff --git a/upup/pkg/fi/cloudup/gcetasks/instancegroupmanager.go b/upup/pkg/fi/cloudup/gcetasks/instancegroupmanager.go index fa690b4b48513..0d71bc6f5c0c3 100644 --- a/upup/pkg/fi/cloudup/gcetasks/instancegroupmanager.go +++ b/upup/pkg/fi/cloudup/gcetasks/instancegroupmanager.go @@ -59,14 +59,14 @@ func (e *InstanceGroupManager) Find(c *fi.Context) (*InstanceGroupManager, error actual := &InstanceGroupManager{} actual.Name = &r.Name - actual.Zone = fi.String(lastComponent(r.Zone)) + actual.Zone = fi.PtrTo(lastComponent(r.Zone)) actual.BaseInstanceName = &r.BaseInstanceName actual.TargetSize = &r.TargetSize - actual.InstanceTemplate = &InstanceTemplate{ID: fi.String(lastComponent(r.InstanceTemplate))} + actual.InstanceTemplate = &InstanceTemplate{ID: fi.PtrTo(lastComponent(r.InstanceTemplate))} for _, targetPool := range r.TargetPools { actual.TargetPools = append(actual.TargetPools, &TargetPool{ - Name: fi.String(lastComponent(targetPool)), + Name: fi.PtrTo(lastComponent(targetPool)), }) } // TODO: Sort by name diff --git a/upup/pkg/fi/cloudup/gcetasks/instancetemplate.go b/upup/pkg/fi/cloudup/gcetasks/instancetemplate.go index eac19c0fbeaea..b15524500adcb 100644 --- a/upup/pkg/fi/cloudup/gcetasks/instancetemplate.go +++ b/upup/pkg/fi/cloudup/gcetasks/instancetemplate.go @@ -106,7 +106,7 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { } for _, r := range templates { - if !strings.HasPrefix(r.Name, fi.StringValue(e.NamePrefix)+"-") { + if !strings.HasPrefix(r.Name, fi.ValueOf(e.NamePrefix)+"-") { continue } @@ -120,14 +120,14 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { actual.Tags = append(actual.Tags, p.Tags.Items...) 
actual.Labels = p.Labels - actual.MachineType = fi.String(lastComponent(p.MachineType)) + actual.MachineType = fi.PtrTo(lastComponent(p.MachineType)) actual.CanIPForward = &p.CanIpForward bootDiskImage, err := ShortenImageURL(cloud.Project(), p.Disks[0].InitializeParams.SourceImage) if err != nil { return nil, fmt.Errorf("error parsing source image URL: %v", err) } - actual.BootDiskImage = fi.String(bootDiskImage) + actual.BootDiskImage = fi.PtrTo(bootDiskImage) actual.BootDiskType = &p.Disks[0].InitializeParams.DiskType actual.BootDiskSizeGB = &p.Disks[0].InitializeParams.DiskSizeGb @@ -137,7 +137,7 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { } if len(p.NetworkInterfaces) != 0 { ni := p.NetworkInterfaces[0] - actual.Network = &Network{Name: fi.String(lastComponent(ni.Network))} + actual.Network = &Network{Name: fi.PtrTo(lastComponent(ni.Network))} if len(ni.AliasIpRanges) != 0 { actual.AliasIPRanges = make(map[string]string) @@ -147,7 +147,7 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { } if ni.Subnetwork != "" { - actual.Subnet = &Subnet{Name: fi.String(lastComponent(ni.Subnetwork))} + actual.Subnet = &Subnet{Name: fi.PtrTo(lastComponent(ni.Subnetwork))} } acs := ni.AccessConfigs @@ -158,9 +158,9 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { if acs[0].Type != accessConfigOneToOneNAT { return nil, fmt.Errorf("unexpected access type in template %q: %s", *actual.Name, acs[0].Type) } - actual.HasExternalIP = fi.Bool(true) + actual.HasExternalIP = fi.PtrTo(true) } else { - actual.HasExternalIP = fi.Bool(false) + actual.HasExternalIP = fi.PtrTo(false) } } @@ -191,7 +191,7 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { // if err != nil { // return nil, fmt.Errorf("unable to parse image URL: %q", d.SourceImage) // } - // actual.Image = fi.String(imageURL.Project + "/" + imageURL.Name) + // actual.Image = fi.PtrTo(imageURL.Project + "/" + imageURL.Name) // } // } //} @@ -199,7 +199,7 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { if p.Metadata != nil { actual.Metadata = make(map[string]fi.Resource) for _, meta := range p.Metadata.Items { - actual.Metadata[meta.Key] = fi.NewStringResource(fi.StringValue(meta.Value)) + actual.Metadata[meta.Key] = fi.NewStringResource(fi.ValueOf(meta.Value)) } } @@ -234,10 +234,10 @@ func (e *InstanceTemplate) Run(c *fi.Context) error { } func (_ *InstanceTemplate) CheckChanges(a, e, changes *InstanceTemplate) error { - if fi.StringValue(e.BootDiskImage) == "" { + if fi.ValueOf(e.BootDiskImage) == "" { return fi.RequiredField("BootDiskImage") } - if fi.StringValue(e.MachineType) == "" { + if fi.ValueOf(e.MachineType) == "" { return fi.RequiredField("MachineType") } return nil @@ -247,16 +247,16 @@ func (e *InstanceTemplate) mapToGCE(project string, region string) (*compute.Ins // TODO: This is similar to Instance... var scheduling *compute.Scheduling - if fi.BoolValue(e.Preemptible) { + if fi.ValueOf(e.Preemptible) { scheduling = &compute.Scheduling{ - AutomaticRestart: fi.Bool(false), + AutomaticRestart: fi.PtrTo(false), OnHostMaintenance: "TERMINATE", - ProvisioningModel: fi.StringValue(e.GCPProvisioningModel), + ProvisioningModel: fi.ValueOf(e.GCPProvisioningModel), Preemptible: true, } } else { scheduling = &compute.Scheduling{ - AutomaticRestart: fi.Bool(true), + AutomaticRestart: fi.PtrTo(true), // TODO: Migrate or terminate? 
OnHostMaintenance: "MIGRATE", ProvisioningModel: "STANDARD", @@ -303,7 +303,7 @@ func (e *InstanceTemplate) mapToGCE(project string, region string) (*compute.Ins Kind: "compute#networkInterface", Network: e.Network.URL(networkProject), } - if fi.BoolValue(e.HasExternalIP) { + if fi.ValueOf(e.HasExternalIP) { ni.AccessConfigs = []*compute.AccessConfig{ { Kind: "compute#accessConfig", @@ -337,7 +337,7 @@ func (e *InstanceTemplate) mapToGCE(project string, region string) (*compute.Ins var serviceAccounts []*compute.ServiceAccount for _, sa := range e.ServiceAccounts { serviceAccounts = append(serviceAccounts, &compute.ServiceAccount{ - Email: fi.StringValue(sa.Email), + Email: fi.ValueOf(sa.Email), Scopes: scopes, }) } @@ -350,7 +350,7 @@ func (e *InstanceTemplate) mapToGCE(project string, region string) (*compute.Ins } metadataItems = append(metadataItems, &compute.MetadataItems{ Key: key, - Value: fi.String(v), + Value: fi.PtrTo(v), }) } @@ -460,7 +460,7 @@ func (_ *InstanceTemplate) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Instanc if a == nil { klog.V(4).Infof("Creating InstanceTemplate %v", i) - name := fi.StringValue(e.NamePrefix) + "-" + strconv.FormatInt(time.Now().Unix(), 10) + name := fi.ValueOf(e.NamePrefix) + "-" + strconv.FormatInt(time.Now().Unix(), 10) e.ID = &name i.Name = name @@ -568,7 +568,7 @@ func addMetadata(target *terraform.TerraformTarget, name string, metadata *compu } m := make(map[string]*terraformWriter.Literal) for _, g := range metadata.Items { - val := fi.StringValue(g.Value) + val := fi.ValueOf(g.Value) if strings.Contains(val, "\n") { tfResource, err := target.AddFileBytes("google_compute_instance_template", name, "metadata_"+g.Key, []byte(val), false) if err != nil { @@ -609,10 +609,10 @@ func (_ *InstanceTemplate) RenderTerraform(t *terraform.TerraformTarget, a, e, c return err } - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) tf := &terraformInstanceTemplate{ - NamePrefix: fi.StringValue(e.NamePrefix) + "-", + NamePrefix: fi.ValueOf(e.NamePrefix) + "-", } tf.CanIPForward = i.Properties.CanIpForward @@ -649,7 +649,7 @@ func (_ *InstanceTemplate) RenderTerraform(t *terraform.TerraformTarget, a, e, c if i.Properties.Scheduling != nil { tf.Scheduling = &terraformScheduling{ - AutomaticRestart: fi.BoolValue(i.Properties.Scheduling.AutomaticRestart), + AutomaticRestart: fi.ValueOf(i.Properties.Scheduling.AutomaticRestart), OnHostMaintenance: i.Properties.Scheduling.OnHostMaintenance, Preemptible: i.Properties.Scheduling.Preemptible, ProvisioningModel: i.Properties.Scheduling.ProvisioningModel, diff --git a/upup/pkg/fi/cloudup/gcetasks/network.go b/upup/pkg/fi/cloudup/gcetasks/network.go index f545c3ca97ecd..4ea44355f0572 100644 --- a/upup/pkg/fi/cloudup/gcetasks/network.go +++ b/upup/pkg/fi/cloudup/gcetasks/network.go @@ -105,13 +105,13 @@ func (e *Network) Run(c *fi.Context) error { } func (_ *Network) CheckChanges(a, e, changes *Network) error { - cidr := fi.StringValue(e.CIDR) + cidr := fi.ValueOf(e.CIDR) switch e.Mode { case "legacy": if cidr == "" { return fmt.Errorf("CIDR must specified for networks where mode=legacy") } - klog.Warningf("using legacy mode for GCE network %q", fi.StringValue(e.Name)) + klog.Warningf("using legacy mode for GCE network %q", fi.ValueOf(e.Name)) default: if cidr != "" { return fmt.Errorf("CIDR cannot specified for networks where mode=%s", e.Mode) @@ -126,7 +126,7 @@ func (_ *Network) CheckChanges(a, e, changes *Network) error { case "": // Treated as "keep existing", only allowed for shared mode - if 
!fi.BoolValue(e.Shared) { + if !fi.ValueOf(e.Shared) { return fmt.Errorf("must specify mode for (non-shared) Network") } @@ -138,16 +138,16 @@ func (_ *Network) CheckChanges(a, e, changes *Network) error { } func (_ *Network) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Network) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Verify the network was found if a == nil { - return fmt.Errorf("Network with name %q not found", fi.StringValue(e.Name)) + return fmt.Errorf("Network with name %q not found", fi.ValueOf(e.Name)) } } if a == nil { - klog.V(2).Infof("Creating Network with CIDR: %q", fi.StringValue(e.CIDR)) + klog.V(2).Infof("Creating Network with CIDR: %q", fi.ValueOf(e.CIDR)) network := &compute.Network{ Name: *e.Name, @@ -155,7 +155,7 @@ func (_ *Network) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Network) error { switch e.Mode { case "legacy": - network.IPv4Range = fi.StringValue(e.CIDR) + network.IPv4Range = fi.ValueOf(e.CIDR) case "auto": network.AutoCreateSubnetworks = true @@ -200,7 +200,7 @@ type terraformNetwork struct { } func (_ *Network) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Network) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed return nil @@ -216,17 +216,17 @@ func (_ *Network) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *N tf.IPv4Range = e.CIDR case "auto": - tf.AutoCreateSubnetworks = fi.Bool(true) + tf.AutoCreateSubnetworks = fi.PtrTo(true) case "custom": - tf.AutoCreateSubnetworks = fi.Bool(false) + tf.AutoCreateSubnetworks = fi.PtrTo(false) } return t.RenderResource("google_compute_network", *e.Name, tf) } func (e *Network) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.Name == nil { klog.Fatalf("Name must be set, if network is shared: %#v", e) diff --git a/upup/pkg/fi/cloudup/gcetasks/poolhealthcheck.go b/upup/pkg/fi/cloudup/gcetasks/poolhealthcheck.go index 373ef2243c92c..52ff918cd18f2 100644 --- a/upup/pkg/fi/cloudup/gcetasks/poolhealthcheck.go +++ b/upup/pkg/fi/cloudup/gcetasks/poolhealthcheck.go @@ -55,7 +55,7 @@ func (e *PoolHealthCheck) CompareWithID() *string { func (e *PoolHealthCheck) Find(c *fi.Context) (*PoolHealthCheck, error) { cloud := c.Cloud.(gce.GCECloud) - name := fi.StringValue(e.Pool.Name) + name := fi.ValueOf(e.Pool.Name) r, err := cloud.Compute().TargetPools().Get(cloud.Project(), cloud.Region(), name) if err != nil { if gce.IsNotFound(err) { @@ -86,7 +86,7 @@ func (_ *PoolHealthCheck) CheckChanges(a, e, changes *PoolHealthCheck) error { func (p *PoolHealthCheck) RenderGCE(t *gce.GCEAPITarget, a, e, changes *PoolHealthCheck) error { if a == nil { - targetPool := fi.StringValue(p.Pool.Name) + targetPool := fi.ValueOf(p.Pool.Name) req := &compute.TargetPoolsAddHealthCheckRequest{ HealthChecks: []*compute.HealthCheckReference{ { diff --git a/upup/pkg/fi/cloudup/gcetasks/projectiambinding.go b/upup/pkg/fi/cloudup/gcetasks/projectiambinding.go index 4d4779b170ae4..b4d6f3f52cfa1 100644 --- a/upup/pkg/fi/cloudup/gcetasks/projectiambinding.go +++ b/upup/pkg/fi/cloudup/gcetasks/projectiambinding.go @@ -49,9 +49,9 @@ func (e *ProjectIAMBinding) Find(c *fi.Context) (*ProjectIAMBinding, error) { cloud := c.Cloud.(gce.GCECloud) - projectID := fi.StringValue(e.Project) - member := fi.StringValue(e.Member) - role := fi.StringValue(e.Role) + projectID := fi.ValueOf(e.Project) + member := fi.ValueOf(e.Member) + role := 
fi.ValueOf(e.Role) klog.V(2).Infof("Checking IAM for project %q", projectID) options := &cloudresourcemanager.GetIamPolicyRequest{Options: &cloudresourcemanager.GetPolicyOptions{RequestedPolicyVersion: 3}} @@ -85,13 +85,13 @@ func (e *ProjectIAMBinding) Run(c *fi.Context) error { } func (_ *ProjectIAMBinding) CheckChanges(a, e, changes *ProjectIAMBinding) error { - if fi.StringValue(e.Project) == "" { + if fi.ValueOf(e.Project) == "" { return fi.RequiredField("Project") } - if fi.StringValue(e.Member) == "" { + if fi.ValueOf(e.Member) == "" { return fi.RequiredField("Member") } - if fi.StringValue(e.Role) == "" { + if fi.ValueOf(e.Role) == "" { return fi.RequiredField("Role") } return nil @@ -100,9 +100,9 @@ func (_ *ProjectIAMBinding) CheckChanges(a, e, changes *ProjectIAMBinding) error func (_ *ProjectIAMBinding) RenderGCE(t *gce.GCEAPITarget, a, e, changes *ProjectIAMBinding) error { ctx := context.TODO() - projectID := fi.StringValue(e.Project) - member := fi.StringValue(e.Member) - role := fi.StringValue(e.Role) + projectID := fi.ValueOf(e.Project) + member := fi.ValueOf(e.Member) + role := fi.ValueOf(e.Role) request := &cloudresourcemanager.GetIamPolicyRequest{} policy, err := t.Cloud.CloudResourceManager().Projects.GetIamPolicy(projectID, request).Context(ctx).Do() @@ -134,9 +134,9 @@ type terraformProjectIAMBinding struct { func (_ *ProjectIAMBinding) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ProjectIAMBinding) error { tf := &terraformProjectIAMBinding{ - Project: fi.StringValue(e.Project), - Role: fi.StringValue(e.Role), - Members: []string{fi.StringValue(e.Member)}, + Project: fi.ValueOf(e.Project), + Role: fi.ValueOf(e.Role), + Members: []string{fi.ValueOf(e.Member)}, } return t.RenderResource("google_project_iam_binding", *e.Name, tf) diff --git a/upup/pkg/fi/cloudup/gcetasks/projectiambinding_test.go b/upup/pkg/fi/cloudup/gcetasks/projectiambinding_test.go index bb58afefdcc8a..0c8c9a904ce40 100644 --- a/upup/pkg/fi/cloudup/gcetasks/projectiambinding_test.go +++ b/upup/pkg/fi/cloudup/gcetasks/projectiambinding_test.go @@ -34,9 +34,9 @@ func TestProjectIAMBinding(t *testing.T) { binding := &ProjectIAMBinding{ Lifecycle: fi.LifecycleSync, - Project: fi.String("testproject"), - Member: fi.String("serviceAccount:foo@testproject.iam.gserviceaccount.com"), - Role: fi.String("roles/owner"), + Project: fi.PtrTo("testproject"), + Member: fi.PtrTo("serviceAccount:foo@testproject.iam.gserviceaccount.com"), + Role: fi.PtrTo("roles/owner"), } return map[string]fi.Task{ diff --git a/upup/pkg/fi/cloudup/gcetasks/router.go b/upup/pkg/fi/cloudup/gcetasks/router.go index 98eb90b5fea32..c08012d0bc87c 100644 --- a/upup/pkg/fi/cloudup/gcetasks/router.go +++ b/upup/pkg/fi/cloudup/gcetasks/router.go @@ -89,8 +89,8 @@ func (r *Router) Find(c *fi.Context) (*Router, error) { actual := &Router{ Name: &found.Name, Lifecycle: r.Lifecycle, - Network: &Network{Name: fi.String(lastComponent(found.Network))}, - Region: fi.String(lastComponent(found.Region)), + Network: &Network{Name: fi.PtrTo(lastComponent(found.Network))}, + Region: fi.PtrTo(lastComponent(found.Region)), NATIPAllocationOption: &nat.NatIpAllocateOption, SourceSubnetworkIPRangesToNAT: &nat.SourceSubnetworkIpRangesToNat, } @@ -147,7 +147,7 @@ func (*Router) CheckChanges(a, e, changes *Router) error { func (*Router) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Router) error { cloud := t.Cloud project := cloud.Project() - region := fi.StringValue(e.Region) + region := fi.ValueOf(e.Region) if a == nil { klog.V(2).Infof("Creating 
Cloud NAT Gateway %v", e.Name) diff --git a/upup/pkg/fi/cloudup/gcetasks/serviceaccount.go b/upup/pkg/fi/cloudup/gcetasks/serviceaccount.go index 2ff5966b06b63..bebb185c47a5b 100644 --- a/upup/pkg/fi/cloudup/gcetasks/serviceaccount.go +++ b/upup/pkg/fi/cloudup/gcetasks/serviceaccount.go @@ -52,7 +52,7 @@ func (e *ServiceAccount) Find(c *fi.Context) (*ServiceAccount, error) { ctx := context.TODO() - email := fi.StringValue(e.Email) + email := fi.ValueOf(e.Email) if email == "default" { // Special case - the default serviceaccount always exists @@ -103,9 +103,9 @@ func (_ *ServiceAccount) RenderGCE(t *gce.GCEAPITarget, a, e, changes *ServiceAc cloud := t.Cloud - email := fi.StringValue(e.Email) + email := fi.ValueOf(e.Email) - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Verify the service account was found if a == nil { @@ -126,8 +126,8 @@ func (_ *ServiceAccount) RenderGCE(t *gce.GCEAPITarget, a, e, changes *ServiceAc sa := &iam.CreateServiceAccountRequest{ AccountId: accountID, ServiceAccount: &iam.ServiceAccount{ - Description: fi.StringValue(e.Description), - DisplayName: fi.StringValue(e.DisplayName), + Description: fi.ValueOf(e.Description), + DisplayName: fi.ValueOf(e.DisplayName), }, } @@ -142,8 +142,8 @@ func (_ *ServiceAccount) RenderGCE(t *gce.GCEAPITarget, a, e, changes *ServiceAc if changes.Description != nil || changes.DisplayName != nil { sa := &iam.ServiceAccount{ Email: email, - Description: fi.StringValue(e.Description), - DisplayName: fi.StringValue(e.DisplayName), + Description: fi.ValueOf(e.Description), + DisplayName: fi.ValueOf(e.DisplayName), } _, err := cloud.IAM().ServiceAccounts().Update(ctx, fqn, sa) @@ -172,13 +172,13 @@ type terraformServiceAccount struct { } func (_ *ServiceAccount) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ServiceAccount) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed return nil } - email := fi.StringValue(e.Email) + email := fi.ValueOf(e.Email) accountID, projectID, err := gce.SplitServiceAccountEmail(email) if err != nil { return err @@ -195,9 +195,9 @@ func (_ *ServiceAccount) RenderTerraform(t *terraform.TerraformTarget, a, e, cha } func (e *ServiceAccount) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { - email := fi.StringValue(e.Email) + email := fi.ValueOf(e.Email) if email == "" { klog.Fatalf("Email must be set, if ServiceAccount is shared: %#v", e) } diff --git a/upup/pkg/fi/cloudup/gcetasks/serviceaccount_test.go b/upup/pkg/fi/cloudup/gcetasks/serviceaccount_test.go index bade222ed7c69..e80654ee6b20b 100644 --- a/upup/pkg/fi/cloudup/gcetasks/serviceaccount_test.go +++ b/upup/pkg/fi/cloudup/gcetasks/serviceaccount_test.go @@ -38,12 +38,12 @@ func TestServiceAccount(t *testing.T) { // We define a function so we can rebuild the tasks, because we modify in-place when running buildTasks := func() map[string]fi.Task { serviceAccount := &ServiceAccount{ - Name: fi.String("test"), + Name: fi.PtrTo("test"), Lifecycle: fi.LifecycleSync, - Email: fi.String("test@testproject.iam.gserviceaccount.com"), - Description: fi.String("description of ServiceAccount"), - DisplayName: fi.String("display name of ServiceAccount"), + Email: fi.PtrTo("test@testproject.iam.gserviceaccount.com"), + Description: fi.PtrTo("description of ServiceAccount"), + DisplayName: fi.PtrTo("display name of ServiceAccount"), } return map[string]fi.Task{ diff --git 
a/upup/pkg/fi/cloudup/gcetasks/storagebucketacl.go b/upup/pkg/fi/cloudup/gcetasks/storagebucketacl.go index 6022bca4283e2..97959d65afe16 100644 --- a/upup/pkg/fi/cloudup/gcetasks/storagebucketacl.go +++ b/upup/pkg/fi/cloudup/gcetasks/storagebucketacl.go @@ -47,8 +47,8 @@ func (e *StorageBucketAcl) CompareWithID() *string { func (e *StorageBucketAcl) Find(c *fi.Context) (*StorageBucketAcl, error) { cloud := c.Cloud.(gce.GCECloud) - bucket := fi.StringValue(e.Bucket) - entity := fi.StringValue(e.Entity) + bucket := fi.ValueOf(e.Bucket) + entity := fi.ValueOf(e.Entity) klog.V(2).Infof("Checking GCS bucket ACL for gs://%s for %s", bucket, entity) r, err := cloud.Storage().BucketAccessControls.Get(bucket, entity).Do() @@ -77,19 +77,19 @@ func (e *StorageBucketAcl) Run(c *fi.Context) error { } func (_ *StorageBucketAcl) CheckChanges(a, e, changes *StorageBucketAcl) error { - if fi.StringValue(e.Bucket) == "" { + if fi.ValueOf(e.Bucket) == "" { return fi.RequiredField("Bucket") } - if fi.StringValue(e.Entity) == "" { + if fi.ValueOf(e.Entity) == "" { return fi.RequiredField("Entity") } return nil } func (_ *StorageBucketAcl) RenderGCE(t *gce.GCEAPITarget, a, e, changes *StorageBucketAcl) error { - bucket := fi.StringValue(e.Bucket) - entity := fi.StringValue(e.Entity) - role := fi.StringValue(e.Role) + bucket := fi.ValueOf(e.Bucket) + entity := fi.ValueOf(e.Entity) + role := fi.ValueOf(e.Role) acl := &storage.BucketAccessControl{ Entity: entity, @@ -123,9 +123,9 @@ type terraformStorageBucketAcl struct { func (_ *StorageBucketAcl) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *StorageBucketAcl) error { var roleEntities []string - roleEntities = append(roleEntities, fi.StringValue(e.Role)+":"+fi.StringValue(e.Entity)) + roleEntities = append(roleEntities, fi.ValueOf(e.Role)+":"+fi.ValueOf(e.Entity)) tf := &terraformStorageBucketAcl{ - Bucket: fi.StringValue(e.Bucket), + Bucket: fi.ValueOf(e.Bucket), RoleEntity: roleEntities, } diff --git a/upup/pkg/fi/cloudup/gcetasks/storagebucketiam.go b/upup/pkg/fi/cloudup/gcetasks/storagebucketiam.go index d81430c15d02f..4b5c426255e3f 100644 --- a/upup/pkg/fi/cloudup/gcetasks/storagebucketiam.go +++ b/upup/pkg/fi/cloudup/gcetasks/storagebucketiam.go @@ -49,9 +49,9 @@ func (e *StorageBucketIAM) Find(c *fi.Context) (*StorageBucketIAM, error) { cloud := c.Cloud.(gce.GCECloud) - bucket := fi.StringValue(e.Bucket) - member := fi.StringValue(e.Member) - role := fi.StringValue(e.Role) + bucket := fi.ValueOf(e.Bucket) + member := fi.ValueOf(e.Member) + role := fi.ValueOf(e.Role) klog.V(2).Infof("Checking GCS bucket IAM for gs://%s for %s", bucket, member) policy, err := cloud.Storage().Buckets.GetIamPolicy(bucket).Context(ctx).Do() @@ -84,13 +84,13 @@ func (e *StorageBucketIAM) Run(c *fi.Context) error { } func (_ *StorageBucketIAM) CheckChanges(a, e, changes *StorageBucketIAM) error { - if fi.StringValue(e.Bucket) == "" { + if fi.ValueOf(e.Bucket) == "" { return fi.RequiredField("Bucket") } - if fi.StringValue(e.Member) == "" { + if fi.ValueOf(e.Member) == "" { return fi.RequiredField("Member") } - if fi.StringValue(e.Role) == "" { + if fi.ValueOf(e.Role) == "" { return fi.RequiredField("Role") } return nil @@ -99,9 +99,9 @@ func (_ *StorageBucketIAM) CheckChanges(a, e, changes *StorageBucketIAM) error { func (_ *StorageBucketIAM) RenderGCE(t *gce.GCEAPITarget, a, e, changes *StorageBucketIAM) error { ctx := context.TODO() - bucket := fi.StringValue(e.Bucket) - member := fi.StringValue(e.Member) - role := fi.StringValue(e.Role) + bucket := 
fi.ValueOf(e.Bucket) + member := fi.ValueOf(e.Member) + role := fi.ValueOf(e.Role) klog.V(2).Infof("Creating GCS bucket IAM for gs://%s for %s as %s", bucket, member, role) @@ -133,9 +133,9 @@ type terraformStorageBucketIAM struct { func (_ *StorageBucketIAM) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *StorageBucketIAM) error { tf := &terraformStorageBucketIAM{ - Bucket: fi.StringValue(e.Bucket), - Role: fi.StringValue(e.Role), - Member: fi.StringValue(e.Member), + Bucket: fi.ValueOf(e.Bucket), + Role: fi.ValueOf(e.Role), + Member: fi.ValueOf(e.Member), } return t.RenderResource("google_storage_bucket_iam_member", *e.Name, tf) diff --git a/upup/pkg/fi/cloudup/gcetasks/storagebucketiam_test.go b/upup/pkg/fi/cloudup/gcetasks/storagebucketiam_test.go index b0578627d3e60..826ba3e1a9178 100644 --- a/upup/pkg/fi/cloudup/gcetasks/storagebucketiam_test.go +++ b/upup/pkg/fi/cloudup/gcetasks/storagebucketiam_test.go @@ -34,9 +34,9 @@ func TestStorageBucketIAM(t *testing.T) { binding := &StorageBucketIAM{ Lifecycle: fi.LifecycleSync, - Bucket: fi.String("bucket1"), - Member: fi.String("serviceAccount:foo@testproject.iam.gserviceaccount.com"), - Role: fi.String("roles/owner"), + Bucket: fi.PtrTo("bucket1"), + Member: fi.PtrTo("serviceAccount:foo@testproject.iam.gserviceaccount.com"), + Role: fi.PtrTo("roles/owner"), } return map[string]fi.Task{ diff --git a/upup/pkg/fi/cloudup/gcetasks/storageobjectacl.go b/upup/pkg/fi/cloudup/gcetasks/storageobjectacl.go index 35f26f9d833c6..f9a1dbc7cc726 100644 --- a/upup/pkg/fi/cloudup/gcetasks/storageobjectacl.go +++ b/upup/pkg/fi/cloudup/gcetasks/storageobjectacl.go @@ -48,9 +48,9 @@ func (e *StorageObjectAcl) CompareWithID() *string { func (e *StorageObjectAcl) Find(c *fi.Context) (*StorageObjectAcl, error) { cloud := c.Cloud.(gce.GCECloud) - bucket := fi.StringValue(e.Bucket) - object := fi.StringValue(e.Object) - entity := fi.StringValue(e.Entity) + bucket := fi.ValueOf(e.Bucket) + object := fi.ValueOf(e.Object) + entity := fi.ValueOf(e.Entity) klog.V(2).Infof("Checking GCS object ACL for gs://%s/%s for %s", bucket, object, entity) r, err := cloud.Storage().ObjectAccessControls.Get(bucket, object, entity).Do() @@ -80,23 +80,23 @@ func (e *StorageObjectAcl) Run(c *fi.Context) error { } func (_ *StorageObjectAcl) CheckChanges(a, e, changes *StorageObjectAcl) error { - if fi.StringValue(e.Bucket) == "" { + if fi.ValueOf(e.Bucket) == "" { return fi.RequiredField("Bucket") } - if fi.StringValue(e.Object) == "" { + if fi.ValueOf(e.Object) == "" { return fi.RequiredField("Object") } - if fi.StringValue(e.Entity) == "" { + if fi.ValueOf(e.Entity) == "" { return fi.RequiredField("Entity") } return nil } func (_ *StorageObjectAcl) RenderGCE(t *gce.GCEAPITarget, a, e, changes *StorageObjectAcl) error { - bucket := fi.StringValue(e.Bucket) - object := fi.StringValue(e.Object) - entity := fi.StringValue(e.Entity) - role := fi.StringValue(e.Role) + bucket := fi.ValueOf(e.Bucket) + object := fi.ValueOf(e.Object) + entity := fi.ValueOf(e.Entity) + role := fi.ValueOf(e.Role) acl := &storage.ObjectAccessControl{ Entity: entity, @@ -131,10 +131,10 @@ type terraformStorageObjectAcl struct { func (_ *StorageObjectAcl) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *StorageObjectAcl) error { var roleEntities []string - roleEntities = append(roleEntities, fi.StringValue(e.Role)+":"+fi.StringValue(e.Name)) + roleEntities = append(roleEntities, fi.ValueOf(e.Role)+":"+fi.ValueOf(e.Name)) tf := &terraformStorageObjectAcl{ - Bucket: fi.StringValue(e.Bucket), 
- Object: fi.StringValue(e.Object), + Bucket: fi.ValueOf(e.Bucket), + Object: fi.ValueOf(e.Object), RoleEntity: roleEntities, } diff --git a/upup/pkg/fi/cloudup/gcetasks/subnet.go b/upup/pkg/fi/cloudup/gcetasks/subnet.go index 1c651b7ebd99c..2f78384a68a1f 100644 --- a/upup/pkg/fi/cloudup/gcetasks/subnet.go +++ b/upup/pkg/fi/cloudup/gcetasks/subnet.go @@ -66,11 +66,11 @@ func (e *Subnet) Find(c *fi.Context) (*Subnet, error) { actual := &Subnet{} actual.Name = &s.Name - actual.Network = &Network{Name: fi.String(lastComponent(s.Network))} - actual.Region = fi.String(lastComponent(s.Region)) + actual.Network = &Network{Name: fi.PtrTo(lastComponent(s.Network))} + actual.Region = fi.PtrTo(lastComponent(s.Region)) actual.CIDR = &s.IpCidrRange - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) { actual.SecondaryIpRanges = make(map[string]string) for _, r := range s.SecondaryIpRanges { @@ -102,11 +102,11 @@ func (_ *Subnet) CheckChanges(a, e, changes *Subnet) error { } func (_ *Subnet) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Subnet) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Verify the subnet was found if a == nil { - return fmt.Errorf("Subnet with name %q not found", fi.StringValue(e.Name)) + return fmt.Errorf("Subnet with name %q not found", fi.ValueOf(e.Name)) } } @@ -114,10 +114,10 @@ func (_ *Subnet) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Subnet) error { project := cloud.Project() if a == nil { - klog.V(2).Infof("Creating Subnet with CIDR: %q", fi.StringValue(e.CIDR)) + klog.V(2).Infof("Creating Subnet with CIDR: %q", fi.ValueOf(e.CIDR)) subnet := &compute.Subnetwork{ - IpCidrRange: fi.StringValue(e.CIDR), + IpCidrRange: fi.ValueOf(e.CIDR), Name: *e.Name, Network: e.Network.URL(project), } @@ -252,7 +252,7 @@ type terraformSubnetRange struct { } func (_ *Subnet) RenderSubnet(t *terraform.TerraformTarget, a, e, changes *Subnet) error { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { // Not terraform owned / managed return nil @@ -276,7 +276,7 @@ func (_ *Subnet) RenderSubnet(t *terraform.TerraformTarget, a, e, changes *Subne } func (e *Subnet) TerraformLink() *terraformWriter.Literal { - shared := fi.BoolValue(e.Shared) + shared := fi.ValueOf(e.Shared) if shared { if e.Name == nil { klog.Fatalf("GCEName must be set, if subnet is shared: %#v", e) diff --git a/upup/pkg/fi/cloudup/gcetasks/targetpool.go b/upup/pkg/fi/cloudup/gcetasks/targetpool.go index 64436d7504209..2228b5c8806ff 100644 --- a/upup/pkg/fi/cloudup/gcetasks/targetpool.go +++ b/upup/pkg/fi/cloudup/gcetasks/targetpool.go @@ -42,7 +42,7 @@ func (e *TargetPool) CompareWithID() *string { func (e *TargetPool) Find(c *fi.Context) (*TargetPool, error) { cloud := c.Cloud.(gce.GCECloud) - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) r, err := cloud.Compute().TargetPools().Get(cloud.Project(), cloud.Region(), name) if err != nil { @@ -53,7 +53,7 @@ func (e *TargetPool) Find(c *fi.Context) (*TargetPool, error) { } actual := &TargetPool{} - actual.Name = fi.String(r.Name) + actual.Name = fi.PtrTo(r.Name) // Avoid spurious changes actual.Lifecycle = e.Lifecycle @@ -66,20 +66,20 @@ func (e *TargetPool) Run(c *fi.Context) error { } func (_ *TargetPool) CheckChanges(a, e, changes *TargetPool) error { - if fi.StringValue(e.Name) == "" { + if fi.ValueOf(e.Name) == "" { return fi.RequiredField("Name") } return nil } func (e *TargetPool) URL(cloud gce.GCECloud) string { - name := fi.StringValue(e.Name) + name := 
fi.ValueOf(e.Name) return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/targetPools/%s", cloud.Project(), cloud.Region(), name) } func (_ *TargetPool) RenderGCE(t *gce.GCEAPITarget, a, e, changes *TargetPool) error { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) o := &compute.TargetPool{ Name: name, @@ -112,7 +112,7 @@ type terraformTargetPool struct { } func (_ *TargetPool) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *TargetPool) error { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) tf := &terraformTargetPool{ Name: name, @@ -122,7 +122,7 @@ func (_ *TargetPool) RenderTerraform(t *terraform.TerraformTarget, a, e, changes } func (e *TargetPool) TerraformLink() *terraformWriter.Literal { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) return terraformWriter.LiteralSelfLink("google_compute_target_pool", name) } diff --git a/upup/pkg/fi/cloudup/hetzner/cloud.go b/upup/pkg/fi/cloudup/hetzner/cloud.go index 6febea377c2c7..ec6e5d67a9492 100644 --- a/upup/pkg/fi/cloudup/hetzner/cloud.go +++ b/upup/pkg/fi/cloudup/hetzner/cloud.go @@ -362,9 +362,9 @@ func buildCloudInstanceGroup(ig *kops.InstanceGroup, sg []*hcloud.Server, nodeMa HumanName: ig.Name, InstanceGroup: ig, Raw: sg, - MinSize: int(fi.Int32Value(ig.Spec.MinSize)), - TargetSize: int(fi.Int32Value(ig.Spec.MinSize)), - MaxSize: int(fi.Int32Value(ig.Spec.MaxSize)), + MinSize: int(fi.ValueOf(ig.Spec.MinSize)), + TargetSize: int(fi.ValueOf(ig.Spec.MinSize)), + MaxSize: int(fi.ValueOf(ig.Spec.MaxSize)), } for _, server := range sg { diff --git a/upup/pkg/fi/cloudup/hetznertasks/firewall.go b/upup/pkg/fi/cloudup/hetznertasks/firewall.go index 01827b477b4c4..ad958f92e0b0c 100644 --- a/upup/pkg/fi/cloudup/hetznertasks/firewall.go +++ b/upup/pkg/fi/cloudup/hetznertasks/firewall.go @@ -42,7 +42,7 @@ type Firewall struct { var _ fi.CompareWithID = &Firewall{} func (v *Firewall) CompareWithID() *string { - return fi.String(strconv.Itoa(fi.IntValue(v.ID))) + return fi.PtrTo(strconv.Itoa(fi.ValueOf(v.ID))) } func (v *Firewall) Find(c *fi.Context) (*Firewall, error) { @@ -56,11 +56,11 @@ func (v *Firewall) Find(c *fi.Context) (*Firewall, error) { } for _, firewall := range firewalls { - if firewall.Name == fi.StringValue(v.Name) { + if firewall.Name == fi.ValueOf(v.Name) { matches := &Firewall{ Lifecycle: v.Lifecycle, - Name: fi.String(firewall.Name), - ID: fi.Int(firewall.ID), + Name: fi.PtrTo(firewall.Name), + ID: fi.PtrTo(firewall.ID), Labels: firewall.Labels, } for _, rule := range firewall.Rules { @@ -115,7 +115,7 @@ func (_ *Firewall) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Fir client := t.Cloud.FirewallClient() if a == nil { opts := hcloud.FirewallCreateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), ApplyTo: []hcloud.FirewallResource{ { Type: hcloud.FirewallResourceTypeLabelSelector, @@ -139,7 +139,7 @@ func (_ *Firewall) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Fir } } else { - firewall, _, err := client.Get(context.TODO(), fi.StringValue(e.Name)) + firewall, _, err := client.Get(context.TODO(), fi.ValueOf(e.Name)) if err != nil { return err } @@ -147,7 +147,7 @@ func (_ *Firewall) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Fir // Update the labels if changes.Name != nil || len(changes.Labels) != 0 { _, _, err := client.Update(context.TODO(), firewall, hcloud.FirewallUpdateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), Labels: e.Labels, }) if err != nil { @@ -231,19 
+231,19 @@ func (_ *Firewall) RenderTerraform(t *terraform.TerraformTarget, a, e, changes * Name: e.Name, ApplyTo: []*terraformFirewallApplyTo{ { - LabelSelector: fi.String(e.Selector), + LabelSelector: fi.PtrTo(e.Selector), }, }, Labels: e.Labels, } for _, rule := range e.Rules { tfr := &terraformFirewallRule{ - Direction: fi.String(string(rule.Direction)), - Protocol: fi.String(string(rule.Protocol)), + Direction: fi.PtrTo(string(rule.Direction)), + Protocol: fi.PtrTo(string(rule.Protocol)), Port: rule.Port, } for _, ip := range rule.SourceIPs { - tfr.SourceIPs = append(tfr.SourceIPs, fi.String(ip.String())) + tfr.SourceIPs = append(tfr.SourceIPs, fi.PtrTo(ip.String())) } tf.Rules = append(tf.Rules, tfr) } diff --git a/upup/pkg/fi/cloudup/hetznertasks/loadbalancer.go b/upup/pkg/fi/cloudup/hetznertasks/loadbalancer.go index 809c86b9eacc9..9044bca893e83 100644 --- a/upup/pkg/fi/cloudup/hetznertasks/loadbalancer.go +++ b/upup/pkg/fi/cloudup/hetznertasks/loadbalancer.go @@ -47,7 +47,7 @@ type LoadBalancer struct { var _ fi.CompareWithID = &LoadBalancer{} func (v *LoadBalancer) CompareWithID() *string { - return fi.String(strconv.Itoa(fi.IntValue(v.ID))) + return fi.PtrTo(strconv.Itoa(fi.ValueOf(v.ID))) } var _ fi.HasAddress = &LoadBalancer{} @@ -73,15 +73,15 @@ func (v *LoadBalancer) FindAddresses(c *fi.Context) ([]string, error) { } for _, loadbalancer := range loadbalancers { - if loadbalancer.Name == fi.StringValue(v.Name) { + if loadbalancer.Name == fi.ValueOf(v.Name) { var addresses []string if loadbalancer.PublicNet.IPv4.IP == nil { - return nil, fmt.Errorf("failed to find load-balancer %q public address", fi.StringValue(v.Name)) + return nil, fmt.Errorf("failed to find load-balancer %q public address", fi.ValueOf(v.Name)) } addresses = append(addresses, loadbalancer.PublicNet.IPv4.IP.String()) for _, privateNetwork := range loadbalancer.PrivateNet { if privateNetwork.IP == nil { - return nil, fmt.Errorf("failed to find load-balancer %q private address", fi.StringValue(v.Name)) + return nil, fmt.Errorf("failed to find load-balancer %q private address", fi.ValueOf(v.Name)) } addresses = append(addresses, privateNetwork.IP.String()) } @@ -104,11 +104,11 @@ func (v *LoadBalancer) Find(c *fi.Context) (*LoadBalancer, error) { } for _, loadbalancer := range loadbalancers { - if loadbalancer.Name == fi.StringValue(v.Name) { + if loadbalancer.Name == fi.ValueOf(v.Name) { matches := &LoadBalancer{ Lifecycle: v.Lifecycle, - Name: fi.String(loadbalancer.Name), - ID: fi.Int(loadbalancer.ID), + Name: fi.PtrTo(loadbalancer.Name), + ID: fi.PtrTo(loadbalancer.ID), Labels: loadbalancer.Labels, } @@ -122,8 +122,8 @@ func (v *LoadBalancer) Find(c *fi.Context) (*LoadBalancer, error) { for _, service := range loadbalancer.Services { loadbalancerService := LoadBalancerService{ Protocol: string(service.Protocol), - ListenerPort: fi.Int(service.ListenPort), - DestinationPort: fi.Int(service.DestinationPort), + ListenerPort: fi.PtrTo(service.ListenPort), + DestinationPort: fi.PtrTo(service.DestinationPort), } matches.Services = append(matches.Services, &loadbalancerService) } @@ -196,16 +196,16 @@ func (_ *LoadBalancer) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes if a == nil { if e.Network == nil { - return fmt.Errorf("failed to find network for loadbalancer %q", fi.StringValue(e.Name)) + return fmt.Errorf("failed to find network for loadbalancer %q", fi.ValueOf(e.Name)) } - networkID, err := strconv.Atoi(fi.StringValue(e.Network.ID)) + networkID, err := strconv.Atoi(fi.ValueOf(e.Network.ID)) if 
err != nil { - return fmt.Errorf("failed to convert network ID %q to int: %w", fi.StringValue(e.Network.ID), err) + return fmt.Errorf("failed to convert network ID %q to int: %w", fi.ValueOf(e.Network.ID), err) } opts := hcloud.LoadBalancerCreateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), LoadBalancerType: &hcloud.LoadBalancerType{ Name: e.Type, }, @@ -222,7 +222,7 @@ func (_ *LoadBalancer) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes LabelSelector: hcloud.LoadBalancerCreateOptsTargetLabelSelector{ Selector: e.Target, }, - UsePrivateIP: fi.Bool(true), + UsePrivateIP: fi.PtrTo(true), }, }, Network: &hcloud.Network{ @@ -249,7 +249,7 @@ func (_ *LoadBalancer) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes } else { var err error - loadbalancer, _, err := client.Get(ctx, strconv.Itoa(fi.IntValue(a.ID))) + loadbalancer, _, err := client.Get(ctx, strconv.Itoa(fi.ValueOf(a.ID))) if err != nil { return err } @@ -257,7 +257,7 @@ func (_ *LoadBalancer) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes // Update the labels if changes.Name != nil || len(changes.Labels) != 0 { _, _, err := client.Update(ctx, loadbalancer, hcloud.LoadBalancerUpdateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), Labels: e.Labels, }) if err != nil { @@ -289,7 +289,7 @@ func (_ *LoadBalancer) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes if a.Target == "" { action, _, err := client.AddLabelSelectorTarget(ctx, loadbalancer, hcloud.LoadBalancerAddLabelSelectorTargetOpts{ Selector: e.Target, - UsePrivateIP: fi.Bool(true), + UsePrivateIP: fi.PtrTo(true), }) if err != nil { return err @@ -375,7 +375,7 @@ func (_ *LoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e, chang for _, service := range e.Services { tf := &terraformLoadBalancerService{ LoadBalancerID: e.TerraformLink(), - Protocol: fi.String(service.Protocol), + Protocol: fi.PtrTo(service.Protocol), ListenPort: service.ListenerPort, DestinationPort: service.DestinationPort, } @@ -389,9 +389,9 @@ func (_ *LoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e, chang { tf := &terraformLoadBalancerTarget{ LoadBalancerID: e.TerraformLink(), - Type: fi.String(string(hcloud.LoadBalancerTargetTypeLabelSelector)), - LabelSelector: fi.String(e.Target), - UsePrivateIP: fi.Bool(true), + Type: fi.PtrTo(string(hcloud.LoadBalancerTargetTypeLabelSelector)), + LabelSelector: fi.PtrTo(e.Target), + UsePrivateIP: fi.PtrTo(true), } err := t.RenderResource("hcloud_load_balancer_target", *e.Name, tf) diff --git a/upup/pkg/fi/cloudup/hetznertasks/network.go b/upup/pkg/fi/cloudup/hetznertasks/network.go index 6b7d7860ffa34..1364e398cb180 100644 --- a/upup/pkg/fi/cloudup/hetznertasks/network.go +++ b/upup/pkg/fi/cloudup/hetznertasks/network.go @@ -53,9 +53,9 @@ func (v *Network) Find(c *fi.Context) (*Network, error) { cloud := c.Cloud.(hetzner.HetznerCloud) client := cloud.NetworkClient() - idOrName := fi.StringValue(v.Name) + idOrName := fi.ValueOf(v.Name) if v.ID != nil { - idOrName = fi.StringValue(v.ID) + idOrName = fi.ValueOf(v.ID) } network, _, err := client.Get(context.TODO(), idOrName) @@ -72,7 +72,7 @@ func (v *Network) Find(c *fi.Context) (*Network, error) { matches := &Network{ Name: v.Name, Lifecycle: v.Lifecycle, - ID: fi.String(strconv.Itoa(network.ID)), + ID: fi.PtrTo(strconv.Itoa(network.ID)), } if v.ID == nil { @@ -143,7 +143,7 @@ func (_ *Network) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Netw return err } opts := hcloud.NetworkCreateOpts{ - Name: 
fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), IPRange: ipRange, Labels: e.Labels, } @@ -151,11 +151,11 @@ func (_ *Network) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Netw if err != nil { return err } - e.ID = fi.String(strconv.Itoa(network.ID)) + e.ID = fi.PtrTo(strconv.Itoa(network.ID)) } else { var err error - network, _, err = client.Get(context.TODO(), fi.StringValue(e.Name)) + network, _, err = client.Get(context.TODO(), fi.ValueOf(e.Name)) if err != nil { return err } @@ -163,7 +163,7 @@ func (_ *Network) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Netw // Update the labels if changes.Name != nil || len(changes.Labels) != 0 { _, _, err := client.Update(context.TODO(), network, hcloud.NetworkUpdateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), Labels: e.Labels, }) if err != nil { @@ -221,7 +221,7 @@ func (_ *Network) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *N { tf := &terraformNetwork{ Name: e.Name, - IPRange: fi.String(e.IPRange), + IPRange: fi.PtrTo(e.IPRange), Labels: e.Labels, } @@ -239,9 +239,9 @@ func (_ *Network) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *N tf := &terraformNetworkSubnet{ NetworkID: e.TerraformLink(), - Type: fi.String(string(hcloud.NetworkSubnetTypeCloud)), - IPRange: fi.String(subnetIpRange.String()), - NetworkZone: fi.String(e.Region), + Type: fi.PtrTo(string(hcloud.NetworkSubnetTypeCloud)), + IPRange: fi.PtrTo(subnetIpRange.String()), + NetworkZone: fi.PtrTo(e.Region), } err = t.RenderResource("hcloud_network_subnet", *e.Name+"-"+subnet, tf) diff --git a/upup/pkg/fi/cloudup/hetznertasks/servergroup.go b/upup/pkg/fi/cloudup/hetznertasks/servergroup.go index fffb1b94fdcf6..c08f2b715dd39 100644 --- a/upup/pkg/fi/cloudup/hetznertasks/servergroup.go +++ b/upup/pkg/fi/cloudup/hetznertasks/servergroup.go @@ -60,7 +60,7 @@ func (v *ServerGroup) Find(c *fi.Context) (*ServerGroup, error) { labelSelector := []string{ fmt.Sprintf("%s=%s", hetzner.TagKubernetesClusterName, c.Cluster.Name), - fmt.Sprintf("%s=%s", hetzner.TagKubernetesInstanceGroup, fi.StringValue(v.Name)), + fmt.Sprintf("%s=%s", hetzner.TagKubernetesInstanceGroup, fi.ValueOf(v.Name)), } listOptions := hcloud.ListOpts{ PerPage: 50, @@ -190,10 +190,10 @@ func (_ *ServerGroup) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes * } if len(e.SSHKeys) == 0 { - return fmt.Errorf("failed to find ssh keys for server group %q", fi.StringValue(e.Name)) + return fmt.Errorf("failed to find ssh keys for server group %q", fi.ValueOf(e.Name)) } if e.Network == nil { - return fmt.Errorf("failed to find network for server group %q", fi.StringValue(e.Name)) + return fmt.Errorf("failed to find network for server group %q", fi.ValueOf(e.Name)) } userData, err := fi.ResourceAsString(e.UserData) @@ -206,18 +206,18 @@ func (_ *ServerGroup) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes * } userDataHash := safeBytesHash(userDataBytes) - networkID, err := strconv.Atoi(fi.StringValue(e.Network.ID)) + networkID, err := strconv.Atoi(fi.ValueOf(e.Network.ID)) if err != nil { - return fmt.Errorf("failed to convert network ID %q to int: %w", fi.StringValue(e.Network.ID), err) + return fmt.Errorf("failed to convert network ID %q to int: %w", fi.ValueOf(e.Network.ID), err) } for i := 1; i <= expectedCount-actualCount; i++ { // Append a random/unique ID to the node name - name := fmt.Sprintf("%s-%x", fi.StringValue(e.Name), rand.Int63()) + name := fmt.Sprintf("%s-%x", fi.ValueOf(e.Name), rand.Int63()) opts := 
hcloud.ServerCreateOpts{ Name: name, - StartAfterCreate: fi.Bool(true), + StartAfterCreate: fi.PtrTo(true), Networks: []*hcloud.Network{ { ID: networkID, @@ -242,7 +242,7 @@ func (_ *ServerGroup) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes * // Add the SSH keys for _, sshkey := range e.SSHKeys { - opts.SSHKeys = append(opts.SSHKeys, &hcloud.SSHKey{ID: fi.IntValue(sshkey.ID)}) + opts.SSHKeys = append(opts.SSHKeys, &hcloud.SSHKey{ID: fi.ValueOf(sshkey.ID)}) } // Add the user-data hash label @@ -298,21 +298,21 @@ type terraformServerPublicNet struct { } func (_ *ServerGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ServerGroup) error { - name := terraformWriter.LiteralWithIndex(fi.StringValue(e.Name)) + name := terraformWriter.LiteralWithIndex(fi.ValueOf(e.Name)) tf := &terraformServer{ - Count: fi.Int(e.Count), + Count: fi.PtrTo(e.Count), Name: name, - Location: fi.String(e.Location), - ServerType: fi.String(e.Size), - Image: fi.String(e.Image), + Location: fi.PtrTo(e.Location), + ServerType: fi.PtrTo(e.Size), + Image: fi.PtrTo(e.Image), Network: []*terraformServerNetwork{ { ID: e.Network.TerraformLink(), }, }, PublicNet: &terraformServerPublicNet{ - EnableIPv4: fi.Bool(e.EnableIPv4), - EnableIPv6: fi.Bool(e.EnableIPv6), + EnableIPv4: fi.PtrTo(e.EnableIPv4), + EnableIPv6: fi.PtrTo(e.EnableIPv6), }, Labels: e.Labels, } @@ -327,12 +327,12 @@ func (_ *ServerGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change return err } if data != nil { - tf.UserData, err = t.AddFileBytes("hcloud_server", fi.StringValue(e.Name), "user_data", data, true) + tf.UserData, err = t.AddFileBytes("hcloud_server", fi.ValueOf(e.Name), "user_data", data, true) if err != nil { return err } } } - return t.RenderResource("hcloud_server", fi.StringValue(e.Name), tf) + return t.RenderResource("hcloud_server", fi.ValueOf(e.Name), tf) } diff --git a/upup/pkg/fi/cloudup/hetznertasks/sshkey.go b/upup/pkg/fi/cloudup/hetznertasks/sshkey.go index 11e0631acdf21..49eb775373ef6 100644 --- a/upup/pkg/fi/cloudup/hetznertasks/sshkey.go +++ b/upup/pkg/fi/cloudup/hetznertasks/sshkey.go @@ -44,7 +44,7 @@ type SSHKey struct { var _ fi.CompareWithID = &SSHKey{} func (v *SSHKey) CompareWithID() *string { - return fi.String(strconv.Itoa(fi.IntValue(v.ID))) + return fi.PtrTo(strconv.Itoa(fi.ValueOf(v.ID))) } func (v *SSHKey) Find(c *fi.Context) (*SSHKey, error) { @@ -65,7 +65,7 @@ func (v *SSHKey) Find(c *fi.Context) (*SSHKey, error) { matches := &SSHKey{ Name: v.Name, Lifecycle: v.Lifecycle, - ID: fi.Int(sshkey.ID), + ID: fi.PtrTo(sshkey.ID), PublicKey: sshkey.PublicKey, Labels: v.Labels, } @@ -106,7 +106,7 @@ func (_ *SSHKey) CheckChanges(a, e, changes *SSHKey) error { func (_ *SSHKey) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *SSHKey) error { client := t.Cloud.SSHKeyClient() if a == nil { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) tokens := strings.Fields(e.PublicKey) if len(tokens) == 3 { sshkeyComment := tokens[2] @@ -124,7 +124,7 @@ func (_ *SSHKey) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *SSHKe if err != nil { return err } - e.ID = fi.Int(sshkey.ID) + e.ID = fi.PtrTo(sshkey.ID) } return nil @@ -139,7 +139,7 @@ type terraformSSHKey struct { func (_ *SSHKey) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *SSHKey) error { tf := &terraformSSHKey{ Name: e.Name, - PublicKey: fi.String(e.PublicKey), + PublicKey: fi.PtrTo(e.PublicKey), Labels: e.Labels, } diff --git a/upup/pkg/fi/cloudup/hetznertasks/volume.go 
b/upup/pkg/fi/cloudup/hetznertasks/volume.go index cd9b7c092e6dd..267a421cd9ca9 100644 --- a/upup/pkg/fi/cloudup/hetznertasks/volume.go +++ b/upup/pkg/fi/cloudup/hetznertasks/volume.go @@ -41,7 +41,7 @@ type Volume struct { var _ fi.CompareWithID = &Volume{} func (v *Volume) CompareWithID() *string { - return fi.String(strconv.Itoa(fi.IntValue(v.ID))) + return fi.PtrTo(strconv.Itoa(fi.ValueOf(v.ID))) } func (v *Volume) Find(c *fi.Context) (*Volume, error) { @@ -54,11 +54,11 @@ func (v *Volume) Find(c *fi.Context) (*Volume, error) { } for _, volume := range volumes { - if volume.Name == fi.StringValue(v.Name) { + if volume.Name == fi.ValueOf(v.Name) { matches := &Volume{ Lifecycle: v.Lifecycle, - Name: fi.String(volume.Name), - ID: fi.Int(volume.ID), + Name: fi.PtrTo(volume.Name), + ID: fi.PtrTo(volume.ID), Size: volume.Size, Labels: volume.Labels, } @@ -106,7 +106,7 @@ func (_ *Volume) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Volum if a == nil { opts := hcloud.VolumeCreateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), Location: &hcloud.Location{ Name: e.Location, }, @@ -119,7 +119,7 @@ func (_ *Volume) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Volum } } else { - volume, _, err := client.Get(context.TODO(), strconv.Itoa(fi.IntValue(a.ID))) + volume, _, err := client.Get(context.TODO(), strconv.Itoa(fi.ValueOf(a.ID))) if err != nil { return err } @@ -127,7 +127,7 @@ func (_ *Volume) RenderHetzner(t *hetzner.HetznerAPITarget, a, e, changes *Volum // Update the labels if changes.Name != nil || len(changes.Labels) != 0 { _, _, err := client.Update(context.TODO(), volume, hcloud.VolumeUpdateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), Labels: e.Labels, }) if err != nil { @@ -149,8 +149,8 @@ type terraformVolume struct { func (_ *Volume) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Volume) error { tf := &terraformVolume{ Name: e.Name, - Size: fi.Int(e.Size), - Location: fi.String(e.Location), + Size: fi.PtrTo(e.Size), + Location: fi.PtrTo(e.Location), Labels: e.Labels, } diff --git a/upup/pkg/fi/cloudup/new_cluster.go b/upup/pkg/fi/cloudup/new_cluster.go index ad876e5a6b5eb..b9f55e40dab07 100644 --- a/upup/pkg/fi/cloudup/new_cluster.go +++ b/upup/pkg/fi/cloudup/new_cluster.go @@ -30,7 +30,6 @@ import ( "k8s.io/klog/v2" "k8s.io/kops" api "k8s.io/kops/pkg/apis/kops" - kopsapi "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/apis/kops/model" "k8s.io/kops/pkg/apis/kops/util" "k8s.io/kops/pkg/client/simple" @@ -64,6 +63,8 @@ type NewClusterOptions struct { DiscoveryStore string // KubernetesVersion is the version of Kubernetes to deploy. It defaults to the version recommended by the channel. KubernetesVersion string + // KubernetesFeatureGates is the list of Kubernetes feature gates to enable/disable. + KubernetesFeatureGates []string // AdminAccess is the set of CIDR blocks permitted to connect to the Kubernetes API. It defaults to "0.0.0.0/0" and "::/0". AdminAccess []string // SSHAccess is the set of CIDR blocks permitted to connect to SSH on the nodes. It defaults to the value of AdminAccess. @@ -136,7 +137,7 @@ type NewClusterOptions struct { // Networking is the networking provider/node to use. Networking string - // Topology is the network topology to use. Defaults to "public". + // Topology is the network topology to use. Defaults to "public" for IPv4 clusters and "private" for IPv6 clusters. Topology string // DNSType is the DNS type to use; "public" or "private". Defaults to "public". 
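Across every file in these hunks the change is the same mechanical swap: the typed pointer constructors (fi.String, fi.Bool, fi.Int, fi.Int32, fi.Int64) and their dereferencing counterparts (fi.StringValue, fi.BoolValue, fi.IntValue, fi.Int32Value) give way to a single generic pair, fi.PtrTo and fi.ValueOf. A minimal sketch of that pair, written here in the conventional Go-generics form rather than quoted from the actual upup/pkg/fi source:

package fi

// PtrTo returns a pointer to a copy of v; one generic function stands in
// for the whole per-type family (String, Bool, Int, Int32, Int64, ...).
func PtrTo[T any](v T) *T {
	return &v
}

// ValueOf dereferences p, returning T's zero value when p is nil, which
// preserves the nil-safe behavior of StringValue, BoolValue, IntValue, etc.
func ValueOf[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

One consequence is visible throughout the diff: fi.Int32(1) fixed the integer type at the call site, while fi.PtrTo infers int from an untyped literal, so call sites feeding *int32 or *int64 fields now convert explicitly, hence fi.PtrTo(int32(1)) and fi.PtrTo(int64(3)).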
DNSType string @@ -166,7 +167,6 @@ func (o *NewClusterOptions) InitDefaults() { o.Authorization = AuthorizationFlagRBAC o.AdminAccess = []string{"0.0.0.0/0", "::/0"} o.Networking = "cilium" - o.Topology = api.TopologyPublic o.InstanceManager = "cloudgroups" } @@ -235,13 +235,45 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster AllowContainerRegistry: true, } cluster.Spec.Kubelet = &api.KubeletConfigSpec{ - AnonymousAuth: fi.Bool(false), + AnonymousAuth: fi.PtrTo(false), + } + + if len(opt.KubernetesFeatureGates) > 0 { + cluster.Spec.Kubelet.FeatureGates = make(map[string]string) + cluster.Spec.KubeAPIServer = &api.KubeAPIServerConfig{ + FeatureGates: make(map[string]string), + } + cluster.Spec.KubeControllerManager = &api.KubeControllerManagerConfig{ + FeatureGates: make(map[string]string), + } + cluster.Spec.KubeProxy = &api.KubeProxyConfig{ + FeatureGates: make(map[string]string), + } + cluster.Spec.KubeScheduler = &api.KubeSchedulerConfig{ + FeatureGates: make(map[string]string), + } + + for _, featureGate := range opt.KubernetesFeatureGates { + enabled := true + if featureGate[0] == '+' { + featureGate = featureGate[1:] + } + if featureGate[0] == '-' { + enabled = false + featureGate = featureGate[1:] + } + cluster.Spec.Kubelet.FeatureGates[featureGate] = strconv.FormatBool(enabled) + cluster.Spec.KubeAPIServer.FeatureGates[featureGate] = strconv.FormatBool(enabled) + cluster.Spec.KubeControllerManager.FeatureGates[featureGate] = strconv.FormatBool(enabled) + cluster.Spec.KubeProxy.FeatureGates[featureGate] = strconv.FormatBool(enabled) + cluster.Spec.KubeScheduler.FeatureGates[featureGate] = strconv.FormatBool(enabled) + } } if len(opt.AdminAccess) == 0 { opt.AdminAccess = []string{"0.0.0.0/0", "::/0"} } - cluster.Spec.KubernetesAPIAccess = opt.AdminAccess + cluster.Spec.API.Access = opt.AdminAccess if len(opt.SSHAccess) != 0 { cluster.Spec.SSHAccess = opt.SSHAccess } else { @@ -300,16 +332,16 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster case api.CloudProviderOpenstack: cluster.Spec.CloudProvider.Openstack = &api.OpenstackSpec{ Router: &api.OpenstackRouter{ - ExternalNetwork: fi.String(opt.OpenstackExternalNet), + ExternalNetwork: fi.PtrTo(opt.OpenstackExternalNet), }, BlockStorage: &api.OpenstackBlockStorageConfig{ - Version: fi.String("v3"), - IgnoreAZ: fi.Bool(opt.OpenstackStorageIgnoreAZ), + Version: fi.PtrTo("v3"), + IgnoreAZ: fi.PtrTo(opt.OpenstackStorageIgnoreAZ), }, Monitor: &api.OpenstackMonitor{ - Delay: fi.String("15s"), - Timeout: fi.String("10s"), - MaxRetries: fi.Int(3), + Delay: fi.PtrTo("15s"), + Timeout: fi.PtrTo("10s"), + MaxRetries: fi.PtrTo(3), }, } default: @@ -326,7 +358,7 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster } if cluster.Spec.GetCloudProvider() == api.CloudProviderAWS { cluster.Spec.ServiceAccountIssuerDiscovery.EnableAWSOIDCProvider = true - cluster.Spec.IAM.UseServiceAccountExternalPermissions = fi.Bool(true) + cluster.Spec.IAM.UseServiceAccountExternalPermissions = fi.PtrTo(true) } } @@ -417,7 +449,7 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster } } - } else if g.Spec.Role == kopsapi.InstanceGroupRoleBastion { + } else if g.Spec.Role == api.InstanceGroupRoleBastion { if g.Spec.MachineType == "" { g.Spec.MachineType, err = defaultMachineType(cloud, &cluster, g) if err != nil { @@ -439,7 +471,7 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster if ig.Spec.Tenancy != "" && 
ig.Spec.Tenancy != "default" { switch cluster.Spec.GetCloudProvider() { - case kopsapi.CloudProviderAWS: + case api.CloudProviderAWS: if _, ok := awsDedicatedInstanceExceptions[g.Spec.MachineType]; ok { return nil, fmt.Errorf("invalid dedicated instance type: %s", g.Spec.MachineType) } @@ -455,7 +487,7 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster } else if ig.IsAPIServerOnly() && cluster.Spec.IsIPv6Only() { if len(ig.Spec.Subnets) == 0 { for _, subnet := range cluster.Spec.Subnets { - if subnet.Type != kopsapi.SubnetTypePrivate && subnet.Type != kopsapi.SubnetTypeUtility { + if subnet.Type != api.SubnetTypePrivate && subnet.Type != api.SubnetTypeUtility { ig.Spec.Subnets = append(g.Spec.Subnets, subnet.Name) } } @@ -463,7 +495,7 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster } else { if len(ig.Spec.Subnets) == 0 { for _, subnet := range cluster.Spec.Subnets { - if subnet.Type != kopsapi.SubnetTypeDualStack && subnet.Type != kopsapi.SubnetTypeUtility { + if subnet.Type != api.SubnetTypeDualStack && subnet.Type != api.SubnetTypeUtility { g.Spec.Subnets = append(g.Spec.Subnets, subnet.Name) } } @@ -471,7 +503,7 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster if len(g.Spec.Subnets) == 0 { for _, subnet := range cluster.Spec.Subnets { - if subnet.Type != kopsapi.SubnetTypeUtility { + if subnet.Type != api.SubnetTypeUtility { g.Spec.Subnets = append(g.Spec.Subnets, subnet.Name) } } @@ -553,10 +585,10 @@ func setupVPC(opt *NewClusterOptions, cluster *api.Cluster, cloud fi.Cloud) erro } if opt.OpenstackDNSServers != "" { - cluster.Spec.CloudProvider.Openstack.Router.DNSServers = fi.String(opt.OpenstackDNSServers) + cluster.Spec.CloudProvider.Openstack.Router.DNSServers = fi.PtrTo(opt.OpenstackDNSServers) } if opt.OpenstackExternalSubnet != "" { - cluster.Spec.CloudProvider.Openstack.Router.ExternalSubnet = fi.String(opt.OpenstackExternalSubnet) + cluster.Spec.CloudProvider.Openstack.Router.ExternalSubnet = fi.PtrTo(opt.OpenstackExternalSubnet) } case api.CloudProviderAzure: // TODO(kenji): Find a right place for this. 
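Besides the pointer-helper swap, the new_cluster.go hunks above carry three substantive changes: cluster.Spec.KubernetesAPIAccess folds into cluster.Spec.API.Access, the hard-coded Topology default leaves InitDefaults (it is re-derived later in setupTopology: public for IPv4, private for IPv6), and NewCluster gains --kubernetes-feature-gates plumbing that parses an optional '+'/'-' prefix per gate and fans the same map out to the kubelet, API server, controller manager, kube-proxy, and scheduler configs. A self-contained restatement of the prefix handling (parseFeatureGates is an illustrative name of ours; the real code inlines this loop in NewCluster):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFeatureGates mirrors the inlined loop: no prefix or a leading '+'
// means the gate is enabled, a leading '-' means it is disabled.
func parseFeatureGates(gates []string) map[string]string {
	parsed := make(map[string]string, len(gates))
	for _, gate := range gates {
		enabled := true
		gate = strings.TrimPrefix(gate, "+")
		if strings.HasPrefix(gate, "-") {
			enabled = false
			gate = strings.TrimPrefix(gate, "-")
		}
		parsed[gate] = strconv.FormatBool(enabled)
	}
	return parsed
}

func main() {
	// Hypothetical gate names, purely illustrative.
	fmt.Println(parseFeatureGates([]string{"SomeAlphaGate", "-SomeBetaGate"}))
	// Output: map[SomeAlphaGate:true SomeBetaGate:false]
}

Note that the inlined version indexes featureGate[0] directly and so assumes every entry is non-empty; TrimPrefix sidesteps that edge case in this sketch.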
@@ -572,10 +604,10 @@ func setupVPC(opt *NewClusterOptions, cluster *api.Cluster, cloud fi.Cloud) erro cluster.Spec.CloudConfig = &api.CloudConfiguration{} } if opt.SpotinstProduct != "" { - cluster.Spec.CloudConfig.SpotinstProduct = fi.String(opt.SpotinstProduct) + cluster.Spec.CloudConfig.SpotinstProduct = fi.PtrTo(opt.SpotinstProduct) } if opt.SpotinstOrientation != "" { - cluster.Spec.CloudConfig.SpotinstOrientation = fi.String(opt.SpotinstOrientation) + cluster.Spec.CloudConfig.SpotinstOrientation = fi.PtrTo(opt.SpotinstOrientation) } } @@ -611,8 +643,7 @@ func setupZones(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.Stri if len(opt.SubnetIDs) != 1 { return nil, fmt.Errorf("expected exactly one subnet for GCE, got %d", len(opt.SubnetIDs)) } - providerID := opt.SubnetIDs[0] - subnet.ProviderID = providerID + subnet.ID = opt.SubnetIDs[0] } cluster.Spec.Subnets = append(cluster.Spec.Subnets, *subnet) } @@ -704,7 +735,7 @@ func setupZones(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.Stri Egress: opt.Egress, } if subnetID, ok := zoneToSubnetProviderID[zoneName]; ok { - subnet.ProviderID = subnetID + subnet.ID = subnetID } cluster.Spec.Subnets = append(cluster.Spec.Subnets, *subnet) } @@ -831,8 +862,8 @@ func setupMasters(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap g := &api.InstanceGroup{} g.Spec.Role = api.InstanceGroupRoleMaster - g.Spec.MinSize = fi.Int32(1) - g.Spec.MaxSize = fi.Int32(1) + g.Spec.MinSize = fi.PtrTo(int32(1)) + g.Spec.MaxSize = fi.PtrTo(int32(1)) g.ObjectMeta.Name = "master-" + name subnet := zoneToSubnetMap[zone] @@ -851,12 +882,12 @@ func setupMasters(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap if cluster.IsKubernetesGTE("1.22") { if cloudProvider == api.CloudProviderAWS { g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{ - HTTPPutResponseHopLimit: fi.Int64(3), - HTTPTokens: fi.String("required"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(3)), + HTTPTokens: fi.PtrTo("required"), } } - if cluster.IsKubernetesGTE("1.26") && fi.BoolValue(cluster.Spec.IAM.UseServiceAccountExternalPermissions) { - g.Spec.InstanceMetadata.HTTPPutResponseHopLimit = fi.Int64(1) + if cluster.IsKubernetesGTE("1.26") && fi.ValueOf(cluster.Spec.IAM.UseServiceAccountExternalPermissions) { + g.Spec.InstanceMetadata.HTTPPutResponseHopLimit = fi.PtrTo(int64(1)) } } @@ -897,7 +928,7 @@ func setupMasters(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap encryptEtcdStorage := false if opt.EncryptEtcdStorage != nil { - encryptEtcdStorage = fi.BoolValue(opt.EncryptEtcdStorage) + encryptEtcdStorage = fi.ValueOf(opt.EncryptEtcdStorage) } else if cloudProvider == api.CloudProviderAWS { encryptEtcdStorage = true } @@ -965,8 +996,8 @@ func setupNodes(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap ma g := &api.InstanceGroup{} g.Spec.Role = api.InstanceGroupRoleNode - g.Spec.MinSize = fi.Int32(count) - g.Spec.MaxSize = fi.Int32(count) + g.Spec.MinSize = fi.PtrTo(count) + g.Spec.MaxSize = fi.PtrTo(count) g.ObjectMeta.Name = "nodes-" + zone subnet := zoneToSubnetMap[zone] @@ -982,8 +1013,8 @@ func setupNodes(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap ma if cluster.IsKubernetesGTE("1.22") { if cloudProvider == api.CloudProviderAWS { g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{ - HTTPPutResponseHopLimit: fi.Int64(1), - HTTPTokens: fi.String("required"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(1)), + HTTPTokens: fi.PtrTo("required"), } } } @@ -1004,8 +1035,8 @@ func setupKarpenterNodes(opt 
*NewClusterOptions, cluster *api.Cluster, zoneToSub g.ObjectMeta.Name = "nodes" g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{ - HTTPPutResponseHopLimit: fi.Int64(1), - HTTPTokens: fi.String("required"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(1)), + HTTPTokens: fi.PtrTo("required"), } return []*api.InstanceGroup{g}, nil @@ -1034,8 +1065,8 @@ func setupAPIServers(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetM g := &api.InstanceGroup{} g.Spec.Role = api.InstanceGroupRoleAPIServer - g.Spec.MinSize = fi.Int32(count) - g.Spec.MaxSize = fi.Int32(count) + g.Spec.MinSize = fi.PtrTo(count) + g.Spec.MaxSize = fi.PtrTo(count) g.ObjectMeta.Name = "apiserver-" + zone subnet := zoneToSubnetMap[zone] @@ -1051,8 +1082,8 @@ func setupAPIServers(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetM if cluster.IsKubernetesGTE("1.22") { if cloudProvider == api.CloudProviderAWS { g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{ - HTTPPutResponseHopLimit: fi.Int64(1), - HTTPTokens: fi.String("required"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(1)), + HTTPTokens: fi.PtrTo("required"), } } } @@ -1124,8 +1155,16 @@ func setupNetworking(opt *NewClusterOptions, cluster *api.Cluster) error { func setupTopology(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.String) ([]*api.InstanceGroup, error) { var bastions []*api.InstanceGroup + if opt.Topology == "" { + if opt.IPv6 { + opt.Topology = api.TopologyPrivate + } else { + opt.Topology = api.TopologyPublic + } + } + switch opt.Topology { - case api.TopologyPublic, "": + case api.TopologyPublic: cluster.Spec.Topology = &api.TopologySpec{ ControlPlane: api.TopologyPublic, Nodes: api.TopologyPublic, @@ -1185,7 +1224,7 @@ func setupTopology(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.S Region: s.Region, } if subnetID, ok := zoneToSubnetProviderID[s.Zone]; ok { - subnet.ProviderID = subnetID + subnet.ID = subnetID } dualStackSubnets = append(dualStackSubnets, subnet) } @@ -1213,7 +1252,7 @@ func setupTopology(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.S Region: s.Region, } if subnetID, ok := zoneToSubnetProviderID[s.Zone]; ok { - subnet.ProviderID = subnetID + subnet.ID = subnetID } utilitySubnets = append(utilitySubnets, subnet) } @@ -1224,8 +1263,8 @@ func setupTopology(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.S bastionGroup := &api.InstanceGroup{} bastionGroup.Spec.Role = api.InstanceGroupRoleBastion bastionGroup.ObjectMeta.Name = "bastions" - bastionGroup.Spec.MaxSize = fi.Int32(1) - bastionGroup.Spec.MinSize = fi.Int32(1) + bastionGroup.Spec.MaxSize = fi.PtrTo(int32(1)) + bastionGroup.Spec.MinSize = fi.PtrTo(int32(1)) bastions = append(bastions, bastionGroup) if !cluster.IsGossip() && !cluster.UsesNoneDNS() { @@ -1235,7 +1274,7 @@ func setupTopology(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.S } if opt.IPv6 { for _, s := range cluster.Spec.Subnets { - if s.Type == kopsapi.SubnetTypeDualStack { + if s.Type == api.SubnetTypeDualStack { bastionGroup.Spec.Subnets = append(bastionGroup.Spec.Subnets, s.Name) } } @@ -1246,8 +1285,8 @@ func setupTopology(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.S if cluster.IsKubernetesGTE("1.22") { bastionGroup.Spec.InstanceMetadata = &api.InstanceMetadataOptions{ - HTTPPutResponseHopLimit: fi.Int64(1), - HTTPTokens: fi.String("required"), + HTTPPutResponseHopLimit: fi.PtrTo(int64(1)), + HTTPTokens: fi.PtrTo("required"), } } @@ -1295,14 +1334,13 @@ func setupTopology(opt *NewClusterOptions, cluster 
*api.Cluster, allZones sets.S func setupAPI(opt *NewClusterOptions, cluster *api.Cluster) error { // Populate the API access, so that it can be discoverable klog.Infof(" Cloud Provider ID = %s", cluster.Spec.GetCloudProvider()) - cluster.Spec.API = &api.AccessSpec{} if cluster.Spec.GetCloudProvider() == api.CloudProviderOpenstack { initializeOpenstackAPI(opt, cluster) } else if cluster.Spec.GetCloudProvider() == api.CloudProviderAzure { // Do nothing to disable the use of loadbalancer for the k8s API server. // TODO(kenji): Remove this condition once we support the loadbalancer // in pkg/model/azuremodel/api_loadbalancer.go. - cluster.Spec.API = nil + cluster.Spec.API.LoadBalancer = nil return nil } else if opt.APILoadBalancerType != "" || opt.APISSLCertificate != "" { cluster.Spec.API.LoadBalancer = &api.LoadBalancerAccessSpec{} @@ -1371,14 +1409,14 @@ func initializeOpenstackAPI(opt *NewClusterOptions, cluster *api.Cluster) { LbMethod = "SOURCE_IP_PORT" } cluster.Spec.CloudProvider.Openstack.Loadbalancer = &api.OpenstackLoadbalancerConfig{ - FloatingNetwork: fi.String(opt.OpenstackExternalNet), - Method: fi.String(LbMethod), - Provider: fi.String(provider), - UseOctavia: fi.Bool(opt.OpenstackLBOctavia), + FloatingNetwork: fi.PtrTo(opt.OpenstackExternalNet), + Method: fi.PtrTo(LbMethod), + Provider: fi.PtrTo(provider), + UseOctavia: fi.PtrTo(opt.OpenstackLBOctavia), } if opt.OpenstackLBSubnet != "" { - cluster.Spec.CloudProvider.Openstack.Loadbalancer.FloatingSubnet = fi.String(opt.OpenstackLBSubnet) + cluster.Spec.CloudProvider.Openstack.Loadbalancer.FloatingSubnet = fi.PtrTo(opt.OpenstackLBSubnet) } } } @@ -1418,11 +1456,11 @@ func createEtcdCluster(etcdCluster string, masters []*api.InstanceGroup, encrypt m.EncryptedVolume = &encryptEtcdStorage } if len(etcdStorageType) > 0 { - m.VolumeType = fi.String(etcdStorageType) + m.VolumeType = fi.PtrTo(etcdStorageType) } m.Name = names[i] - m.InstanceGroup = fi.String(ig.ObjectMeta.Name) + m.InstanceGroup = fi.PtrTo(ig.ObjectMeta.Name) etcd.Members = append(etcd.Members, m) } @@ -1453,7 +1491,7 @@ func addCiliumNetwork(cluster *api.Cluster) { } // defaultImage returns the default Image, based on the cloudprovider -func defaultImage(cluster *kopsapi.Cluster, channel *kopsapi.Channel, architecture architectures.Architecture) string { +func defaultImage(cluster *api.Cluster, channel *api.Channel, architecture architectures.Architecture) string { if channel != nil { var kubernetesVersion *semver.Version if cluster.Spec.KubernetesVersion != "" { @@ -1472,7 +1510,7 @@ func defaultImage(cluster *kopsapi.Cluster, channel *kopsapi.Channel, architectu } switch cluster.Spec.GetCloudProvider() { - case kopsapi.CloudProviderDO: + case api.CloudProviderDO: return defaultDONodeImage } klog.Infof("Cannot set default Image for CloudProvider=%q", cluster.Spec.GetCloudProvider()) @@ -1490,7 +1528,7 @@ func MachineArchitecture(cloud fi.Cloud, machineType string) (architectures.Arch } switch cloud.ProviderID() { - case kopsapi.CloudProviderAWS: + case api.CloudProviderAWS: info, err := cloud.(awsup.AWSCloud).DescribeInstanceType(machineType) if err != nil { return "", fmt.Errorf("error finding instance info for instance type %q: %w", machineType, err) @@ -1501,13 +1539,13 @@ func MachineArchitecture(cloud fi.Cloud, machineType string) (architectures.Arch var unsupported []string for _, arch := range info.ProcessorInfo.SupportedArchitectures { // Return the first found supported architecture, in order of popularity - switch fi.StringValue(arch) { + switch 
fi.ValueOf(arch) { case ec2.ArchitectureTypeX8664: return architectures.ArchitectureAmd64, nil case ec2.ArchitectureTypeArm64: return architectures.ArchitectureArm64, nil default: - unsupported = append(unsupported, fi.StringValue(arch)) + unsupported = append(unsupported, fi.ValueOf(arch)) } } return "", fmt.Errorf("unsupported architecture for instance type %q: %v", machineType, unsupported) diff --git a/upup/pkg/fi/cloudup/new_cluster_test.go b/upup/pkg/fi/cloudup/new_cluster_test.go index ab0dced1bd295..66f1d283e630b 100644 --- a/upup/pkg/fi/cloudup/new_cluster_test.go +++ b/upup/pkg/fi/cloudup/new_cluster_test.go @@ -233,7 +233,7 @@ func TestSetupNetworking(t *testing.T) { expected: api.Cluster{ Spec: api.ClusterSpec{ KubeProxy: &api.KubeProxyConfig{ - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), }, Networking: &api.NetworkingSpec{ Kuberouter: &api.KuberouterNetworkingSpec{}, @@ -272,7 +272,7 @@ func TestSetupNetworking(t *testing.T) { expected: api.Cluster{ Spec: api.ClusterSpec{ KubeProxy: &api.KubeProxyConfig{ - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), }, Networking: &api.NetworkingSpec{ Cilium: &api.CiliumNetworkingSpec{ @@ -289,7 +289,7 @@ func TestSetupNetworking(t *testing.T) { expected: api.Cluster{ Spec: api.ClusterSpec{ KubeProxy: &api.KubeProxyConfig{ - Enabled: fi.Bool(false), + Enabled: fi.PtrTo(false), }, Networking: &api.NetworkingSpec{ Cilium: &api.CiliumNetworkingSpec{ diff --git a/upup/pkg/fi/cloudup/openstack/cloud.go b/upup/pkg/fi/cloudup/openstack/cloud.go index ff5831a6f98aa..98c9a78825d77 100644 --- a/upup/pkg/fi/cloudup/openstack/cloud.go +++ b/upup/pkg/fi/cloudup/openstack/cloud.go @@ -349,7 +349,7 @@ func NewOpenstackCloud(tags map[string]string, spec *kops.ClusterSpec, uagent st if spec != nil && spec.CloudProvider.Openstack != nil && spec.CloudProvider.Openstack.InsecureSkipVerify != nil { tlsconfig := &tls.Config{} - tlsconfig.InsecureSkipVerify = fi.BoolValue(spec.CloudProvider.Openstack.InsecureSkipVerify) + tlsconfig.InsecureSkipVerify = fi.ValueOf(spec.CloudProvider.Openstack.InsecureSkipVerify) transport := &http.Transport{TLSClientConfig: tlsconfig} provider.HTTPClient = http.Client{ Transport: transport, @@ -439,16 +439,16 @@ func NewOpenstackCloud(tags map[string]string, spec *kops.ClusterSpec, uagent st spec.CloudProvider.Openstack.Loadbalancer.FloatingNetwork != nil { // This field is derived lbNet, err := c.ListNetworks(networks.ListOpts{ - Name: fi.StringValue(spec.CloudProvider.Openstack.Loadbalancer.FloatingNetwork), + Name: fi.ValueOf(spec.CloudProvider.Openstack.Loadbalancer.FloatingNetwork), }) if err != nil || len(lbNet) != 1 { return c, fmt.Errorf("could not establish floating network id") } - spec.CloudProvider.Openstack.Loadbalancer.FloatingNetworkID = fi.String(lbNet[0].ID) + spec.CloudProvider.Openstack.Loadbalancer.FloatingNetworkID = fi.PtrTo(lbNet[0].ID) } if spec.CloudProvider.Openstack.Loadbalancer != nil { if spec.CloudProvider.Openstack.Loadbalancer.UseOctavia != nil { - octavia = fi.BoolValue(spec.CloudProvider.Openstack.Loadbalancer.UseOctavia) + octavia = fi.ValueOf(spec.CloudProvider.Openstack.Loadbalancer.UseOctavia) } if spec.CloudProvider.Openstack.Loadbalancer.FloatingSubnet != nil { c.floatingSubnet = spec.CloudProvider.Openstack.Loadbalancer.FloatingSubnet @@ -693,11 +693,11 @@ func getApiIngressStatus(c OpenstackCloud, cluster *kops.Cluster) ([]fi.ApiIngre func getLoadBalancerIngressStatus(c OpenstackCloud, cluster *kops.Cluster) ([]fi.ApiIngressStatus, error) { var ingresses 
[]fi.ApiIngressStatus - if cluster.Spec.MasterPublicName != "" { + if cluster.Spec.API.PublicName != "" { // Note that this must match OpenstackModel lb name klog.V(2).Infof("Querying Openstack to find Loadbalancers for API (%q)", cluster.Name) lbList, err := c.ListLBs(loadbalancers.ListOpts{ - Name: cluster.Spec.MasterPublicName, + Name: cluster.Spec.API.PublicName, }) if err != nil { return ingresses, fmt.Errorf("GetApiIngressStatus: Failed to list openstack loadbalancers: %v", err) } @@ -750,7 +750,7 @@ func getIPIngressStatus(c OpenstackCloud, cluster *kops.Cluster) (ingresses []fi } for _, ip := range ips { ingresses = append(ingresses, fi.ApiIngressStatus{ - IP: fi.StringValue(ip), + IP: fi.ValueOf(ip), }) } } diff --git a/upup/pkg/fi/cloudup/openstack/cloud_test.go b/upup/pkg/fi/cloudup/openstack/cloud_test.go index 1d645b89bb6b5..30ffd996c817a 100644 --- a/upup/pkg/fi/cloudup/openstack/cloud_test.go +++ b/upup/pkg/fi/cloudup/openstack/cloud_test.go @@ -49,7 +49,9 @@ func Test_OpenstackCloud_GetApiIngressStatus(t *testing.T) { desc: "Loadbalancer configured master public name set", cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ - MasterPublicName: "master.k8s.local", + API: kops.APISpec{ + PublicName: "master.k8s.local", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Loadbalancer: &kops.OpenstackLoadbalancerConfig{}, @@ -85,7 +87,9 @@ func Test_OpenstackCloud_GetApiIngressStatus(t *testing.T) { desc: "Loadbalancer configured master public name set multiple IPs match", cluster: &kops.Cluster{ Spec: kops.ClusterSpec{ - MasterPublicName: "master.k8s.local", + API: kops.APISpec{ + PublicName: "master.k8s.local", + }, CloudProvider: kops.CloudProviderSpec{ Openstack: &kops.OpenstackSpec{ Loadbalancer: &kops.OpenstackLoadbalancerConfig{}, diff --git a/upup/pkg/fi/cloudup/openstack/instance.go b/upup/pkg/fi/cloudup/openstack/instance.go index 90df1f15a2695..01d1d471d8f28 100644 --- a/upup/pkg/fi/cloudup/openstack/instance.go +++ b/upup/pkg/fi/cloudup/openstack/instance.go @@ -81,7 +81,7 @@ func createInstance(c OpenstackCloud, opt servers.CreateOptsBuilder, portID stri if port.DeviceID != "" && port.DeviceOwner == "" { klog.Warningf("Port %s is attached to Device that does not exist anymore, resetting the status of DeviceID", portID) _, err := c.UpdatePort(portID, ports.UpdateOpts{ - DeviceID: fi.String(""), + DeviceID: fi.PtrTo(""), }) if err != nil { return false, fmt.Errorf("error updating port %s deviceid: %v", portID, err) } @@ -124,10 +124,10 @@ func listServerFloatingIPs(c OpenstackCloud, instanceID string, floatingEnabled for _, props := range addrList { if floatingEnabled { if props.IPType == "floating" { - result = append(result, fi.String(props.Addr)) + result = append(result, fi.PtrTo(props.Addr)) } } else { - result = append(result, fi.String(props.Addr)) + result = append(result, fi.PtrTo(props.Addr)) } } } @@ -242,7 +242,7 @@ func drainSingleLB(c OpenstackCloud, lb loadbalancers.LoadBalancer, instanceName // Setting the member weight to 0 means that the member will not receive new requests but will finish any existing connections. // This “drains” the backend member of active connections.
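// Draining by weight, rather than deleting the member from the pool,
// keeps established connections intact; removal would cut them off
// immediately, which is exactly what a graceful drain is meant to avoid.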
_, err := c.UpdateMemberInPool(pool.ID, member.ID, v2pools.UpdateMemberOpts{ - Weight: fi.Int(0), + Weight: fi.PtrTo(0), }) if err != nil { return err diff --git a/upup/pkg/fi/cloudup/openstack/server_group.go b/upup/pkg/fi/cloudup/openstack/server_group.go index b259b877c2acf..6dbc174bb4409 100644 --- a/upup/pkg/fi/cloudup/openstack/server_group.go +++ b/upup/pkg/fi/cloudup/openstack/server_group.go @@ -114,9 +114,9 @@ func osBuildCloudInstanceGroup(c OpenstackCloud, cluster *kops.Cluster, ig *kops cg := &cloudinstances.CloudInstanceGroup{ HumanName: newLaunchConfigName, InstanceGroup: ig, - MinSize: int(fi.Int32Value(ig.Spec.MinSize)), - TargetSize: int(fi.Int32Value(ig.Spec.MinSize)), // TODO: Retrieve the target size from OpenStack? - MaxSize: int(fi.Int32Value(ig.Spec.MaxSize)), + MinSize: int(fi.ValueOf(ig.Spec.MinSize)), + TargetSize: int(fi.ValueOf(ig.Spec.MinSize)), // TODO: Retrieve the target size from OpenStack? + MaxSize: int(fi.ValueOf(ig.Spec.MaxSize)), Raw: &g, } for _, i := range g.Members { diff --git a/upup/pkg/fi/cloudup/openstack/subnet.go b/upup/pkg/fi/cloudup/openstack/subnet.go index 178af71b65728..7908f1866cfcd 100644 --- a/upup/pkg/fi/cloudup/openstack/subnet.go +++ b/upup/pkg/fi/cloudup/openstack/subnet.go @@ -135,7 +135,7 @@ func getExternalSubnet(c OpenstackCloud, subnetName *string) (subnet *subnets.Su } subnets, err := c.ListSubnets(subnets.ListOpts{ - Name: fi.StringValue(subnetName), + Name: fi.ValueOf(subnetName), }) if err != nil { return nil, err @@ -157,7 +157,7 @@ func getLBFloatingSubnet(c OpenstackCloud, floatingSubnet *string) (subnet *subn } subnets, err := c.ListSubnets(subnets.ListOpts{ - Name: fi.StringValue(floatingSubnet), + Name: fi.ValueOf(floatingSubnet), }) if err != nil { return nil, err diff --git a/upup/pkg/fi/cloudup/openstacktasks/floatingip.go b/upup/pkg/fi/cloudup/openstacktasks/floatingip.go index 7091a184e05ff..4a819050cd0ea 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/floatingip.go +++ b/upup/pkg/fi/cloudup/openstacktasks/floatingip.go @@ -88,18 +88,18 @@ func (e *FloatingIP) FindAddresses(context *fi.Context) ([]string, error) { // try to find ip address using LB port if e.ID == nil && e.LB != nil && e.LB.PortID != nil { fips, err := findL3Floating(cloud, l3floatingip.ListOpts{ - PortID: fi.StringValue(e.LB.PortID), + PortID: fi.ValueOf(e.LB.PortID), }) if err != nil { return nil, err } - if len(fips) == 1 && fips[0].PortID == fi.StringValue(e.LB.PortID) { + if len(fips) == 1 && fips[0].PortID == fi.ValueOf(e.LB.PortID) { return []string{fips[0].FloatingIP}, nil } - return nil, fmt.Errorf("Could not find port floatingips port=%s", fi.StringValue(e.LB.PortID)) + return nil, fmt.Errorf("Could not find port floatingips port=%s", fi.ValueOf(e.LB.PortID)) } - fip, err := cloud.GetL3FloatingIP(fi.StringValue(e.ID)) + fip, err := cloud.GetL3FloatingIP(fi.ValueOf(e.ID)) if err != nil { return nil, err } @@ -134,7 +134,7 @@ func (e *FloatingIP) Find(c *fi.Context) (*FloatingIP, error) { } cloud := c.Cloud.(openstack.OpenstackCloud) if e.LB != nil && e.LB.PortID != nil { - fip, err := findFipByPortID(cloud, fi.StringValue(e.LB.PortID)) + fip, err := findFipByPortID(cloud, fi.ValueOf(e.LB.PortID)) if err != nil { return nil, fmt.Errorf("failed to find floating ip: %v", err) } @@ -142,15 +142,15 @@ func (e *FloatingIP) Find(c *fi.Context) (*FloatingIP, error) { return nil, nil } actual := &FloatingIP{ - Name: fi.String(fip.Description), - ID: fi.String(fip.ID), + Name: fi.PtrTo(fip.Description), + ID: fi.PtrTo(fip.ID), LB: e.LB, 
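// LB and Lifecycle are carried over from the desired state here, matching
// the "avoid spurious changes" pattern used elsewhere in this diff, so the
// later comparison does not flag fields the cloud API cannot echo back.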
Lifecycle: e.Lifecycle, } e.ID = actual.ID return actual, nil } - fipname := fi.StringValue(e.Name) + fipname := fi.ValueOf(e.Name) fips, err := cloud.ListL3FloatingIPs(l3floatingip.ListOpts{ Description: fipname, }) @@ -159,11 +159,11 @@ func (e *FloatingIP) Find(c *fi.Context) (*FloatingIP, error) { } for _, fip := range fips { - if fip.Description == fi.StringValue(e.Name) { + if fip.Description == fi.ValueOf(e.Name) { actual := &FloatingIP{ - ID: fi.String(fips[0].ID), + ID: fi.PtrTo(fips[0].ID), Name: e.Name, - IP: fi.String(fip.FloatingIP), + IP: fi.PtrTo(fip.FloatingIP), Lifecycle: e.Lifecycle, } e.ID = actual.ID @@ -194,8 +194,8 @@ func (e *FloatingIP) Find(c *fi.Context) (*FloatingIP, error) { return nil, nil } actual := &FloatingIP{ - Name: fi.String(fip.Description), - ID: fi.String(fip.ID), + Name: fi.PtrTo(fip.Description), + ID: fi.PtrTo(fip.ID), Lifecycle: e.Lifecycle, } e.ID = actual.ID @@ -237,7 +237,7 @@ func (_ *FloatingIP) CheckChanges(a, e, changes *FloatingIP) error { } //TODO: add back into kops 1.21 /* - if changes.Name != nil && fi.StringValue(a.Name) != "" { + if changes.Name != nil && fi.ValueOf(a.Name) != "" { return fi.CannotChangeField("Name") } */ @@ -266,11 +266,11 @@ func (f *FloatingIP) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, chan opts := l3floatingip.CreateOpts{ FloatingNetworkID: external.ID, - Description: fi.StringValue(e.Name), + Description: fi.ValueOf(e.Name), } if e.LB != nil { - opts.PortID = fi.StringValue(e.LB.PortID) + opts.PortID = fi.ValueOf(e.LB.PortID) } // instance floatingips comes from the same subnet as the kubernetes API floatingip @@ -286,17 +286,17 @@ func (f *FloatingIP) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, chan return fmt.Errorf("Failed to create floating IP: %v", err) } - e.ID = fi.String(fip.ID) - e.IP = fi.String(fip.FloatingIP) + e.ID = fi.PtrTo(fip.ID) + e.IP = fi.PtrTo(fip.FloatingIP) return nil } if changes.Name != nil { - _, err := l3floatingip.Update(cloud.NetworkingClient(), fi.StringValue(a.ID), l3floatingip.UpdateOpts{ + _, err := l3floatingip.Update(cloud.NetworkingClient(), fi.ValueOf(a.ID), l3floatingip.UpdateOpts{ Description: e.Name, }).Extract() if err != nil { - return fmt.Errorf("failed to update floating ip %v: %v", fi.StringValue(e.Name), err) + return fmt.Errorf("failed to update floating ip %v: %v", fi.ValueOf(e.Name), err) } } diff --git a/upup/pkg/fi/cloudup/openstacktasks/instance.go b/upup/pkg/fi/cloudup/openstacktasks/instance.go index 1429739daa32e..522b49ba9f965 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/instance.go +++ b/upup/pkg/fi/cloudup/openstacktasks/instance.go @@ -114,7 +114,7 @@ func (e *Instance) FindAddresses(context *fi.Context) ([]string, error) { return nil, nil } - ports, err := cloud.GetPort(fi.StringValue(e.Port.ID)) + ports, err := cloud.GetPort(fi.ValueOf(e.Port.ID)) if err != nil { return nil, err } @@ -156,7 +156,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { cloud := c.Cloud.(openstack.OpenstackCloud) computeClient := cloud.ComputeClient() serverPage, err := servers.List(computeClient, servers.ListOpts{ - Name: fmt.Sprintf("^%s", fi.StringValue(e.GroupName)), + Name: fmt.Sprintf("^%s", fi.ValueOf(e.GroupName)), }).AllPages() if err != nil { return nil, fmt.Errorf("error listing servers: %v", err) @@ -169,7 +169,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { var filteredList []servers.Server for _, server := range serverList { val, ok := server.Metadata["k8s"] - if !ok || val != 
fi.StringValue(e.ServerGroup.ClusterName) { + if !ok || val != fi.ValueOf(e.ServerGroup.ClusterName) { continue } metadataName := "" @@ -179,7 +179,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { } // name or metadata tag should match to instance name // this is needed for backwards compatibility - if server.Name == fi.StringValue(e.Name) || metadataName == fi.StringValue(e.Name) { + if server.Name == fi.ValueOf(e.Name) || metadataName == fi.ValueOf(e.Name) { filteredList = append(filteredList, server) } } @@ -188,17 +188,17 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { return nil, nil } if len(filteredList) > 1 { - return nil, fmt.Errorf("Multiple servers found with name %s", fi.StringValue(e.Name)) + return nil, fmt.Errorf("Multiple servers found with name %s", fi.ValueOf(e.Name)) } server := filteredList[0] actual := &Instance{ - ID: fi.String(server.ID), + ID: fi.PtrTo(server.ID), Name: e.Name, - SSHKey: fi.String(server.KeyName), + SSHKey: fi.PtrTo(server.KeyName), Lifecycle: e.Lifecycle, Metadata: server.Metadata, - Role: fi.String(server.Metadata["KopsRole"]), + Role: fi.PtrTo(server.Metadata["KopsRole"]), AvailabilityZone: e.AvailabilityZone, GroupName: e.GroupName, ConfigDrive: e.ConfigDrive, @@ -211,7 +211,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { return nil, fmt.Errorf("failed to fetch port for instance %v: %v", server.ID, err) } - ports = filterInstancePorts(ports, fi.StringValue(e.ServerGroup.ClusterName)) + ports = filterInstancePorts(ports, fi.ValueOf(e.ServerGroup.ClusterName)) if len(ports) == 1 { port := ports[0] @@ -227,7 +227,7 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { if e.FloatingIP != nil && e.Port != nil { fips, err := cloud.ListL3FloatingIPs(l3floatingip.ListOpts{ - PortID: fi.StringValue(e.Port.ID), + PortID: fi.ValueOf(e.Port.ID), }) if err != nil { return nil, fmt.Errorf("failed to fetch floating ips for instance %v: %v", server.ID, err) @@ -236,8 +236,8 @@ func (e *Instance) Find(c *fi.Context) (*Instance, error) { if len(fips) == 1 { fip := fips[0] fipTask := &FloatingIP{ - ID: fi.String(fip.ID), - Name: fi.String(fip.Description), + ID: fi.PtrTo(fip.ID), + Name: fi.PtrTo(fip.Description), } actual.FloatingIP = fipTask @@ -308,7 +308,7 @@ func generateInstanceName(e *Instance) (string, error) { return "", err } - return strings.ToLower(fmt.Sprintf("%s-%s", fi.StringValue(e.GroupName), hash[0:6])), nil + return strings.ToLower(fmt.Sprintf("%s-%s", fi.ValueOf(e.GroupName), hash[0:6])), nil } func (_ *Instance) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Instance) error { @@ -320,13 +320,13 @@ func (_ *Instance) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, change } klog.V(2).Infof("Creating Instance with name: %q", serverName) - imageName := fi.StringValue(e.Image) + imageName := fi.ValueOf(e.Image) image, err := cloud.GetImage(imageName) if err != nil { return fmt.Errorf("failed to find image %v: %v", imageName, err) } - flavorName := fi.StringValue(e.Flavor) + flavorName := fi.ValueOf(e.Flavor) flavor, err := cloud.GetFlavor(flavorName) if err != nil { return fmt.Errorf("failed to find flavor %v: %v", flavorName, err) @@ -338,7 +338,7 @@ func (_ *Instance) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, change FlavorRef: flavor.ID, Networks: []servers.Network{ { - Port: fi.StringValue(e.Port.ID), + Port: fi.ValueOf(e.Port.ID), }, }, Metadata: e.Metadata, @@ -353,11 +353,11 @@ func (_ *Instance) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, 
change opt.UserData = bytes } if e.AvailabilityZone != nil { - opt.AvailabilityZone = fi.StringValue(e.AvailabilityZone) + opt.AvailabilityZone = fi.ValueOf(e.AvailabilityZone) } keyext := keypairs.CreateOptsExt{ CreateOptsBuilder: opt, - KeyName: openstackKeyPairName(fi.StringValue(e.SSHKey)), + KeyName: openstackKeyPairName(fi.ValueOf(e.SSHKey)), } sgext := schedulerhints.CreateOptsExt{ @@ -372,12 +372,12 @@ func (_ *Instance) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, change return err } - v, err := t.Cloud.CreateInstance(opts, fi.StringValue(e.Port.ID)) + v, err := t.Cloud.CreateInstance(opts, fi.ValueOf(e.Port.ID)) if err != nil { return fmt.Errorf("Error creating instance: %v", err) } - e.ID = fi.String(v.ID) - e.ServerGroup.AddNewMember(fi.StringValue(e.ID)) + e.ID = fi.PtrTo(v.ID) + e.ServerGroup.AddNewMember(fi.ValueOf(e.ID)) if e.FloatingIP != nil { err = associateFloatingIP(t, e) @@ -391,7 +391,7 @@ func (_ *Instance) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, change return nil } if changes.Port != nil { - ports.Update(cloud.NetworkingClient(), fi.StringValue(changes.Port.ID), ports.UpdateOpts{ + ports.Update(cloud.NetworkingClient(), fi.ValueOf(changes.Port.ID), ports.UpdateOpts{ DeviceID: e.ID, }) } @@ -407,7 +407,7 @@ func (_ *Instance) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, change func associateFloatingIP(t *openstack.OpenstackAPITarget, e *Instance) error { client := t.Cloud.NetworkingClient() - _, err := l3floatingip.Update(client, fi.StringValue(e.FloatingIP.ID), l3floatingip.UpdateOpts{ + _, err := l3floatingip.Update(client, fi.ValueOf(e.FloatingIP.ID), l3floatingip.UpdateOpts{ PortID: e.Port.ID, }).Extract() if err != nil { @@ -421,7 +421,7 @@ func includeBootVolumeOptions(t *openstack.OpenstackAPITarget, e *Instance, opts return opts, nil } - i, err := t.Cloud.GetImage(fi.StringValue(e.Image)) + i, err := t.Cloud.GetImage(fi.ValueOf(e.Image)) if err != nil { return nil, fmt.Errorf("Error getting image information: %v", err) } diff --git a/upup/pkg/fi/cloudup/openstacktasks/lb.go b/upup/pkg/fi/cloudup/openstacktasks/lb.go index 92621840a3587..ec175be1ba2a4 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/lb.go +++ b/upup/pkg/fi/cloudup/openstacktasks/lb.go @@ -119,17 +119,17 @@ func NewLBTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle, } actual := &LB{ - ID: fi.String(lb.ID), - Name: fi.String(lb.Name), + ID: fi.PtrTo(lb.ID), + Name: fi.PtrTo(lb.Name), Lifecycle: lifecycle, - PortID: fi.String(lb.VipPortID), - Subnet: fi.String(sub.Name), - VipSubnet: fi.String(lb.VipSubnetID), - Provider: fi.String(lb.Provider), + PortID: fi.PtrTo(lb.VipPortID), + Subnet: fi.PtrTo(sub.Name), + VipSubnet: fi.PtrTo(lb.VipSubnetID), + Provider: fi.PtrTo(lb.Provider), } if secGroup { - sg, err := getSecurityGroupByName(&SecurityGroup{Name: fi.String(lb.Name)}, osCloud) + sg, err := getSecurityGroupByName(&SecurityGroup{Name: fi.PtrTo(lb.Name)}, osCloud) if err != nil { return nil, err } @@ -151,10 +151,10 @@ func (s *LB) Find(context *fi.Context) (*LB, error) { cloud := context.Cloud.(openstack.OpenstackCloud) lbPage, err := loadbalancers.List(cloud.LoadBalancerClient(), loadbalancers.ListOpts{ - Name: fi.StringValue(s.Name), + Name: fi.ValueOf(s.Name), }).AllPages() if err != nil { - return nil, fmt.Errorf("Failed to retrieve loadbalancers for name %s: %v", fi.StringValue(s.Name), err) + return nil, fmt.Errorf("Failed to retrieve loadbalancers for name %s: %v", fi.ValueOf(s.Name), err) } lbs, err := 
loadbalancers.ExtractLoadBalancers(lbPage) if err != nil { @@ -164,7 +164,7 @@ func (s *LB) Find(context *fi.Context) (*LB, error) { return nil, nil } if len(lbs) > 1 { - return nil, fmt.Errorf("Multiple load balancers for name %s", fi.StringValue(s.Name)) + return nil, fmt.Errorf("Multiple load balancers for name %s", fi.ValueOf(s.Name)) } return NewLBTaskFromCloud(cloud, s.Lifecycle, &lbs[0], s) @@ -192,34 +192,34 @@ func (_ *LB) CheckChanges(a, e, changes *LB) error { func (_ *LB) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *LB) error { if a == nil { - klog.V(2).Infof("Creating LB with Name: %q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating LB with Name: %q", fi.ValueOf(e.Name)) subnets, err := t.Cloud.ListSubnets(subnets.ListOpts{ - Name: fi.StringValue(e.Subnet), + Name: fi.ValueOf(e.Subnet), }) if err != nil { - return fmt.Errorf("Failed to retrieve subnet `%s` in loadbalancer creation: %v", fi.StringValue(e.Subnet), err) + return fmt.Errorf("Failed to retrieve subnet `%s` in loadbalancer creation: %v", fi.ValueOf(e.Subnet), err) } if len(subnets) != 1 { - return fmt.Errorf("Unexpected desired subnets for `%s`. Expected 1, got %d", fi.StringValue(e.Subnet), len(subnets)) + return fmt.Errorf("Unexpected desired subnets for `%s`. Expected 1, got %d", fi.ValueOf(e.Subnet), len(subnets)) } lbopts := loadbalancers.CreateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), VipSubnetID: subnets[0].ID, } lb, err := t.Cloud.CreateLB(lbopts) if err != nil { return fmt.Errorf("error creating LB: %v", err) } - e.ID = fi.String(lb.ID) - e.PortID = fi.String(lb.VipPortID) - e.VipSubnet = fi.String(lb.VipSubnetID) - e.Provider = fi.String(lb.Provider) + e.ID = fi.PtrTo(lb.ID) + e.PortID = fi.PtrTo(lb.VipPortID) + e.VipSubnet = fi.PtrTo(lb.VipSubnetID) + e.Provider = fi.PtrTo(lb.Provider) if e.SecurityGroup != nil { opts := ports.UpdateOpts{ - SecurityGroups: &[]string{fi.StringValue(e.SecurityGroup.ID)}, + SecurityGroups: &[]string{fi.ValueOf(e.SecurityGroup.ID)}, } _, err = ports.Update(t.Cloud.NetworkingClient(), lb.VipPortID, opts).Extract() if err != nil { @@ -229,20 +229,20 @@ func (_ *LB) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *LB) return nil } // We may have failed to update the security groups on the load balancer - port, err := t.Cloud.GetPort(fi.StringValue(a.PortID)) + port, err := t.Cloud.GetPort(fi.ValueOf(a.PortID)) if err != nil { - return fmt.Errorf("Failed to get port with id %s: %v", fi.StringValue(a.PortID), err) + return fmt.Errorf("Failed to get port with id %s: %v", fi.ValueOf(a.PortID), err) } // Ensure the loadbalancer port has one security group and it is the one specified, if e.SecurityGroup != nil && - (len(port.SecurityGroups) < 1 || port.SecurityGroups[0] != fi.StringValue(e.SecurityGroup.ID)) { + (len(port.SecurityGroups) < 1 || port.SecurityGroups[0] != fi.ValueOf(e.SecurityGroup.ID)) { opts := ports.UpdateOpts{ - SecurityGroups: &[]string{fi.StringValue(e.SecurityGroup.ID)}, + SecurityGroups: &[]string{fi.ValueOf(e.SecurityGroup.ID)}, } - _, err = ports.Update(t.Cloud.NetworkingClient(), fi.StringValue(a.PortID), opts).Extract() + _, err = ports.Update(t.Cloud.NetworkingClient(), fi.ValueOf(a.PortID), opts).Extract() if err != nil { - return fmt.Errorf("Failed to update security group for port %s: %v", fi.StringValue(a.PortID), err) + return fmt.Errorf("Failed to update security group for port %s: %v", fi.ValueOf(a.PortID), err) } return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/lblistener.go 
b/upup/pkg/fi/cloudup/openstacktasks/lblistener.go index 71675853c9ffc..a27c92726ac86 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/lblistener.go +++ b/upup/pkg/fi/cloudup/openstacktasks/lblistener.go @@ -59,8 +59,8 @@ func NewLBListenerTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lif // sort for consistent comparison sort.Strings(listener.AllowedCIDRs) listenerTask := &LBListener{ - ID: fi.String(listener.ID), - Name: fi.String(listener.Name), + ID: fi.PtrTo(listener.ID), + Name: fi.PtrTo(listener.Name), AllowedCIDRs: listener.AllowedCIDRs, Lifecycle: lifecycle, } @@ -103,17 +103,17 @@ func (s *LBListener) Find(context *fi.Context) (*LBListener, error) { cloud := context.Cloud.(openstack.OpenstackCloud) listenerList, err := cloud.ListListeners(listeners.ListOpts{ - ID: fi.StringValue(s.ID), - Name: fi.StringValue(s.Name), + ID: fi.ValueOf(s.ID), + Name: fi.ValueOf(s.Name), }) if err != nil { - return nil, fmt.Errorf("Failed to list loadbalancer listeners for name %s: %v", fi.StringValue(s.Name), err) + return nil, fmt.Errorf("Failed to list loadbalancer listeners for name %s: %v", fi.ValueOf(s.Name), err) } if len(listenerList) == 0 { return nil, nil } if len(listenerList) > 1 { - return nil, fmt.Errorf("Multiple listeners found with name %s", fi.StringValue(s.Name)) + return nil, fmt.Errorf("Multiple listeners found with name %s", fi.ValueOf(s.Name)) } return NewLBListenerTaskFromCloud(cloud, s.Lifecycle, &listenerList[0], s) @@ -146,16 +146,16 @@ func (_ *LBListener) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, chan } if a == nil { - klog.V(2).Infof("Creating LB with Name: %q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating LB with Name: %q", fi.ValueOf(e.Name)) listeneropts := listeners.CreateOpts{ - Name: fi.StringValue(e.Name), - DefaultPoolID: fi.StringValue(e.Pool.ID), - LoadbalancerID: fi.StringValue(e.Pool.Loadbalancer.ID), + Name: fi.ValueOf(e.Name), + DefaultPoolID: fi.ValueOf(e.Pool.ID), + LoadbalancerID: fi.ValueOf(e.Pool.Loadbalancer.ID), Protocol: listeners.ProtocolTCP, ProtocolPort: 443, } - if useVIPACL && (fi.StringValue(e.Pool.Loadbalancer.Provider) != "ovn") { + if useVIPACL && (fi.ValueOf(e.Pool.Loadbalancer.Provider) != "ovn") { listeneropts.AllowedCIDRs = e.AllowedCIDRs } @@ -163,14 +163,14 @@ func (_ *LBListener) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, chan if err != nil { return fmt.Errorf("error creating LB listener: %v", err) } - e.ID = fi.String(listener.ID) + e.ID = fi.PtrTo(listener.ID) return nil } else if len(changes.AllowedCIDRs) > 0 { - if useVIPACL && (fi.StringValue(a.Pool.Loadbalancer.Provider) != "ovn") { + if useVIPACL && (fi.ValueOf(a.Pool.Loadbalancer.Provider) != "ovn") { opts := listeners.UpdateOpts{ AllowedCIDRs: &changes.AllowedCIDRs, } - _, err := listeners.Update(t.Cloud.LoadBalancerClient(), fi.StringValue(a.ID), opts).Extract() + _, err := listeners.Update(t.Cloud.LoadBalancerClient(), fi.ValueOf(a.ID), opts).Extract() if err != nil { return fmt.Errorf("error updating LB listener: %v", err) } diff --git a/upup/pkg/fi/cloudup/openstacktasks/lbpool.go b/upup/pkg/fi/cloudup/openstacktasks/lbpool.go index 8977dec09af3c..e6d746186642a 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/lbpool.go +++ b/upup/pkg/fi/cloudup/openstacktasks/lbpool.go @@ -56,8 +56,8 @@ func NewLBPoolTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecyc } a := &LBPool{ - ID: fi.String(pool.ID), - Name: fi.String(pool.Name), + ID: fi.PtrTo(pool.ID), + Name: fi.PtrTo(pool.Name), Lifecycle: lifecycle, } if 
len(pool.Loadbalancers) == 1 { @@ -87,8 +87,8 @@ func (p *LBPool) Find(context *fi.Context) (*LBPool, error) { cloud := context.Cloud.(openstack.OpenstackCloud) poolList, err := cloud.ListPools(v2pools.ListOpts{ - ID: fi.StringValue(p.ID), - Name: fi.StringValue(p.Name), + ID: fi.ValueOf(p.ID), + Name: fi.ValueOf(p.Name), }) if err != nil { return nil, fmt.Errorf("Failed to list pools: %v", err) @@ -97,7 +97,7 @@ func (p *LBPool) Find(context *fi.Context) (*LBPool, error) { return nil, nil } if len(poolList) > 1 { - return nil, fmt.Errorf("Multiple pools found for name %s", fi.StringValue(p.Name)) + return nil, fmt.Errorf("Multiple pools found for name %s", fi.ValueOf(p.Name)) } return NewLBPoolTaskFromCloud(cloud, p.Lifecycle, &poolList[0], p) @@ -127,26 +127,26 @@ func (_ *LBPool) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes if a == nil { // wait that lb is in ACTIVE state - provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(t.Cloud.LoadBalancerClient(), fi.StringValue(e.Loadbalancer.ID)) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(t.Cloud.LoadBalancerClient(), fi.ValueOf(e.Loadbalancer.ID)) if err != nil { return fmt.Errorf("failed to loadbalancer ACTIVE provisioning status %v: %v", provisioningStatus, err) } LbMethod := v2pools.LBMethodRoundRobin - if fi.StringValue(e.Loadbalancer.Provider) == "ovn" { + if fi.ValueOf(e.Loadbalancer.Provider) == "ovn" { LbMethod = v2pools.LBMethodSourceIpPort } poolopts := v2pools.CreateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), LBMethod: LbMethod, Protocol: v2pools.ProtocolTCP, - LoadbalancerID: fi.StringValue(e.Loadbalancer.ID), + LoadbalancerID: fi.ValueOf(e.Loadbalancer.ID), } pool, err := t.Cloud.CreatePool(poolopts) if err != nil { return fmt.Errorf("error creating LB pool: %v", err) } - e.ID = fi.String(pool.ID) + e.ID = fi.PtrTo(pool.ID) return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/network.go b/upup/pkg/fi/cloudup/openstacktasks/network.go index ee206bc3d183f..1563b3ccff3ea 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/network.go +++ b/upup/pkg/fi/cloudup/openstacktasks/network.go @@ -42,15 +42,15 @@ func (n *Network) CompareWithID() *string { func NewNetworkTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle, network *networks.Network, networkName *string) (*Network, error) { tag := "" - if networkName != nil && fi.ArrayContains(network.Tags, fi.StringValue(networkName)) { - tag = fi.StringValue(networkName) + if networkName != nil && fi.ArrayContains(network.Tags, fi.ValueOf(networkName)) { + tag = fi.ValueOf(networkName) } task := &Network{ - ID: fi.String(network.ID), - Name: fi.String(network.Name), + ID: fi.PtrTo(network.ID), + Name: fi.PtrTo(network.Name), Lifecycle: lifecycle, - Tag: fi.String(tag), + Tag: fi.PtrTo(tag), AvailabilityZoneHints: fi.StringSlice(network.AvailabilityZoneHints), } return task, nil @@ -63,8 +63,8 @@ func (n *Network) Find(context *fi.Context) (*Network, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := networks.ListOpts{ - ID: fi.StringValue(n.ID), - Name: fi.StringValue(n.Name), + ID: fi.ValueOf(n.ID), + Name: fi.ValueOf(n.Name), } ns, err := cloud.ListNetworks(opt) if err != nil { @@ -73,7 +73,7 @@ func (n *Network) Find(context *fi.Context) (*Network, error) { if ns == nil { return nil, nil } else if len(ns) != 1 { - return nil, fmt.Errorf("found multiple networks with name: %s", fi.StringValue(n.Name)) + return nil, fmt.Errorf("found multiple networks with name: %s", 
fi.ValueOf(n.Name)) } v := ns[0] actual, err := NewNetworkTaskFromCloud(cloud, n.Lifecycle, &v, n.Tag) @@ -116,11 +116,11 @@ func (_ *Network) ShouldCreate(a, e, changes *Network) (bool, error) { func (_ *Network) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Network) error { if a == nil { - klog.V(2).Infof("Creating Network with name:%q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating Network with name:%q", fi.ValueOf(e.Name)) opt := networks.CreateOpts{ - Name: fi.StringValue(e.Name), - AdminStateUp: fi.Bool(true), + Name: fi.ValueOf(e.Name), + AdminStateUp: fi.PtrTo(true), AvailabilityZoneHints: fi.StringSliceValue(e.AvailabilityZoneHints), } @@ -129,21 +129,21 @@ func (_ *Network) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes return fmt.Errorf("Error creating network: %v", err) } - err = t.Cloud.AppendTag(openstack.ResourceTypeNetwork, v.ID, fi.StringValue(e.Tag)) + err = t.Cloud.AppendTag(openstack.ResourceTypeNetwork, v.ID, fi.ValueOf(e.Tag)) if err != nil { return fmt.Errorf("Error appending tag to network: %v", err) } - e.ID = fi.String(v.ID) + e.ID = fi.PtrTo(v.ID) klog.V(2).Infof("Creating a new Openstack network, id=%s", v.ID) return nil } else { - err := t.Cloud.AppendTag(openstack.ResourceTypeNetwork, fi.StringValue(a.ID), fi.StringValue(changes.Tag)) + err := t.Cloud.AppendTag(openstack.ResourceTypeNetwork, fi.ValueOf(a.ID), fi.ValueOf(changes.Tag)) if err != nil { return fmt.Errorf("Error appending tag to network: %v", err) } } e.ID = a.ID - klog.V(2).Infof("Using an existing Openstack network, id=%s", fi.StringValue(e.ID)) + klog.V(2).Infof("Using an existing Openstack network, id=%s", fi.ValueOf(e.ID)) return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/poolassociation.go b/upup/pkg/fi/cloudup/openstacktasks/poolassociation.go index d300acfc18c4c..95896d9ea09f6 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/poolassociation.go +++ b/upup/pkg/fi/cloudup/openstacktasks/poolassociation.go @@ -67,8 +67,8 @@ func (p *PoolAssociation) Find(context *fi.Context) (*PoolAssociation, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := v2pools.ListOpts{ - Name: fi.StringValue(p.Pool.Name), - ID: fi.StringValue(p.Pool.ID), + Name: fi.ValueOf(p.Pool.Name), + ID: fi.ValueOf(p.Pool.ID), } rs, err := cloud.ListPools(opt) @@ -78,7 +78,7 @@ func (p *PoolAssociation) Find(context *fi.Context) (*PoolAssociation, error) { if rs == nil { return nil, nil } else if len(rs) != 1 { - return nil, fmt.Errorf("found multiple pools with name: %s", fi.StringValue(p.Pool.Name)) + return nil, fmt.Errorf("found multiple pools with name: %s", fi.ValueOf(p.Pool.Name)) } a := rs[0] @@ -89,7 +89,7 @@ func (p *PoolAssociation) Find(context *fi.Context) (*PoolAssociation, error) { if err != nil { return nil, err } - if fi.StringValue(p.Name) == poolMember.Name { + if fi.ValueOf(p.Name) == poolMember.Name { found = poolMember break } @@ -101,18 +101,18 @@ func (p *PoolAssociation) Find(context *fi.Context) (*PoolAssociation, error) { } pool, err := NewLBPoolTaskFromCloud(cloud, p.Lifecycle, &a, nil) if err != nil { - return nil, fmt.Errorf("NewLBListenerTaskFromCloud: failed to fetch pool %s: %v", fi.StringValue(pool.Name), err) + return nil, fmt.Errorf("NewLBListenerTaskFromCloud: failed to fetch pool %s: %v", fi.ValueOf(pool.Name), err) } actual := &PoolAssociation{ - ID: fi.String(found.ID), - Name: fi.String(found.Name), + ID: fi.PtrTo(found.ID), + Name: fi.PtrTo(found.Name), Pool: pool, ServerGroup: p.ServerGroup, InterfaceName: p.InterfaceName, 
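// Editor's note (annotation, not part of the patch): in this Find pattern,
// ServerGroup, InterfaceName, ProtocolPort, and Lifecycle are copied from the
// desired task p, while ID, Name, and Weight are read back from the cloud pool
// member; the generic fi.PtrTo now performs the *string and *int conversions
// that the typed fi.String and fi.Int helpers handled before.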
ProtocolPort: p.ProtocolPort, Lifecycle: p.Lifecycle, - Weight: fi.Int(found.Weight), + Weight: fi.PtrTo(found.Weight), } p.ID = actual.ID return actual, nil @@ -162,24 +162,24 @@ func (_ *PoolAssociation) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, if a == nil { for _, serverID := range e.ServerGroup.GetMembers() { - server, memberAddress, err := GetServerFixedIP(t.Cloud.ComputeClient(), serverID, fi.StringValue(e.InterfaceName)) + server, memberAddress, err := GetServerFixedIP(t.Cloud.ComputeClient(), serverID, fi.ValueOf(e.InterfaceName)) if err != nil { return err } - member, err := t.Cloud.AssociateToPool(server, fi.StringValue(e.Pool.ID), v2pools.CreateMemberOpts{ - Name: fi.StringValue(e.Name), - ProtocolPort: fi.IntValue(e.ProtocolPort), - SubnetID: fi.StringValue(e.Pool.Loadbalancer.VipSubnet), + member, err := t.Cloud.AssociateToPool(server, fi.ValueOf(e.Pool.ID), v2pools.CreateMemberOpts{ + Name: fi.ValueOf(e.Name), + ProtocolPort: fi.ValueOf(e.ProtocolPort), + SubnetID: fi.ValueOf(e.Pool.Loadbalancer.VipSubnet), Address: memberAddress, }) if err != nil { return fmt.Errorf("Failed to create member: %v", err) } - e.ID = fi.String(member.ID) + e.ID = fi.PtrTo(member.ID) } } else { - _, err := t.Cloud.UpdateMemberInPool(fi.StringValue(a.Pool.ID), fi.StringValue(a.ID), v2pools.UpdateMemberOpts{ + _, err := t.Cloud.UpdateMemberInPool(fi.ValueOf(a.Pool.ID), fi.ValueOf(a.ID), v2pools.UpdateMemberOpts{ Weight: e.Weight, }) if err != nil { diff --git a/upup/pkg/fi/cloudup/openstacktasks/poolmonitor.go b/upup/pkg/fi/cloudup/openstacktasks/poolmonitor.go index 967adcd807160..32c4399c2a2bb 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/poolmonitor.go +++ b/upup/pkg/fi/cloudup/openstacktasks/poolmonitor.go @@ -54,8 +54,8 @@ func (p *PoolMonitor) Find(context *fi.Context) (*PoolMonitor, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := monitors.ListOpts{ - Name: fi.StringValue(p.Name), - PoolID: fi.StringValue(p.Pool.ID), + Name: fi.ValueOf(p.Name), + PoolID: fi.ValueOf(p.Pool.ID), } rs, err := cloud.ListMonitors(opt) @@ -65,12 +65,12 @@ func (p *PoolMonitor) Find(context *fi.Context) (*PoolMonitor, error) { if rs == nil || len(rs) == 0 { return nil, nil } else if len(rs) != 1 { - return nil, fmt.Errorf("found multiple monitors with name: %s", fi.StringValue(p.Name)) + return nil, fmt.Errorf("found multiple monitors with name: %s", fi.ValueOf(p.Name)) } found := rs[0] actual := &PoolMonitor{ - ID: fi.String(found.ID), - Name: fi.String(found.Name), + ID: fi.PtrTo(found.ID), + Name: fi.PtrTo(found.Name), Pool: p.Pool, Lifecycle: p.Lifecycle, } @@ -100,11 +100,11 @@ func (_ *PoolMonitor) CheckChanges(a, e, changes *PoolMonitor) error { func (_ *PoolMonitor) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *PoolMonitor) error { if a == nil { - klog.V(2).Infof("Creating PoolMonitor with Name: %q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating PoolMonitor with Name: %q", fi.ValueOf(e.Name)) poolMonitor, err := t.Cloud.CreatePoolMonitor(monitors.CreateOpts{ - Name: fi.StringValue(e.Name), - PoolID: fi.StringValue(e.Pool.ID), + Name: fi.ValueOf(e.Name), + PoolID: fi.ValueOf(e.Pool.ID), Type: monitors.TypeTCP, Delay: 10, Timeout: 5, @@ -114,7 +114,7 @@ func (_ *PoolMonitor) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, cha if err != nil { return fmt.Errorf("error creating PoolMonitor: %v", err) } - e.ID = fi.String(poolMonitor.ID) + e.ID = fi.PtrTo(poolMonitor.ID) } return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/port.go 
b/upup/pkg/fi/cloudup/openstacktasks/port.go index 8f0e8a9a7fff9..b906ff1675bcf 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/port.go +++ b/upup/pkg/fi/cloudup/openstacktasks/port.go @@ -87,7 +87,7 @@ func newPortTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle continue } sgs = append(sgs, &SecurityGroup{ - ID: fi.String(sgid), + ID: fi.PtrTo(sgid), Lifecycle: lifecycle, }) } @@ -98,7 +98,7 @@ func newPortTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle subnets := make([]*Subnet, len(port.FixedIPs)) for i, subn := range port.FixedIPs { subnets[i] = &Subnet{ - ID: fi.String(subn.SubnetID), + ID: fi.PtrTo(subn.SubnetID), Lifecycle: lifecycle, } } @@ -121,7 +121,7 @@ func newPortTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle if !strings.HasPrefix(t, prefix) { continue } - cloudInstanceGroupName = fi.String("") + cloudInstanceGroupName = fi.PtrTo("") scanString := fmt.Sprintf("%s%%s", prefix) if _, err := fmt.Sscanf(t, scanString, cloudInstanceGroupName); err != nil { klog.V(2).Infof("Error extracting instance group for Port with name: %q", port.Name) @@ -129,10 +129,10 @@ func newPortTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle } actual := &Port{ - ID: fi.String(port.ID), + ID: fi.PtrTo(port.ID), InstanceGroupName: cloudInstanceGroupName, - Name: fi.String(port.Name), - Network: &Network{ID: fi.String(port.NetworkID)}, + Name: fi.PtrTo(port.Name), + Network: &Network{ID: fi.PtrTo(port.NetworkID)}, SecurityGroups: sgs, Subnets: subnets, Lifecycle: lifecycle, @@ -149,7 +149,7 @@ func newPortTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle func (s *Port) Find(context *fi.Context) (*Port, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := ports.ListOpts{ - Name: fi.StringValue(s.Name), + Name: fi.ValueOf(s.Name), } rs, err := cloud.ListPorts(opt) if err != nil { @@ -158,7 +158,7 @@ func (s *Port) Find(context *fi.Context) (*Port, error) { if rs == nil { return nil, nil } else if len(rs) != 1 { - return nil, fmt.Errorf("found multiple ports with name: %s", fi.StringValue(s.Name)) + return nil, fmt.Errorf("found multiple ports with name: %s", fi.ValueOf(s.Name)) } // sort for consistent comparison @@ -192,7 +192,7 @@ func (_ *Port) CheckChanges(a, e, changes *Port) error { func (*Port) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Port) error { if a == nil { - klog.V(2).Infof("Creating Port with name: %q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating Port with name: %q", fi.ValueOf(e.Name)) opt, err := portCreateOptsFromPortTask(t, a, e, changes) if err != nil { @@ -212,28 +212,28 @@ func (*Port) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Por } } } - e.ID = fi.String(v.ID) + e.ID = fi.PtrTo(v.ID) klog.V(2).Infof("Creating a new Openstack port, id=%s", v.ID) return nil } if changes != nil && changes.Tags != nil { - klog.V(2).Infof("Updating tags for Port with name: %q", fi.StringValue(e.Name)) + klog.V(2).Infof("Updating tags for Port with name: %q", fi.ValueOf(e.Name)) for _, tag := range e.Tags { - err := t.Cloud.AppendTag(openstack.ResourceTypePort, fi.StringValue(a.ID), tag) + err := t.Cloud.AppendTag(openstack.ResourceTypePort, fi.ValueOf(a.ID), tag) if err != nil { return fmt.Errorf("Error appending tag to port: %v", err) } } } e.ID = a.ID - klog.V(2).Infof("Using an existing Openstack port, id=%s", fi.StringValue(e.ID)) + klog.V(2).Infof("Using an existing Openstack port, id=%s", fi.ValueOf(e.ID)) return nil } func 
portCreateOptsFromPortTask(t *openstack.OpenstackAPITarget, a, e, changes *Port) (ports.CreateOptsBuilder, error) { sgs := make([]string, len(e.SecurityGroups)+len(e.AdditionalSecurityGroups)) for i, sg := range e.SecurityGroups { - sgs[i] = fi.StringValue(sg.ID) + sgs[i] = fi.ValueOf(sg.ID) } for i, sg := range e.AdditionalSecurityGroups { opt := secgroup.ListOpts{ @@ -251,13 +251,13 @@ func portCreateOptsFromPortTask(t *openstack.OpenstackAPITarget, a, e, changes * fixedIPs := make([]ports.IP, len(e.Subnets)) for i, subn := range e.Subnets { fixedIPs[i] = ports.IP{ - SubnetID: fi.StringValue(subn.ID), + SubnetID: fi.ValueOf(subn.ID), } } return ports.CreateOpts{ - Name: fi.StringValue(e.Name), - NetworkID: fi.StringValue(e.Network.ID), + Name: fi.ValueOf(e.Name), + NetworkID: fi.ValueOf(e.Network.ID), SecurityGroups: &sgs, FixedIPs: fixedIPs, }, nil diff --git a/upup/pkg/fi/cloudup/openstacktasks/port_test.go b/upup/pkg/fi/cloudup/openstacktasks/port_test.go index 3f4069958cc16..fe530aea07c01 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/port_test.go +++ b/upup/pkg/fi/cloudup/openstacktasks/port_test.go @@ -32,11 +32,11 @@ import ( func Test_Port_GetDependencies(t *testing.T) { tasks := map[string]fi.Task{ - "foo": &SecurityGroup{Name: fi.String("security-group")}, - "bar": &Subnet{Name: fi.String("subnet")}, - "baz": &Instance{Name: fi.String("instance")}, - "qux": &FloatingIP{Name: fi.String("fip")}, - "xxx": &Network{Name: fi.String("network")}, + "foo": &SecurityGroup{Name: fi.PtrTo("security-group")}, + "bar": &Subnet{Name: fi.PtrTo("subnet")}, + "baz": &Instance{Name: fi.PtrTo("instance")}, + "qux": &FloatingIP{Name: fi.PtrTo("fip")}, + "xxx": &Network{Name: fi.PtrTo("network")}, } port := &Port{} @@ -44,9 +44,9 @@ func Test_Port_GetDependencies(t *testing.T) { actual := port.GetDependencies(tasks) expected := []fi.Task{ - &Subnet{Name: fi.String("subnet")}, - &Network{Name: fi.String("network")}, - &SecurityGroup{Name: fi.String("security-group")}, + &Subnet{Name: fi.PtrTo("subnet")}, + &Network{Name: fi.PtrTo("network")}, + &SecurityGroup{Name: fi.PtrTo("security-group")}, } actualSorted := sortedTasks(actual) @@ -78,9 +78,9 @@ func Test_NewPortTaskFromCloud(t *testing.T) { foundPort: nil, modifiedFoundPort: nil, expectedPortTask: &Port{ - ID: fi.String(""), - Name: fi.String(""), - Network: &Network{ID: fi.String("")}, + ID: fi.PtrTo(""), + Name: fi.PtrTo(""), + Network: &Network{ID: fi.PtrTo("")}, SecurityGroups: []*SecurityGroup{}, Subnets: []*Subnet{}, Lifecycle: fi.LifecycleSync, @@ -93,11 +93,11 @@ func Test_NewPortTaskFromCloud(t *testing.T) { cloud: &portCloud{}, cloudPort: &ports.Port{}, foundPort: &Port{}, - modifiedFoundPort: &Port{ID: fi.String("")}, + modifiedFoundPort: &Port{ID: fi.PtrTo("")}, expectedPortTask: &Port{ - ID: fi.String(""), - Name: fi.String(""), - Network: &Network{ID: fi.String("")}, + ID: fi.PtrTo(""), + Name: fi.PtrTo(""), + Network: &Network{ID: fi.PtrTo("")}, SecurityGroups: []*SecurityGroup{}, Subnets: []*Subnet{}, Lifecycle: fi.LifecycleSync, @@ -122,18 +122,18 @@ func Test_NewPortTaskFromCloud(t *testing.T) { }, }, foundPort: &Port{}, - modifiedFoundPort: &Port{ID: fi.String("id")}, + modifiedFoundPort: &Port{ID: fi.PtrTo("id")}, expectedPortTask: &Port{ - ID: fi.String("id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1"), Lifecycle: fi.LifecycleSync}, - {ID: 
fi.String("sg-2"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-1"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-2"), Lifecycle: fi.LifecycleSync}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("subnet-b"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-a"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-b"), Lifecycle: fi.LifecycleSync}, }, Lifecycle: fi.LifecycleSync, }, @@ -162,17 +162,17 @@ func Test_NewPortTaskFromCloud(t *testing.T) { foundPort: nil, modifiedFoundPort: nil, expectedPortTask: &Port{ - ID: fi.String("id"), + ID: fi.PtrTo("id"), Lifecycle: fi.LifecycleSync, - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("sg-2"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-1"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-2"), Lifecycle: fi.LifecycleSync}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("subnet-b"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-a"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-b"), Lifecycle: fi.LifecycleSync}, }, Tags: []string{ "cluster", @@ -201,31 +201,31 @@ func Test_NewPortTaskFromCloud(t *testing.T) { }, }, foundPort: &Port{ - InstanceGroupName: fi.String("node-ig"), + InstanceGroupName: fi.PtrTo("node-ig"), Tags: []string{ "KopsInstanceGroup=node-ig", }, }, modifiedFoundPort: &Port{ - ID: fi.String("id"), - InstanceGroupName: fi.String("node-ig"), + ID: fi.PtrTo("id"), + InstanceGroupName: fi.PtrTo("node-ig"), Tags: []string{ "KopsInstanceGroup=node-ig", }, }, expectedPortTask: &Port{ - ID: fi.String("id"), - InstanceGroupName: fi.String("node-ig"), + ID: fi.PtrTo("id"), + InstanceGroupName: fi.PtrTo("node-ig"), Lifecycle: fi.LifecycleSync, - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("sg-2"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-1"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-2"), Lifecycle: fi.LifecycleSync}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("subnet-b"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-a"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-b"), Lifecycle: fi.LifecycleSync}, }, Tags: []string{ "KopsInstanceGroup=node-ig", @@ -257,18 +257,18 @@ func Test_NewPortTaskFromCloud(t *testing.T) { foundPort: nil, modifiedFoundPort: nil, expectedPortTask: &Port{ - ID: fi.String("id"), - InstanceGroupName: fi.String("node-ig"), + ID: fi.PtrTo("id"), + InstanceGroupName: fi.PtrTo("node-ig"), Lifecycle: fi.LifecycleSync, - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("sg-2"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-1"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-2"), Lifecycle: fi.LifecycleSync}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("subnet-b"), Lifecycle: fi.LifecycleSync}, + {ID: 
fi.PtrTo("subnet-a"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-b"), Lifecycle: fi.LifecycleSync}, }, Tags: []string{ "cluster", @@ -312,27 +312,27 @@ func Test_NewPortTaskFromCloud(t *testing.T) { }, }, modifiedFoundPort: &Port{ - ID: fi.String("id"), + ID: fi.PtrTo("id"), AdditionalSecurityGroups: []string{ "add-1", "add-2", }, }, expectedPortTask: &Port{ - ID: fi.String("id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("sg-2"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-1"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-2"), Lifecycle: fi.LifecycleSync}, }, AdditionalSecurityGroups: []string{ "add-1", "add-2", }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("subnet-b"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-a"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-b"), Lifecycle: fi.LifecycleSync}, }, Lifecycle: fi.LifecycleSync, }, @@ -376,7 +376,7 @@ func Test_Port_Find(t *testing.T) { Cloud: &portCloud{}, }, port: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Lifecycle: fi.LifecycleSync, }, expectedPortTask: nil, @@ -410,20 +410,20 @@ func Test_Port_Find(t *testing.T) { }, }, port: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Lifecycle: fi.LifecycleSync, }, expectedPortTask: &Port{ - ID: fi.String("id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("sg-2"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-1"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-2"), Lifecycle: fi.LifecycleSync}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("subnet-b"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-a"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-b"), Lifecycle: fi.LifecycleSync}, }, Lifecycle: fi.LifecycleSync, }, @@ -457,21 +457,21 @@ func Test_Port_Find(t *testing.T) { }, }, port: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Lifecycle: fi.LifecycleSync, Tags: []string{"clusterName"}, }, expectedPortTask: &Port{ - ID: fi.String("id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("sg-2"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-1"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("sg-2"), Lifecycle: fi.LifecycleSync}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a"), Lifecycle: fi.LifecycleSync}, - {ID: fi.String("subnet-b"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-a"), Lifecycle: fi.LifecycleSync}, + {ID: fi.PtrTo("subnet-b"), Lifecycle: fi.LifecycleSync}, }, Lifecycle: fi.LifecycleSync, Tags: []string{"clusterName"}, @@ -502,7 +502,7 @@ func Test_Port_Find(t *testing.T) { }, }, port: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Lifecycle: fi.LifecycleSync, }, expectedPortTask: nil, @@ -527,7 +527,7 @@ func Test_Port_Find(t *testing.T) { }, }, port: 
&Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Lifecycle: fi.LifecycleSync, }, expectedPortTask: nil, @@ -560,8 +560,8 @@ func Test_Port_CheckChanges(t *testing.T) { desc: "actual nil all required fields set", actual: nil, expected: &Port{ - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedError: nil, }, @@ -570,7 +570,7 @@ func Test_Port_CheckChanges(t *testing.T) { actual: nil, expected: &Port{ Name: nil, - Network: &Network{ID: fi.String("networkID")}, + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedError: fi.RequiredField("Name"), }, @@ -578,7 +578,7 @@ func Test_Port_CheckChanges(t *testing.T) { desc: "actual nil required field Network nil", actual: nil, expected: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, expectedError: fi.RequiredField("Network"), @@ -586,11 +586,11 @@ func Test_Port_CheckChanges(t *testing.T) { { desc: "actual not nil all changeable fields set", actual: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, expected: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, changes: &Port{ @@ -602,31 +602,31 @@ func Test_Port_CheckChanges(t *testing.T) { { desc: "actual not nil all changeable fields set", actual: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, expected: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, changes: &Port{ Name: nil, - Network: &Network{ID: fi.String("networkID")}, + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedError: fi.CannotChangeField("Network"), }, { desc: "actual not nil unchangeable field Name set", actual: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, expected: &Port{ - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, }, changes: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, expectedError: fi.CannotChangeField("Name"), @@ -634,16 +634,16 @@ func Test_Port_CheckChanges(t *testing.T) { { desc: "actual not nil unchangeable field Network set", actual: &Port{ - Name: fi.String("name"), + Name: fi.PtrTo("name"), Network: nil, }, expected: &Port{ Name: nil, - Network: &Network{ID: fi.String("networkID")}, + Network: &Network{ID: fi.PtrTo("networkID")}, }, changes: &Port{ Name: nil, - Network: &Network{ID: fi.String("networkID")}, + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedError: fi.CannotChangeField("Network"), }, @@ -673,19 +673,19 @@ func Test_Port_RenderOpenstack(t *testing.T) { { desc: "actual not nil", actual: &Port{ - ID: fi.String("actual-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("actual-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, }, expected: &Port{ - ID: fi.String("expected-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("expected-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedAfter: &Port{ - ID: fi.String("actual-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("actual-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedCloudPort: nil, expectedError: nil, @@ -711,29 +711,29 @@ func Test_Port_RenderOpenstack(t 
*testing.T) { }, actual: nil, expected: &Port{ - ID: fi.String("expected-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("expected-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1")}, - {ID: fi.String("sg-2")}, + {ID: fi.PtrTo("sg-1")}, + {ID: fi.PtrTo("sg-2")}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a")}, - {ID: fi.String("subnet-b")}, + {ID: fi.PtrTo("subnet-a")}, + {ID: fi.PtrTo("subnet-b")}, }, }, expectedAfter: &Port{ - ID: fi.String("cloud-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("cloud-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1")}, - {ID: fi.String("sg-2")}, + {ID: fi.PtrTo("sg-1")}, + {ID: fi.PtrTo("sg-2")}, }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a")}, - {ID: fi.String("subnet-b")}, + {ID: fi.PtrTo("subnet-a")}, + {ID: fi.PtrTo("subnet-b")}, }, }, expectedCloudPort: &ports.Port{ @@ -760,14 +760,14 @@ func Test_Port_RenderOpenstack(t *testing.T) { }, actual: nil, expected: &Port{ - ID: fi.String("expected-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("expected-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedAfter: &Port{ - ID: fi.String("expected-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("expected-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, }, expectedCloudPort: nil, expectedError: fmt.Errorf("Error creating port: port create error"), @@ -813,20 +813,20 @@ func Test_Port_createOptsFromPortTask(t *testing.T) { }, }, expected: &Port{ - ID: fi.String("expected-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("expected-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1")}, - {ID: fi.String("sg-2")}, + {ID: fi.PtrTo("sg-1")}, + {ID: fi.PtrTo("sg-2")}, }, AdditionalSecurityGroups: []string{ "add-1", "add-2", }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a")}, - {ID: fi.String("subnet-b")}, + {ID: fi.PtrTo("subnet-a")}, + {ID: fi.PtrTo("subnet-b")}, }, }, expectedCreateOpts: ports.CreateOpts{ @@ -856,19 +856,19 @@ func Test_Port_createOptsFromPortTask(t *testing.T) { }, }, expected: &Port{ - ID: fi.String("expected-id"), - Name: fi.String("name"), - Network: &Network{ID: fi.String("networkID")}, + ID: fi.PtrTo("expected-id"), + Name: fi.PtrTo("name"), + Network: &Network{ID: fi.PtrTo("networkID")}, SecurityGroups: []*SecurityGroup{ - {ID: fi.String("sg-1")}, - {ID: fi.String("sg-2")}, + {ID: fi.PtrTo("sg-1")}, + {ID: fi.PtrTo("sg-2")}, }, AdditionalSecurityGroups: []string{ "add-2", }, Subnets: []*Subnet{ - {ID: fi.String("subnet-a")}, - {ID: fi.String("subnet-b")}, + {ID: fi.PtrTo("subnet-a")}, + {ID: fi.PtrTo("subnet-b")}, }, }, expectedError: fmt.Errorf("Additional SecurityGroup not found for name add-2"), diff --git a/upup/pkg/fi/cloudup/openstacktasks/router.go b/upup/pkg/fi/cloudup/openstacktasks/router.go index 4fc5dcabedc32..74d51c205cc24 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/router.go +++ b/upup/pkg/fi/cloudup/openstacktasks/router.go @@ -42,8 +42,8 @@ func (n *Router) CompareWithID() *string { // NewRouterTaskFromCloud initializes and returns a new Router 
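// Editor's note: every change in this patch swaps the typed pointer helpers
// (fi.String, fi.StringValue, fi.Bool, fi.IntValue, fi.Int32Value, ...) for
// the generic fi.PtrTo / fi.ValueOf pair. A minimal sketch of the generics,
// assuming Go 1.18+; the names follow the patch, but the exact definitions
// live in upup/pkg/fi and may differ in detail:

func PtrTo[T any](v T) *T {
	// Return a pointer to a copy of v; replaces fi.String, fi.Bool, fi.Int32, etc.
	return &v
}

func ValueOf[T any](v *T) T {
	// Nil-safe dereference; replaces fi.StringValue, fi.IntValue, fi.BoolValue, etc.
	if v == nil {
		var zero T // "" for string, 0 for ints, false for bool
		return zero
	}
	return *v
}

// Because ValueOf yields the zero value for a nil pointer, call sites such as
// routers.ListOpts{Name: fi.ValueOf(n.Name)} below need no explicit nil checks.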
func NewRouterTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle, router *routers.Router, find *Router) (*Router, error) { actual := &Router{ - ID: fi.String(router.ID), - Name: fi.String(router.Name), + ID: fi.PtrTo(router.ID), + Name: fi.PtrTo(router.Name), Lifecycle: lifecycle, AvailabilityZoneHints: fi.StringSlice(router.AvailabilityZoneHints), } @@ -56,8 +56,8 @@ func NewRouterTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecyc func (n *Router) Find(context *fi.Context) (*Router, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := routers.ListOpts{ - Name: fi.StringValue(n.Name), - ID: fi.StringValue(n.ID), + Name: fi.ValueOf(n.Name), + ID: fi.ValueOf(n.ID), } rs, err := cloud.ListRouters(opt) if err != nil { @@ -66,7 +66,7 @@ func (n *Router) Find(context *fi.Context) (*Router, error) { if rs == nil { return nil, nil } else if len(rs) != 1 { - return nil, fmt.Errorf("found multiple routers with name: %s", fi.StringValue(n.Name)) + return nil, fmt.Errorf("found multiple routers with name: %s", fi.ValueOf(n.Name)) } return NewRouterTaskFromCloud(cloud, n.Lifecycle, &rs[0], n) } @@ -93,11 +93,11 @@ func (_ *Router) CheckChanges(a, e, changes *Router) error { func (_ *Router) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Router) error { if a == nil { - klog.V(2).Infof("Creating Router with name:%q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating Router with name:%q", fi.ValueOf(e.Name)) opt := routers.CreateOpts{ - Name: fi.StringValue(e.Name), - AdminStateUp: fi.Bool(true), + Name: fi.ValueOf(e.Name), + AdminStateUp: fi.PtrTo(true), AvailabilityZoneHints: fi.StringSliceValue(e.AvailabilityZoneHints), } floatingNet, err := t.Cloud.GetExternalNetwork() @@ -125,11 +125,11 @@ func (_ *Router) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes if err != nil { return fmt.Errorf("Error creating router: %v", err) } - e.ID = fi.String(v.ID) + e.ID = fi.PtrTo(v.ID) klog.V(2).Infof("Creating a new Openstack router, id=%s", v.ID) return nil } e.ID = a.ID - klog.V(2).Infof("Using an existing Openstack router, id=%s", fi.StringValue(e.ID)) + klog.V(2).Infof("Using an existing Openstack router, id=%s", fi.ValueOf(e.ID)) return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/routerinterface.go b/upup/pkg/fi/cloudup/openstacktasks/routerinterface.go index 2a4d1f28baf88..5fe6212263451 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/routerinterface.go +++ b/upup/pkg/fi/cloudup/openstacktasks/routerinterface.go @@ -58,9 +58,9 @@ func (i *RouterInterface) CompareWithID() *string { func (i *RouterInterface) Find(context *fi.Context) (*RouterInterface, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := ports.ListOpts{ - NetworkID: fi.StringValue(i.Subnet.Network.ID), - DeviceID: fi.StringValue(i.Router.ID), - ID: fi.StringValue(i.ID), + NetworkID: fi.ValueOf(i.Subnet.Network.ID), + DeviceID: fi.ValueOf(i.Router.ID), + ID: fi.ValueOf(i.ID), } ps, err := cloud.ListPorts(opt) if err != nil { @@ -72,7 +72,7 @@ func (i *RouterInterface) Find(context *fi.Context) (*RouterInterface, error) { var actual *RouterInterface - subnetID := fi.StringValue(i.Subnet.ID) + subnetID := fi.ValueOf(i.Subnet.ID) for _, p := range ps { for _, ip := range p.FixedIPs { if ip.SubnetID == subnetID { @@ -80,7 +80,7 @@ func (i *RouterInterface) Find(context *fi.Context) (*RouterInterface, error) { return nil, fmt.Errorf("found multiple interfaces which subnet:%s attach to", subnetID) } actual = &RouterInterface{ - ID: fi.String(p.ID), + 
ID: fi.PtrTo(p.ID), Name: i.Name, Router: i.Router, Subnet: i.Subnet, @@ -121,8 +121,8 @@ func (*RouterInterface) CheckChanges(a, e, changes *RouterInterface) error { func (_ *RouterInterface) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *RouterInterface) error { if a == nil { - routerID := fi.StringValue(e.Router.ID) - subnetID := fi.StringValue(e.Subnet.ID) + routerID := fi.ValueOf(e.Router.ID) + subnetID := fi.ValueOf(e.Subnet.ID) klog.V(2).Infof("Creating RouterInterface for router:%s and subnet:%s", routerID, subnetID) opt := routers.AddInterfaceOpts{SubnetID: subnetID} @@ -131,11 +131,11 @@ func (_ *RouterInterface) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, return fmt.Errorf("Error creating router interface: %v", err) } - e.ID = fi.String(v.PortID) + e.ID = fi.PtrTo(v.PortID) klog.V(2).Infof("Creating a new Openstack router interface, id=%s", v.PortID) return nil } e.ID = a.ID - klog.V(2).Infof("Using an existing Openstack router interface, id=%s", fi.StringValue(e.ID)) + klog.V(2).Infof("Using an existing Openstack router interface, id=%s", fi.ValueOf(e.ID)) return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/securitygroup.go b/upup/pkg/fi/cloudup/openstacktasks/securitygroup.go index 6e6146f1b203a..79077fbb673fb 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/securitygroup.go +++ b/upup/pkg/fi/cloudup/openstacktasks/securitygroup.go @@ -44,7 +44,7 @@ type SecurityGroupsByID []*SecurityGroup func (a SecurityGroupsByID) Len() int { return len(a) } func (a SecurityGroupsByID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a SecurityGroupsByID) Less(i, j int) bool { - return fi.StringValue(a[i].ID) < fi.StringValue(a[j].ID) + return fi.ValueOf(a[i].ID) < fi.ValueOf(a[j].ID) } var _ fi.CompareWithID = &SecurityGroup{} @@ -64,7 +64,7 @@ func (s *SecurityGroup) Find(context *fi.Context) (*SecurityGroup, error) { func getSecurityGroupByName(s *SecurityGroup, cloud openstack.OpenstackCloud) (*SecurityGroup, error) { opt := sg.ListOpts{ - Name: fi.StringValue(s.Name), + Name: fi.ValueOf(s.Name), } gs, err := cloud.ListSecurityGroups(opt) if err != nil { @@ -74,13 +74,13 @@ func getSecurityGroupByName(s *SecurityGroup, cloud openstack.OpenstackCloud) (* if n == 0 { return nil, nil } else if n != 1 { - return nil, fmt.Errorf("found multiple SecurityGroups with name: %s", fi.StringValue(s.Name)) + return nil, fmt.Errorf("found multiple SecurityGroups with name: %s", fi.ValueOf(s.Name)) } g := gs[0] actual := &SecurityGroup{ - ID: fi.String(g.ID), - Name: fi.String(g.Name), - Description: fi.String(g.Description), + ID: fi.PtrTo(g.ID), + Name: fi.PtrTo(g.Name), + Description: fi.PtrTo(g.Description), Lifecycle: s.Lifecycle, } actual.RemoveExtraRules = s.RemoveExtraRules @@ -111,11 +111,11 @@ func (_ *SecurityGroup) CheckChanges(a, e, changes *SecurityGroup) error { func (_ *SecurityGroup) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *SecurityGroup) error { if a == nil { - klog.V(2).Infof("Creating SecurityGroup with Name:%q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating SecurityGroup with Name:%q", fi.ValueOf(e.Name)) opt := sg.CreateOpts{ - Name: fi.StringValue(e.Name), - Description: fi.StringValue(e.Description), + Name: fi.ValueOf(e.Name), + Description: fi.ValueOf(e.Description), } g, err := t.Cloud.CreateSecurityGroup(opt) @@ -123,7 +123,7 @@ func (_ *SecurityGroup) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, c return fmt.Errorf("error creating SecurityGroup: %v", err) } - e.ID = fi.String(g.ID) + e.ID = 
fi.PtrTo(g.ID) return nil } @@ -169,7 +169,7 @@ func (s *SecurityGroup) FindDeletions(c *fi.Context) ([]fi.Deletion, error) { } sgRules, err := cloud.ListSecurityGroupRules(sgr.ListOpts{ - SecGroupID: fi.StringValue(sg.ID), + SecGroupID: fi.ValueOf(sg.ID), }) if err != nil { return nil, err @@ -211,11 +211,11 @@ func (s *SecurityGroup) FindDeletions(c *fi.Context) ([]fi.Deletion, error) { } func matches(t *SecurityGroupRule, perm sgr.SecGroupRule) bool { - if fi.IntValue(t.PortRangeMin) != perm.PortRangeMin { + if fi.ValueOf(t.PortRangeMin) != perm.PortRangeMin { return false } - if fi.IntValue(t.PortRangeMax) != perm.PortRangeMax { + if fi.ValueOf(t.PortRangeMax) != perm.PortRangeMax { return false } @@ -223,7 +223,7 @@ func matches(t *SecurityGroupRule, perm sgr.SecGroupRule) bool { return false } - if perm.RemoteIPPrefix != fi.StringValue(t.RemoteIPPrefix) { + if perm.RemoteIPPrefix != fi.ValueOf(t.RemoteIPPrefix) { return false } @@ -243,7 +243,7 @@ func (d *deleteSecurityGroup) Delete(t fi.Target) error { if !ok { return fmt.Errorf("unexpected target type for deletion: %T", t) } - err := os.Cloud.DeleteSecurityGroup(fi.StringValue(d.securityGroup.ID)) + err := os.Cloud.DeleteSecurityGroup(fi.ValueOf(d.securityGroup.ID)) if err != nil { return fmt.Errorf("error revoking SecurityGroup: %v", err) } @@ -255,7 +255,7 @@ func (d *deleteSecurityGroup) TaskName() string { } func (d *deleteSecurityGroup) Item() string { - s := fmt.Sprintf("securitygroup=%s", fi.StringValue(d.securityGroup.Name)) + s := fmt.Sprintf("securitygroup=%s", fi.ValueOf(d.securityGroup.Name)) return s } @@ -294,7 +294,7 @@ func (d *deleteSecurityGroupRule) Item() string { } s += " protocol=tcp" s += fmt.Sprintf(" ip=%s", d.rule.RemoteIPPrefix) - s += fmt.Sprintf(" securitygroup=%s", fi.StringValue(d.securityGroup.Name)) + s += fmt.Sprintf(" securitygroup=%s", fi.ValueOf(d.securityGroup.Name)) return s } diff --git a/upup/pkg/fi/cloudup/openstacktasks/securitygrouprule.go b/upup/pkg/fi/cloudup/openstacktasks/securitygrouprule.go index bad66531d95e1..30329dfc746ba 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/securitygrouprule.go +++ b/upup/pkg/fi/cloudup/openstacktasks/securitygrouprule.go @@ -77,16 +77,16 @@ func (r *SecurityGroupRule) Find(context *fi.Context) (*SecurityGroupRule, error cloud := context.Cloud.(openstack.OpenstackCloud) opt := sgr.ListOpts{ - Direction: fi.StringValue(r.Direction), - EtherType: fi.StringValue(r.EtherType), + Direction: fi.ValueOf(r.Direction), + EtherType: fi.ValueOf(r.EtherType), PortRangeMax: IntValue(r.PortRangeMax), PortRangeMin: IntValue(r.PortRangeMin), - Protocol: fi.StringValue(r.Protocol), - RemoteIPPrefix: fi.StringValue(r.RemoteIPPrefix), - SecGroupID: fi.StringValue(r.SecGroup.ID), + Protocol: fi.ValueOf(r.Protocol), + RemoteIPPrefix: fi.ValueOf(r.RemoteIPPrefix), + SecGroupID: fi.ValueOf(r.SecGroup.ID), } if r.RemoteGroup != nil { - opt.RemoteGroupID = fi.StringValue(r.RemoteGroup.ID) + opt.RemoteGroupID = fi.ValueOf(r.RemoteGroup.ID) } rs, err := cloud.ListSecurityGroupRules(opt) if err != nil { @@ -100,17 +100,17 @@ func (r *SecurityGroupRule) Find(context *fi.Context) (*SecurityGroupRule, error } rule := rs[0] actual := &SecurityGroupRule{ - ID: fi.String(rule.ID), - Direction: fi.String(rule.Direction), - EtherType: fi.String(rule.EtherType), + ID: fi.PtrTo(rule.ID), + Direction: fi.PtrTo(rule.Direction), + EtherType: fi.PtrTo(rule.EtherType), PortRangeMax: Int(rule.PortRangeMax), PortRangeMin: Int(rule.PortRangeMin), - Protocol: fi.String(rule.Protocol), - 
RemoteIPPrefix: fi.String(rule.RemoteIPPrefix), + Protocol: fi.PtrTo(rule.Protocol), + RemoteIPPrefix: fi.PtrTo(rule.RemoteIPPrefix), RemoteGroup: r.RemoteGroup, SecGroup: r.SecGroup, Lifecycle: r.Lifecycle, - Delete: fi.Bool(false), + Delete: fi.PtrTo(false), } r.ID = actual.ID @@ -152,7 +152,7 @@ func (*SecurityGroupRule) CheckChanges(a, e, changes *SecurityGroupRule) error { func (*SecurityGroupRule) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *SecurityGroupRule) error { if a == nil { klog.V(2).Infof("Creating SecurityGroupRule") - etherType := fi.StringValue(e.EtherType) + etherType := fi.ValueOf(e.EtherType) if e.RemoteIPPrefix != nil { if net.IsIPv4CIDRString(*e.RemoteIPPrefix) { etherType = "IPv4" @@ -161,24 +161,24 @@ func (*SecurityGroupRule) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, } } opt := sgr.CreateOpts{ - Direction: sgr.RuleDirection(fi.StringValue(e.Direction)), + Direction: sgr.RuleDirection(fi.ValueOf(e.Direction)), EtherType: sgr.RuleEtherType(etherType), - SecGroupID: fi.StringValue(e.SecGroup.ID), + SecGroupID: fi.ValueOf(e.SecGroup.ID), PortRangeMax: IntValue(e.PortRangeMax), PortRangeMin: IntValue(e.PortRangeMin), - Protocol: sgr.RuleProtocol(fi.StringValue(e.Protocol)), - RemoteIPPrefix: fi.StringValue(e.RemoteIPPrefix), + Protocol: sgr.RuleProtocol(fi.ValueOf(e.Protocol)), + RemoteIPPrefix: fi.ValueOf(e.RemoteIPPrefix), } if e.RemoteGroup != nil { - opt.RemoteGroupID = fi.StringValue(e.RemoteGroup.ID) + opt.RemoteGroupID = fi.ValueOf(e.RemoteGroup.ID) } r, err := t.Cloud.CreateSecurityGroupRule(opt) if err != nil { - return fmt.Errorf("error creating SecurityGroupRule in SG %s: %v", fi.StringValue(e.SecGroup.GetName()), err) + return fmt.Errorf("error creating SecurityGroupRule in SG %s: %v", fi.ValueOf(e.SecGroup.GetName()), err) } - e.ID = fi.String(r.ID) + e.ID = fi.PtrTo(r.ID) return nil } @@ -210,29 +210,29 @@ func (o *SecurityGroupRule) GetName() *string { func (o *SecurityGroupRule) String() string { var dst string if o.RemoteGroup != nil { - dst = fi.StringValue(o.RemoteGroup.Name) - } else if o.RemoteIPPrefix != nil && fi.StringValue(o.RemoteIPPrefix) != "" { - dst = fi.StringValue(o.RemoteIPPrefix) + dst = fi.ValueOf(o.RemoteGroup.Name) + } else if o.RemoteIPPrefix != nil && fi.ValueOf(o.RemoteIPPrefix) != "" { + dst = fi.ValueOf(o.RemoteIPPrefix) } else { dst = "ANY" } var proto string - if o.Protocol == nil || fi.StringValue(o.Protocol) == "" { + if o.Protocol == nil || fi.ValueOf(o.Protocol) == "" { proto = "AllProtos" } else { - proto = fi.StringValue(o.Protocol) + proto = fi.ValueOf(o.Protocol) } - return fmt.Sprintf("%v-%v-%v-from-%v-to-%v-%v-%v", fi.StringValue(o.EtherType), fi.StringValue(o.Direction), - proto, fi.StringValue(o.SecGroup.Name), dst, fi.IntValue(o.PortRangeMin), fi.IntValue(o.PortRangeMax)) + return fmt.Sprintf("%v-%v-%v-from-%v-to-%v-%v-%v", fi.ValueOf(o.EtherType), fi.ValueOf(o.Direction), + proto, fi.ValueOf(o.SecGroup.Name), dst, fi.ValueOf(o.PortRangeMin), fi.ValueOf(o.PortRangeMax)) } func (o *SecurityGroupRule) FindDeletions(c *fi.Context) ([]fi.Deletion, error) { - if !fi.BoolValue(o.Delete) { + if !fi.ValueOf(o.Delete) { return nil, nil } cloud := c.Cloud.(openstack.OpenstackCloud) - rule, err := sgr.Get(cloud.NetworkingClient(), fi.StringValue(o.ID)).Extract() + rule, err := sgr.Get(cloud.NetworkingClient(), fi.ValueOf(o.ID)).Extract() if err != nil { return nil, err } diff --git a/upup/pkg/fi/cloudup/openstacktasks/servergroup.go b/upup/pkg/fi/cloudup/openstacktasks/servergroup.go index 
2adc44584b59a..add2997b289ef 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/servergroup.go +++ b/upup/pkg/fi/cloudup/openstacktasks/servergroup.go @@ -74,16 +74,16 @@ func (s *ServerGroup) Find(context *fi.Context) (*ServerGroup, error) { for _, serverGroup := range serverGroups { if serverGroup.Name == *s.Name { if actual != nil { - return nil, fmt.Errorf("Found multiple server groups with name %s", fi.StringValue(s.Name)) + return nil, fmt.Errorf("Found multiple server groups with name %s", fi.ValueOf(s.Name)) } actual = &ServerGroup{ - Name: fi.String(serverGroup.Name), + Name: fi.PtrTo(serverGroup.Name), ClusterName: s.ClusterName, IGName: s.IGName, - ID: fi.String(serverGroup.ID), + ID: fi.PtrTo(serverGroup.ID), Lifecycle: s.Lifecycle, Policies: serverGroup.Policies, - MaxSize: fi.Int32(int32(len(serverGroup.Members))), + MaxSize: fi.PtrTo(int32(len(serverGroup.Members))), members: serverGroup.Members, } } @@ -93,7 +93,7 @@ func (s *ServerGroup) Find(context *fi.Context) (*ServerGroup, error) { } // ignore if IG is scaled up, this is handled in instancetasks - if fi.Int32Value(actual.MaxSize) < fi.Int32Value(s.MaxSize) { + if fi.ValueOf(actual.MaxSize) < fi.ValueOf(s.MaxSize) { s.MaxSize = actual.MaxSize } @@ -124,10 +124,10 @@ func (_ *ServerGroup) CheckChanges(a, e, changes *ServerGroup) error { func (_ *ServerGroup) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *ServerGroup) error { if a == nil { - klog.V(2).Infof("Creating ServerGroup with Name:%q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating ServerGroup with Name:%q", fi.ValueOf(e.Name)) opt := servergroups.CreateOpts{ - Name: fi.StringValue(e.Name), + Name: fi.ValueOf(e.Name), Policies: e.Policies, } @@ -135,16 +135,16 @@ func (_ *ServerGroup) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, cha if err != nil { return fmt.Errorf("error creating ServerGroup: %v", err) } - e.ID = fi.String(g.ID) + e.ID = fi.PtrTo(g.ID) return nil - } else if changes.MaxSize != nil && fi.Int32Value(a.MaxSize) > fi.Int32Value(changes.MaxSize) { - currentLastIndex := fi.Int32Value(a.MaxSize) + } else if changes.MaxSize != nil && fi.ValueOf(a.MaxSize) > fi.ValueOf(changes.MaxSize) { + currentLastIndex := fi.ValueOf(a.MaxSize) - for currentLastIndex > fi.Int32Value(changes.MaxSize) { - iName := strings.ToLower(fmt.Sprintf("%s-%d.%s", fi.StringValue(a.IGName), currentLastIndex, fi.StringValue(a.ClusterName))) + for currentLastIndex > fi.ValueOf(changes.MaxSize) { + iName := strings.ToLower(fmt.Sprintf("%s-%d.%s", fi.ValueOf(a.IGName), currentLastIndex, fi.ValueOf(a.ClusterName))) instanceName := strings.Replace(iName, ".", "-", -1) opts := servers.ListOpts{ - Name: fmt.Sprintf("^%s", fi.StringValue(a.IGName)), + Name: fmt.Sprintf("^%s", fi.ValueOf(a.IGName)), } allInstances, err := t.Cloud.ListInstances(opts) if err != nil { @@ -154,7 +154,7 @@ func (_ *ServerGroup) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, cha instances := []servers.Server{} for _, server := range allInstances { val, ok := server.Metadata["k8s"] - if !ok || val != fi.StringValue(a.ClusterName) { + if !ok || val != fi.ValueOf(a.ClusterName) { continue } metadataName := "" diff --git a/upup/pkg/fi/cloudup/openstacktasks/sshkey.go b/upup/pkg/fi/cloudup/openstacktasks/sshkey.go index 89f582f4629be..b5c0a026a56cc 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/sshkey.go +++ b/upup/pkg/fi/cloudup/openstacktasks/sshkey.go @@ -47,7 +47,7 @@ func (e *SSHKey) CompareWithID() *string { func (e *SSHKey) Find(c *fi.Context) (*SSHKey, error) { cloud := 
c.Cloud.(openstack.OpenstackCloud) - rs, err := cloud.GetKeypair(openstackKeyPairName(fi.StringValue(e.Name))) + rs, err := cloud.GetKeypair(openstackKeyPairName(fi.ValueOf(e.Name))) if err != nil { return nil, err } @@ -56,15 +56,15 @@ func (e *SSHKey) Find(c *fi.Context) (*SSHKey, error) { } actual := &SSHKey{ Name: e.Name, - KeyFingerprint: fi.String(rs.Fingerprint), + KeyFingerprint: fi.PtrTo(rs.Fingerprint), } // Avoid spurious changes - if fi.StringValue(actual.KeyFingerprint) == fi.StringValue(e.KeyFingerprint) { + if fi.ValueOf(actual.KeyFingerprint) == fi.ValueOf(e.KeyFingerprint) { klog.V(2).Infof("SSH key fingerprints match; assuming public keys match") actual.PublicKey = e.PublicKey } else { - klog.V(2).Infof("Computed SSH key fingerprint mismatch: %q %q", fi.StringValue(e.KeyFingerprint), fi.StringValue(actual.KeyFingerprint)) + klog.V(2).Infof("Computed SSH key fingerprint mismatch: %q %q", fi.ValueOf(e.KeyFingerprint), fi.ValueOf(actual.KeyFingerprint)) } actual.Lifecycle = e.Lifecycle return actual, nil @@ -115,10 +115,10 @@ func openstackKeyPairName(org string) string { func (_ *SSHKey) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *SSHKey) error { if a == nil { - klog.V(2).Infof("Creating Keypair with name:%q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating Keypair with name:%q", fi.ValueOf(e.Name)) opt := keypairs.CreateOpts{ - Name: openstackKeyPairName(fi.StringValue(e.Name)), + Name: openstackKeyPairName(fi.ValueOf(e.Name)), } if e.PublicKey != nil { @@ -134,11 +134,11 @@ func (_ *SSHKey) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes return fmt.Errorf("Error creating keypair: %v", err) } - e.KeyFingerprint = fi.String(v.Fingerprint) + e.KeyFingerprint = fi.PtrTo(v.Fingerprint) klog.V(2).Infof("Creating a new Openstack keypair, id=%s", v.Fingerprint) return nil } e.KeyFingerprint = a.KeyFingerprint - klog.V(2).Infof("Using an existing Openstack keypair, id=%s", fi.StringValue(e.KeyFingerprint)) + klog.V(2).Infof("Using an existing Openstack keypair, id=%s", fi.ValueOf(e.KeyFingerprint)) return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/subnet.go b/upup/pkg/fi/cloudup/openstacktasks/subnet.go index 657410aa24897..1ac83a2a06d94 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/subnet.go +++ b/upup/pkg/fi/cloudup/openstacktasks/subnet.go @@ -66,22 +66,22 @@ func NewSubnetTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecyc nameservers := make([]*string, len(subnet.DNSNameservers)) for i, ns := range subnet.DNSNameservers { - nameservers[i] = fi.String(ns) + nameservers[i] = fi.PtrTo(ns) } tag := "" - if find != nil && fi.ArrayContains(subnet.Tags, fi.StringValue(find.Tag)) { - tag = fi.StringValue(find.Tag) + if find != nil && fi.ArrayContains(subnet.Tags, fi.ValueOf(find.Tag)) { + tag = fi.ValueOf(find.Tag) } actual := &Subnet{ - ID: fi.String(subnet.ID), - Name: fi.String(subnet.Name), + ID: fi.PtrTo(subnet.ID), + Name: fi.PtrTo(subnet.Name), Network: networkTask, - CIDR: fi.String(subnet.CIDR), + CIDR: fi.PtrTo(subnet.CIDR), Lifecycle: lifecycle, DNSServers: nameservers, - Tag: fi.String(tag), + Tag: fi.PtrTo(tag), } if find != nil { find.ID = actual.ID @@ -92,11 +92,11 @@ func NewSubnetTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecyc func (s *Subnet) Find(context *fi.Context) (*Subnet, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := subnets.ListOpts{ - ID: fi.StringValue(s.ID), - Name: fi.StringValue(s.Name), - NetworkID: fi.StringValue(s.Network.ID), - CIDR: 
fi.StringValue(s.CIDR), - EnableDHCP: fi.Bool(true), + ID: fi.ValueOf(s.ID), + Name: fi.ValueOf(s.Name), + NetworkID: fi.ValueOf(s.Network.ID), + CIDR: fi.ValueOf(s.CIDR), + EnableDHCP: fi.PtrTo(true), IPVersion: 4, } rs, err := cloud.ListSubnets(opt) @@ -106,7 +106,7 @@ func (s *Subnet) Find(context *fi.Context) (*Subnet, error) { if rs == nil { return nil, nil } else if len(rs) != 1 { - return nil, fmt.Errorf("found multiple subnets with name: %s", fi.StringValue(s.Name)) + return nil, fmt.Errorf("found multiple subnets with name: %s", fi.ValueOf(s.Name)) } return NewSubnetTaskFromCloud(cloud, s.Lifecycle, &rs[0], s) } @@ -142,20 +142,20 @@ func (*Subnet) CheckChanges(a, e, changes *Subnet) error { func (*Subnet) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Subnet) error { if a == nil { - klog.V(2).Infof("Creating Subnet with name:%q", fi.StringValue(e.Name)) + klog.V(2).Infof("Creating Subnet with name:%q", fi.ValueOf(e.Name)) opt := subnets.CreateOpts{ - Name: fi.StringValue(e.Name), - NetworkID: fi.StringValue(e.Network.ID), + Name: fi.ValueOf(e.Name), + NetworkID: fi.ValueOf(e.Network.ID), IPVersion: gophercloud.IPv4, - CIDR: fi.StringValue(e.CIDR), - EnableDHCP: fi.Bool(true), + CIDR: fi.ValueOf(e.CIDR), + EnableDHCP: fi.PtrTo(true), } if len(e.DNSServers) > 0 { dnsNameSrv := make([]string, len(e.DNSServers)) for i, ns := range e.DNSServers { - dnsNameSrv[i] = fi.StringValue(ns) + dnsNameSrv[i] = fi.ValueOf(ns) } opt.DNSNameservers = dnsNameSrv } @@ -164,17 +164,17 @@ func (*Subnet) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *S return fmt.Errorf("Error creating subnet: %v", err) } - err = t.Cloud.AppendTag(openstack.ResourceTypeSubnet, v.ID, fi.StringValue(e.Tag)) + err = t.Cloud.AppendTag(openstack.ResourceTypeSubnet, v.ID, fi.ValueOf(e.Tag)) if err != nil { return fmt.Errorf("Error appending tag to subnet: %v", err) } - e.ID = fi.String(v.ID) + e.ID = fi.PtrTo(v.ID) klog.V(2).Infof("Creating a new Openstack subnet, id=%s", v.ID) return nil } else { if changes.Tag != nil { - err := t.Cloud.AppendTag(openstack.ResourceTypeSubnet, fi.StringValue(a.ID), fi.StringValue(changes.Tag)) + err := t.Cloud.AppendTag(openstack.ResourceTypeSubnet, fi.ValueOf(a.ID), fi.ValueOf(changes.Tag)) if err != nil { return fmt.Errorf("error appending tag to subnet: %v", err) } @@ -186,17 +186,17 @@ func (*Subnet) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *S if changes.DNSServers != nil { dnsNameSrv := make([]string, len(e.DNSServers)) for i, ns := range e.DNSServers { - dnsNameSrv[i] = fi.StringValue(ns) + dnsNameSrv[i] = fi.ValueOf(ns) } opt.DNSNameservers = &dnsNameSrv } - result := subnets.Update(client, fi.StringValue(a.ID), opt) + result := subnets.Update(client, fi.ValueOf(a.ID), opt) klog.Infof("Updated %v", opt) if result.Err != nil { return fmt.Errorf("error updating subnet %v: %v", a.ID, result.Err) } } e.ID = a.ID - klog.V(2).Infof("Using an existing Openstack subnet, id=%s", fi.StringValue(e.ID)) + klog.V(2).Infof("Using an existing Openstack subnet, id=%s", fi.ValueOf(e.ID)) return nil } diff --git a/upup/pkg/fi/cloudup/openstacktasks/volume.go b/upup/pkg/fi/cloudup/openstacktasks/volume.go index e0a9253c790f4..c84c62a9aaaa6 100644 --- a/upup/pkg/fi/cloudup/openstacktasks/volume.go +++ b/upup/pkg/fi/cloudup/openstacktasks/volume.go @@ -46,7 +46,7 @@ func (c *Volume) CompareWithID() *string { func (c *Volume) Find(context *fi.Context) (*Volume, error) { cloud := context.Cloud.(openstack.OpenstackCloud) opt := cinderv3.ListOpts{ - 
diff --git a/upup/pkg/fi/cloudup/openstacktasks/volume.go b/upup/pkg/fi/cloudup/openstacktasks/volume.go
index e0a9253c790f4..c84c62a9aaaa6 100644
--- a/upup/pkg/fi/cloudup/openstacktasks/volume.go
+++ b/upup/pkg/fi/cloudup/openstacktasks/volume.go
@@ -46,7 +46,7 @@ func (c *Volume) CompareWithID() *string {
func (c *Volume) Find(context *fi.Context) (*Volume, error) {
cloud := context.Cloud.(openstack.OpenstackCloud)
opt := cinderv3.ListOpts{
- Name: fi.StringValue(c.Name),
+ Name: fi.ValueOf(c.Name),
Metadata: c.Tags,
}
volumes, err := cloud.ListVolumes(opt)
@@ -57,15 +57,15 @@ func (c *Volume) Find(context *fi.Context) (*Volume, error) {
if n == 0 {
return nil, nil
} else if n != 1 {
- return nil, fmt.Errorf("found multiple Volumes with name: %s", fi.StringValue(c.Name))
+ return nil, fmt.Errorf("found multiple Volumes with name: %s", fi.ValueOf(c.Name))
}
v := volumes[0]
actual := &Volume{
- ID: fi.String(v.ID),
- Name: fi.String(v.Name),
- AvailabilityZone: fi.String(v.AvailabilityZone),
- VolumeType: fi.String(v.VolumeType),
- SizeGB: fi.Int64(int64(v.Size)),
+ ID: fi.PtrTo(v.ID),
+ Name: fi.PtrTo(v.Name),
+ AvailabilityZone: fi.PtrTo(v.AvailabilityZone),
+ VolumeType: fi.PtrTo(v.VolumeType),
+ SizeGB: fi.PtrTo(int64(v.Size)),
Tags: v.Metadata,
Lifecycle: c.Lifecycle,
}
@@ -123,9 +123,9 @@ func (_ *Volume) CheckChanges(a, e, changes *Volume) error {
func (_ *Volume) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Volume) error {
if a == nil {
- klog.V(2).Infof("Creating PersistentVolume with Name:%q", fi.StringValue(e.Name))
+ klog.V(2).Infof("Creating PersistentVolume with Name:%q", fi.ValueOf(e.Name))
- storageAZ, err := t.Cloud.GetStorageAZFromCompute(fi.StringValue(e.AvailabilityZone))
+ storageAZ, err := t.Cloud.GetStorageAZFromCompute(fi.ValueOf(e.AvailabilityZone))
if err != nil {
return fmt.Errorf("Failed to get storage availability zone: %s", err)
}
@@ -134,8 +134,8 @@ func (_ *Volume) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes
Size: int(*e.SizeGB),
AvailabilityZone: storageAZ.ZoneName,
Metadata: e.Tags,
- Name: fi.StringValue(e.Name),
- VolumeType: fi.StringValue(e.VolumeType),
+ Name: fi.ValueOf(e.Name),
+ VolumeType: fi.ValueOf(e.VolumeType),
}
v, err := t.Cloud.CreateVolume(opt)
@@ -143,17 +143,17 @@ func (_ *Volume) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes
return fmt.Errorf("error creating PersistentVolume: %v", err)
}
- e.ID = fi.String(v.ID)
- e.AvailabilityZone = fi.String(v.AvailabilityZone)
+ e.ID = fi.PtrTo(v.ID)
+ e.AvailabilityZone = fi.PtrTo(v.AvailabilityZone)
return nil
}
if changes != nil && changes.Tags != nil {
- klog.V(2).Infof("Update the tags on volume %q: %v, the differences are %v", fi.StringValue(e.ID), e.Tags, changes.Tags)
+ klog.V(2).Infof("Update the tags on volume %q: %v, the differences are %v", fi.ValueOf(e.ID), e.Tags, changes.Tags)
- err := t.Cloud.SetVolumeTags(fi.StringValue(e.ID), e.Tags)
+ err := t.Cloud.SetVolumeTags(fi.ValueOf(e.ID), e.Tags)
if err != nil {
- return fmt.Errorf("error updating the tags on volume %q: %v", fi.StringValue(e.ID), err)
+ return fmt.Errorf("error updating the tags on volume %q: %v", fi.ValueOf(e.ID), err)
}
}
diff --git a/upup/pkg/fi/cloudup/populate_cluster_spec.go b/upup/pkg/fi/cloudup/populate_cluster_spec.go
index a7e1a9df76b3d..785ce97f7d568 100644
--- a/upup/pkg/fi/cloudup/populate_cluster_spec.go
+++ b/upup/pkg/fi/cloudup/populate_cluster_spec.go
@@ -127,7 +127,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
return fmt.Errorf("EtcdMember #%d of etcd-cluster %s did not specify a Name", i, etcd.Name)
}
- if fi.StringValue(m.InstanceGroup) == "" {
+ if fi.ValueOf(m.InstanceGroup) == "" {
return fmt.Errorf("EtcdMember %s:%s did not specify a InstanceGroup", etcd.Name, m.Name)
}
}
@@ -140,7 +140,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
return fmt.Errorf("EtcdMembers found with same name %q in etcd-cluster %q", m.Name, etcd.Name)
}
- instanceGroupName := fi.StringValue(m.InstanceGroup)
+ instanceGroupName := fi.ValueOf(m.InstanceGroup)
if _, ok := etcdInstanceGroups[instanceGroupName]; ok {
klog.Warningf("EtcdMembers are in the same InstanceGroup %q in etcd-cluster %q (fault-tolerance may be reduced)", instanceGroupName, etcd.Name)
@@ -241,8 +241,8 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
}
if !cluster.UsesNoneDNS() {
- if cluster.Spec.DNSZone != "" && cluster.Spec.MasterPublicName == "" {
- cluster.Spec.MasterPublicName = "api." + cluster.Name
+ if cluster.Spec.DNSZone != "" && cluster.Spec.API.PublicName == "" {
+ cluster.Spec.API.PublicName = "api." + cluster.Name
}
if cluster.Spec.ExternalDNS == nil {
cluster.Spec.ExternalDNS = &kopsapi.ExternalDNSConfig{
diff --git a/upup/pkg/fi/cloudup/populate_cluster_spec_test.go b/upup/pkg/fi/cloudup/populate_cluster_spec_test.go
index 9f251bc0d3ca3..981cc5eb23345 100644
--- a/upup/pkg/fi/cloudup/populate_cluster_spec_test.go
+++ b/upup/pkg/fi/cloudup/populate_cluster_spec_test.go
@@ -90,7 +90,7 @@ func TestPopulateCluster_Subnets(t *testing.T) {
c.Spec.ExternalCloudControllerManager = &kopsapi.CloudControllerManagerConfig{}
c.Spec.CloudConfig = &kopsapi.CloudConfiguration{
AWSEBSCSIDriver: &kopsapi.AWSEBSCSIDriver{
- Enabled: fi.Bool(true),
+ Enabled: fi.PtrTo(true),
},
}
@@ -121,8 +121,8 @@ func mockedPopulateClusterSpec(c *kopsapi.Cluster, cloud fi.Cloud) (*kopsapi.Clu
func TestPopulateCluster_Docker_Spec(t *testing.T) {
cloud, c := buildMinimalCluster()
c.Spec.Docker = &kopsapi.DockerConfig{
- MTU: fi.Int32(5678),
- InsecureRegistry: fi.String("myregistry.com:1234"),
+ MTU: fi.PtrTo(int32(5678)),
+ InsecureRegistry: fi.PtrTo("myregistry.com:1234"),
InsecureRegistries: []string{"myregistry.com:1234", "myregistry2.com:1234"},
RegistryMirrors: []string{"https://registry.example.com"},
LogOpt: []string{"env=FOO"},
@@ -138,11 +138,11 @@ func TestPopulateCluster_Docker_Spec(t *testing.T) {
t.Fatalf("Unexpected error from PopulateCluster: %v", err)
}
- if fi.Int32Value(full.Spec.Docker.MTU) != 5678 {
+ if fi.ValueOf(full.Spec.Docker.MTU) != 5678 {
t.Fatalf("Unexpected Docker MTU: %v", full.Spec.Docker.MTU)
}
- if fi.StringValue(full.Spec.Docker.InsecureRegistry) != "myregistry.com:1234" {
+ if fi.ValueOf(full.Spec.Docker.InsecureRegistry) != "myregistry.com:1234" {
t.Fatalf("Unexpected Docker InsecureRegistry: %v", full.Spec.Docker.InsecureRegistry)
}
@@ -172,7 +172,7 @@ func TestPopulateCluster_StorageDefault(t *testing.T) {
t.Fatalf("Unexpected error from PopulateCluster: %v", err)
}
- if fi.StringValue(full.Spec.KubeAPIServer.StorageBackend) != "etcd3" {
+ if fi.ValueOf(full.Spec.KubeAPIServer.StorageBackend) != "etcd3" {
t.Fatalf("Unexpected StorageBackend: %v", *full.Spec.KubeAPIServer.StorageBackend)
}
}
@@ -186,7 +186,7 @@ func TestPopulateCluster_EvictionHard(t *testing.T) {
}
c.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
- EvictionHard: fi.String("memory.available<250Mi"),
+ EvictionHard: fi.PtrTo("memory.available<250Mi"),
}
full, err := mockedPopulateClusterSpec(c, cloud)
@@ -194,7 +194,7 @@ func TestPopulateCluster_EvictionHard(t *testing.T) {
t.Fatalf("Unexpected error from PopulateCluster: %v", err)
}
- if fi.StringValue(full.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
+ if fi.ValueOf(full.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
t.Fatalf("Unexpected StorageBackend: %v", *full.Spec.Kubelet.EvictionHard)
}
}
@@ -242,7 +242,7 @@ func TestPopulateCluster_Custom_CIDR(t *testing.T) {
func TestPopulateCluster_IsolateMasters(t *testing.T) {
cloud, c := buildMinimalCluster()
- c.Spec.IsolateMasters = fi.Bool(true)
+ c.Spec.IsolateMasters = fi.PtrTo(true)
err := PerformAssignments(c, cloud)
if err != nil {
@@ -253,17 +253,17 @@ func TestPopulateCluster_IsolateMasters(t *testing.T) {
if err != nil {
t.Fatalf("Unexpected error from PopulateCluster: %v", err)
}
- if fi.BoolValue(full.Spec.MasterKubelet.EnableDebuggingHandlers) != false {
- t.Fatalf("Unexpected EnableDebuggingHandlers: %v", fi.BoolValue(full.Spec.MasterKubelet.EnableDebuggingHandlers))
+ if fi.ValueOf(full.Spec.MasterKubelet.EnableDebuggingHandlers) != false {
+ t.Fatalf("Unexpected EnableDebuggingHandlers: %v", fi.ValueOf(full.Spec.MasterKubelet.EnableDebuggingHandlers))
}
- if fi.BoolValue(full.Spec.MasterKubelet.ReconcileCIDR) != false {
- t.Fatalf("Unexpected ReconcileCIDR: %v", fi.BoolValue(full.Spec.MasterKubelet.ReconcileCIDR))
+ if fi.ValueOf(full.Spec.MasterKubelet.ReconcileCIDR) != false {
+ t.Fatalf("Unexpected ReconcileCIDR: %v", fi.ValueOf(full.Spec.MasterKubelet.ReconcileCIDR))
}
}
func TestPopulateCluster_IsolateMastersFalse(t *testing.T) {
cloud, c := buildMinimalCluster()
- // default: c.Spec.IsolateMasters = fi.Bool(false)
+ // default: c.Spec.IsolateMasters = fi.PtrTo(false)
err := PerformAssignments(c, cloud)
if err != nil {
@@ -274,8 +274,8 @@ func TestPopulateCluster_IsolateMastersFalse(t *testing.T) {
if err != nil {
t.Fatalf("Unexpected error from PopulateCluster: %v", err)
}
- if fi.BoolValue(full.Spec.MasterKubelet.EnableDebuggingHandlers) != true {
- t.Fatalf("Unexpected EnableDebuggingHandlers: %v", fi.BoolValue(full.Spec.MasterKubelet.EnableDebuggingHandlers))
+ if fi.ValueOf(full.Spec.MasterKubelet.EnableDebuggingHandlers) != true {
+ t.Fatalf("Unexpected EnableDebuggingHandlers: %v", fi.ValueOf(full.Spec.MasterKubelet.EnableDebuggingHandlers))
}
}
@@ -364,8 +364,8 @@ func TestPopulateCluster_APIServerCount(t *testing.T) {
t.Fatalf("error during build: %v", err)
}
- if fi.Int32Value(full.Spec.KubeAPIServer.APIServerCount) != 3 {
- t.Fatalf("Unexpected APIServerCount: %v", fi.Int32Value(full.Spec.KubeAPIServer.APIServerCount))
+ if fi.ValueOf(full.Spec.KubeAPIServer.APIServerCount) != 3 {
+ t.Fatalf("Unexpected APIServerCount: %v", fi.ValueOf(full.Spec.KubeAPIServer.APIServerCount))
}
}
@@ -387,8 +387,8 @@ func TestPopulateCluster_AnonymousAuth(t *testing.T) {
t.Fatalf("AnonymousAuth not specified")
}
- if fi.BoolValue(full.Spec.KubeAPIServer.AnonymousAuth) != false {
- t.Fatalf("Unexpected AnonymousAuth: %v", fi.BoolValue(full.Spec.KubeAPIServer.AnonymousAuth))
+ if fi.ValueOf(full.Spec.KubeAPIServer.AnonymousAuth) != false {
+ t.Fatalf("Unexpected AnonymousAuth: %v", fi.ValueOf(full.Spec.KubeAPIServer.AnonymousAuth))
}
}
@@ -413,8 +413,8 @@ func TestPopulateCluster_DockerVersion(t *testing.T) {
t.Fatalf("error during build: %v", err)
}
- if fi.StringValue(full.Spec.Docker.Version) != test.DockerVersion {
- t.Fatalf("Unexpected DockerVersion: %v", fi.StringValue(full.Spec.Docker.Version))
+ if fi.ValueOf(full.Spec.Docker.Version) != test.DockerVersion {
+ t.Fatalf("Unexpected DockerVersion: %v", fi.ValueOf(full.Spec.Docker.Version))
}
}
}
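
One wrinkle in the hunks above is worth spelling out: fi.Int32(5678) cannot simply become fi.PtrTo(5678). With an untyped constant argument, Go infers T as int and returns *int, which does not satisfy an *int32 field; hence the explicit conversions such as fi.PtrTo(int32(5678)) in the Docker MTU change. A two-line illustration of the inference rule (variable names are illustrative only):

    mtu := fi.PtrTo(5678)          // T inferred as int, result is *int
    mtu32 := fi.PtrTo(int32(5678)) // explicit conversion, result is *int32
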
diff --git a/upup/pkg/fi/cloudup/populate_instancegroup_spec.go b/upup/pkg/fi/cloudup/populate_instancegroup_spec.go
index 60140a95895a1..f9630e10199b9 100644
--- a/upup/pkg/fi/cloudup/populate_instancegroup_spec.go
+++ b/upup/pkg/fi/cloudup/populate_instancegroup_spec.go
@@ -88,10 +88,10 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
}
if ig.Spec.MinSize == nil {
- ig.Spec.MinSize = fi.Int32(1)
+ ig.Spec.MinSize = fi.PtrTo(int32(1))
}
if ig.Spec.MaxSize == nil {
- ig.Spec.MaxSize = fi.Int32(1)
+ ig.Spec.MaxSize = fi.PtrTo(int32(1))
}
} else if ig.Spec.Role == kops.InstanceGroupRoleBastion {
if ig.Spec.MachineType == "" {
@@ -101,10 +101,10 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
}
}
if ig.Spec.MinSize == nil {
- ig.Spec.MinSize = fi.Int32(1)
+ ig.Spec.MinSize = fi.PtrTo(int32(1))
}
if ig.Spec.MaxSize == nil {
- ig.Spec.MaxSize = fi.Int32(1)
+ ig.Spec.MaxSize = fi.PtrTo(int32(1))
}
} else {
if ig.IsAPIServerOnly() && !featureflag.APIServerNodes.Enabled() {
@@ -117,10 +117,10 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
}
}
if ig.Spec.MinSize == nil {
- ig.Spec.MinSize = fi.Int32(2)
+ ig.Spec.MinSize = fi.PtrTo(int32(2))
}
if ig.Spec.MaxSize == nil {
- ig.Spec.MaxSize = fi.Int32(2)
+ ig.Spec.MaxSize = fi.PtrTo(int32(2))
}
}
@@ -182,11 +182,11 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
hasGPU := false
clusterNvidia := false
- if cluster.Spec.Containerd != nil && cluster.Spec.Containerd.NvidiaGPU != nil && fi.BoolValue(cluster.Spec.Containerd.NvidiaGPU.Enabled) {
+ if cluster.Spec.Containerd != nil && cluster.Spec.Containerd.NvidiaGPU != nil && fi.ValueOf(cluster.Spec.Containerd.NvidiaGPU.Enabled) {
clusterNvidia = true
}
igNvidia := false
- if ig.Spec.Containerd != nil && ig.Spec.Containerd.NvidiaGPU != nil && fi.BoolValue(ig.Spec.Containerd.NvidiaGPU.Enabled) {
+ if ig.Spec.Containerd != nil && ig.Spec.Containerd.NvidiaGPU != nil && fi.ValueOf(ig.Spec.Containerd.NvidiaGPU.Enabled) {
igNvidia = true
}
@@ -239,7 +239,7 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
}
// A few settings in Kubelet override those in MasterKubelet. I'm not sure why.
if cluster.Spec.Kubelet != nil && cluster.Spec.Kubelet.AnonymousAuth != nil && !*cluster.Spec.Kubelet.AnonymousAuth {
- igKubeletConfig.AnonymousAuth = fi.Bool(false)
+ igKubeletConfig.AnonymousAuth = fi.PtrTo(false)
}
} else {
if cluster.Spec.Kubelet != nil {
@@ -253,7 +253,7 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
// rolling update will still replace nodes when they change.
igKubeletConfig.NodeLabels = nodelabels.BuildNodeLabels(cluster, ig)
- useSecureKubelet := fi.BoolValue(igKubeletConfig.AnonymousAuth)
+ useSecureKubelet := fi.ValueOf(igKubeletConfig.AnonymousAuth)
// While slices are overridden in most cases, taints are explicitly merged
taints := sets.NewString(igKubeletConfig.Taints...)
@@ -286,7 +286,7 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
igKubeletConfig.Taints = taints.List()
if useSecureKubelet {
- igKubeletConfig.AnonymousAuth = fi.Bool(false)
+ igKubeletConfig.AnonymousAuth = fi.PtrTo(false)
}
ig.Spec.Kubelet = igKubeletConfig
diff --git a/upup/pkg/fi/cloudup/populate_instancegroup_spec_test.go b/upup/pkg/fi/cloudup/populate_instancegroup_spec_test.go
index 721134e22d302..9d8fcb1824031 100644
--- a/upup/pkg/fi/cloudup/populate_instancegroup_spec_test.go
+++ b/upup/pkg/fi/cloudup/populate_instancegroup_spec_test.go
@@ -30,8 +30,8 @@ func buildMinimalNodeInstanceGroup(subnets ...string) *kopsapi.InstanceGroup {
g := &kopsapi.InstanceGroup{}
g.ObjectMeta.Name = "nodes"
g.Spec.Role = kopsapi.InstanceGroupRoleNode
- g.Spec.MinSize = fi.Int32(1)
- g.Spec.MaxSize = fi.Int32(1)
+ g.Spec.MinSize = fi.PtrTo(int32(1))
+ g.Spec.MaxSize = fi.PtrTo(int32(1))
g.Spec.Image = "my-image"
g.Spec.Subnets = subnets
@@ -42,8 +42,8 @@ func buildMinimalMasterInstanceGroup(subnet string) *kopsapi.InstanceGroup {
g := &kopsapi.InstanceGroup{}
g.ObjectMeta.Name = "master-" + subnet
g.Spec.Role = kopsapi.InstanceGroupRoleMaster
- g.Spec.MinSize = fi.Int32(1)
- g.Spec.MaxSize = fi.Int32(1)
+ g.Spec.MinSize = fi.PtrTo(int32(1))
+ g.Spec.MaxSize = fi.PtrTo(int32(1))
g.Spec.Image = "my-image"
g.Spec.Subnets = []string{subnet}
@@ -76,7 +76,7 @@ func TestPopulateInstanceGroup_AddTaintsCollision(t *testing.T) {
input := buildMinimalNodeInstanceGroup()
input.Spec.Taints = []string{"nvidia.com/gpu:NoSchedule"}
input.Spec.MachineType = "g4dn.xlarge"
- cluster.Spec.Containerd.NvidiaGPU = &kopsapi.NvidiaGPUConfig{Enabled: fi.Bool(true)}
+ cluster.Spec.Containerd.NvidiaGPU = &kopsapi.NvidiaGPUConfig{Enabled: fi.PtrTo(true)}
channel := &kopsapi.Channel{}
@@ -125,11 +125,11 @@ func TestPopulateInstanceGroup_AddTaintsCollision3(t *testing.T) {
func TestPopulateInstanceGroup_EvictionHard(t *testing.T) {
_, cluster := buildMinimalCluster()
cluster.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
- EvictionHard: fi.String("memory.available<350Mi"),
+ EvictionHard: fi.PtrTo("memory.available<350Mi"),
}
input := buildMinimalNodeInstanceGroup()
input.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
- EvictionHard: fi.String("memory.available<250Mi"),
+ EvictionHard: fi.PtrTo("memory.available<250Mi"),
}
channel := &kopsapi.Channel{}
@@ -142,15 +142,15 @@ func TestPopulateInstanceGroup_EvictionHard(t *testing.T) {
if err != nil {
t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
}
- if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
- t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+ if fi.ValueOf(output.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
+ t.Errorf("Unexpected value %v", fi.ValueOf(output.Spec.Kubelet.EvictionHard))
}
}
func TestPopulateInstanceGroup_EvictionHard3(t *testing.T) {
_, cluster := buildMinimalCluster()
cluster.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
- EvictionHard: fi.String("memory.available<350Mi"),
+ EvictionHard: fi.PtrTo("memory.available<350Mi"),
}
input := buildMinimalMasterInstanceGroup("us-test-1")
@@ -165,15 +165,15 @@ func TestPopulateInstanceGroup_EvictionHard3(t *testing.T) {
t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
}
// There is no default EvictionHard
- if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "" {
- t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+ if fi.ValueOf(output.Spec.Kubelet.EvictionHard) != "" {
+ t.Errorf("Unexpected value %v", fi.ValueOf(output.Spec.Kubelet.EvictionHard))
}
}
func TestPopulateInstanceGroup_EvictionHard4(t *testing.T) {
_, cluster := buildMinimalCluster()
cluster.Spec.MasterKubelet = &kopsapi.KubeletConfigSpec{
- EvictionHard: fi.String("memory.available<350Mi"),
+ EvictionHard: fi.PtrTo("memory.available<350Mi"),
}
input := buildMinimalMasterInstanceGroup("us-test-1")
@@ -187,8 +187,8 @@ func TestPopulateInstanceGroup_EvictionHard4(t *testing.T) {
if err != nil {
t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
}
- if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "memory.available<350Mi" {
- t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+ if fi.ValueOf(output.Spec.Kubelet.EvictionHard) != "memory.available<350Mi" {
+ t.Errorf("Unexpected value %v", fi.ValueOf(output.Spec.Kubelet.EvictionHard))
}
}
@@ -196,7 +196,7 @@ func TestPopulateInstanceGroup_EvictionHard2(t *testing.T) {
_, cluster := buildMinimalCluster()
input := buildMinimalNodeInstanceGroup()
input.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
- EvictionHard: fi.String("memory.available<250Mi"),
+ EvictionHard: fi.PtrTo("memory.available<250Mi"),
}
channel := &kopsapi.Channel{}
@@ -209,8 +209,8 @@ func TestPopulateInstanceGroup_EvictionHard2(t *testing.T) {
if err != nil {
t.Fatalf("error from PopulateInstanceGroupSpec: %v", err)
}
- if fi.StringValue(output.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
- t.Errorf("Unexpected value %v", fi.StringValue(output.Spec.Kubelet.EvictionHard))
+ if fi.ValueOf(output.Spec.Kubelet.EvictionHard) != "memory.available<250Mi" {
+ t.Errorf("Unexpected value %v", fi.ValueOf(output.Spec.Kubelet.EvictionHard))
}
}
@@ -218,7 +218,7 @@ func TestPopulateInstanceGroup_AddTaints(t *testing.T) {
_, cluster := buildMinimalCluster()
input := buildMinimalNodeInstanceGroup()
input.Spec.MachineType = "g4dn.xlarge"
- cluster.Spec.Containerd.NvidiaGPU = &kopsapi.NvidiaGPUConfig{Enabled: fi.Bool(true)}
+ cluster.Spec.Containerd.NvidiaGPU = &kopsapi.NvidiaGPUConfig{Enabled: fi.PtrTo(true)}
channel := &kopsapi.Channel{}
diff --git a/upup/pkg/fi/cloudup/runc.go b/upup/pkg/fi/cloudup/runc.go
index f380408c28443..a0fc5e3fe0a4d 100644
--- a/upup/pkg/fi/cloudup/runc.go
+++ b/upup/pkg/fi/cloudup/runc.go
@@ -40,9 +40,9 @@ func findRuncAsset(c *kops.Cluster, assetBuilder *assets.AssetBuilder, arch arch
}
containerd := c.Spec.Containerd
- containerdVersion, err := semver.ParseTolerant(fi.StringValue(containerd.Version))
+ containerdVersion, err := semver.ParseTolerant(fi.ValueOf(containerd.Version))
if err != nil {
- return nil, nil, fmt.Errorf("unable to parse version string: %q", fi.StringValue(containerd.Version))
+ return nil, nil, fmt.Errorf("unable to parse version string: %q", fi.ValueOf(containerd.Version))
}
// A compatible runc binary is bundled with containerd builds < v1.6.0
// https://github.com/containerd/containerd/issues/6541
@@ -57,18 +57,18 @@
if runc.Packages != nil {
if arch == architectures.ArchitectureAmd64 && runc.Packages.UrlAmd64 != nil && runc.Packages.HashAmd64 != nil {
- assetUrl := fi.StringValue(runc.Packages.UrlAmd64)
- assetHash := fi.StringValue(runc.Packages.HashAmd64)
+ assetUrl := fi.ValueOf(runc.Packages.UrlAmd64)
+ assetHash := fi.ValueOf(runc.Packages.HashAmd64)
return findAssetsUrlHash(assetBuilder, assetUrl, assetHash)
}
if arch == architectures.ArchitectureArm64 && runc.Packages.UrlArm64 != nil && runc.Packages.HashArm64 != nil {
- assetUrl := fi.StringValue(runc.Packages.UrlArm64)
- assetHash := fi.StringValue(runc.Packages.HashArm64)
+ assetUrl := fi.ValueOf(runc.Packages.UrlArm64)
+ assetHash := fi.ValueOf(runc.Packages.HashArm64)
return findAssetsUrlHash(assetBuilder, assetUrl, assetHash)
}
}
- version := fi.StringValue(runc.Version)
+ version := fi.ValueOf(runc.Version)
if version == "" {
return nil, nil, fmt.Errorf("unable to find runc version")
}
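
The runc.go hunks lean on the assumed nil-safety of fi.ValueOf: when the spec leaves containerd.Version or runc.Version unset, the call degrades to "" instead of panicking on a nil dereference, and the code then falls through to an explicit error. A small sketch of that guard, with runcVersionOrError being a hypothetical helper and the fi and fmt imports assumed:

    // runcVersionOrError mirrors the guard in findRuncAsset: a nil
    // *string degrades to "" via fi.ValueOf rather than panicking.
    func runcVersionOrError(version *string) (string, error) {
        v := fi.ValueOf(version)
        if v == "" {
            return "", fmt.Errorf("unable to find runc version")
        }
        return v, nil
    }
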
diff --git a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go
index ab0af7ee210c9..40e3ce8ced3f2 100644
--- a/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go
+++ b/upup/pkg/fi/cloudup/spotinsttasks/elastigroup.go
@@ -165,25 +165,25 @@ func (e *Elastigroup) GetDependencies(tasks map[string]fi.Task) []fi.Task {
}
func (e *Elastigroup) find(svc spotinst.InstanceGroupService) (*aws.Group, error) {
- klog.V(4).Infof("Attempting to find Elastigroup: %q", fi.StringValue(e.Name))
+ klog.V(4).Infof("Attempting to find Elastigroup: %q", fi.ValueOf(e.Name))
groups, err := svc.List(context.Background())
if err != nil {
- return nil, fmt.Errorf("spotinst: failed to find elastigroup %s: %v", fi.StringValue(e.Name), err)
+ return nil, fmt.Errorf("spotinst: failed to find elastigroup %s: %v", fi.ValueOf(e.Name), err)
}
var out *aws.Group
for _, group := range groups {
- if group.Name() == fi.StringValue(e.Name) {
+ if group.Name() == fi.ValueOf(e.Name) {
out = group.Obj().(*aws.Group)
break
}
}
if out == nil {
- return nil, fmt.Errorf("spotinst: failed to find elastigroup %q", fi.StringValue(e.Name))
+ return nil, fmt.Errorf("spotinst: failed to find elastigroup %q", fi.ValueOf(e.Name))
}
- klog.V(4).Infof("Elastigroup/%s: %s", fi.StringValue(e.Name), stringutil.Stringify(out))
+ klog.V(4).Infof("Elastigroup/%s: %s", fi.ValueOf(e.Name), stringutil.Stringify(out))
return out, nil
}
@@ -203,8 +203,8 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
// Capacity.
{
- actual.MinSize = fi.Int64(int64(fi.IntValue(group.Capacity.Minimum)))
- actual.MaxSize = fi.Int64(int64(fi.IntValue(group.Capacity.Maximum)))
+ actual.MinSize = fi.PtrTo(int64(fi.ValueOf(group.Capacity.Minimum)))
+ actual.MaxSize = fi.PtrTo(int64(fi.ValueOf(group.Capacity.Maximum)))
}
// Strategy.
@@ -216,7 +216,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
actual.UtilizeCommitments = group.Strategy.UtilizeCommitments
if group.Strategy.DrainingTimeout != nil {
- actual.DrainingTimeout = fi.Int64(int64(fi.IntValue(group.Strategy.DrainingTimeout)))
+ actual.DrainingTimeout = fi.PtrTo(int64(fi.ValueOf(group.Strategy.DrainingTimeout)))
}
}
@@ -235,7 +235,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
{
for _, subnetID := range compute.SubnetIDs {
actual.Subnets = append(actual.Subnets,
- &awstasks.Subnet{ID: fi.String(subnetID)})
+ &awstasks.Subnet{ID: fi.PtrTo(subnetID)})
}
if subnetSlicesEqualIgnoreOrder(actual.Subnets, e.Subnets) {
actual.Subnets = e.Subnets
@@ -252,12 +252,12 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
actual.ImageID = lc.ImageID
if e.ImageID != nil && actual.ImageID != nil &&
- fi.StringValue(actual.ImageID) != fi.StringValue(e.ImageID) {
- image, err := resolveImage(cloud, fi.StringValue(e.ImageID))
+ fi.ValueOf(actual.ImageID) != fi.ValueOf(e.ImageID) {
+ image, err := resolveImage(cloud, fi.ValueOf(e.ImageID))
if err != nil {
return nil, err
}
- if fi.StringValue(image.ImageId) == fi.StringValue(lc.ImageID) {
+ if fi.ValueOf(image.ImageId) == fi.ValueOf(lc.ImageID) {
actual.ImageID = e.ImageID
}
}
@@ -268,7 +268,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
if lc.Tags != nil && len(lc.Tags) > 0 {
actual.Tags = make(map[string]string)
for _, tag := range lc.Tags {
- actual.Tags[fi.StringValue(tag.Key)] = fi.StringValue(tag.Value)
+ actual.Tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value)
}
}
}
@@ -278,7 +278,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
if lc.SecurityGroupIDs != nil {
for _, sgID := range lc.SecurityGroupIDs {
actual.SecurityGroups = append(actual.SecurityGroups,
- &awstasks.SecurityGroup{ID: fi.String(sgID)})
+ &awstasks.SecurityGroup{ID: fi.PtrTo(sgID)})
}
}
}
@@ -296,16 +296,16 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
actual.RootVolumeOpts = new(RootVolumeOpts)
}
if b.EBS.VolumeType != nil {
- actual.RootVolumeOpts.Type = fi.String(strings.ToLower(fi.StringValue(b.EBS.VolumeType)))
+ actual.RootVolumeOpts.Type = fi.PtrTo(strings.ToLower(fi.ValueOf(b.EBS.VolumeType)))
}
if b.EBS.VolumeSize != nil {
- actual.RootVolumeOpts.Size = fi.Int64(int64(fi.IntValue(b.EBS.VolumeSize)))
+ actual.RootVolumeOpts.Size = fi.PtrTo(int64(fi.ValueOf(b.EBS.VolumeSize)))
}
if b.EBS.IOPS != nil {
- actual.RootVolumeOpts.IOPS = fi.Int64(int64(fi.IntValue(b.EBS.IOPS)))
+ actual.RootVolumeOpts.IOPS = fi.PtrTo(int64(fi.ValueOf(b.EBS.IOPS)))
}
if b.EBS.Throughput != nil {
- actual.RootVolumeOpts.Throughput = fi.Int64(int64(fi.IntValue(b.EBS.Throughput)))
+ actual.RootVolumeOpts.Throughput = fi.PtrTo(int64(fi.ValueOf(b.EBS.Throughput)))
}
if b.EBS.Encrypted != nil {
actual.RootVolumeOpts.Encryption = b.EBS.Encrypted
@@ -316,7 +316,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
// EBS optimization.
{
- if fi.BoolValue(lc.EBSOptimized) {
+ if fi.ValueOf(lc.EBSOptimized) {
if actual.RootVolumeOpts == nil {
actual.RootVolumeOpts = new(RootVolumeOpts)
}
@@ -331,7 +331,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
var userData []byte
if lc.UserData != nil {
- userData, err = base64.StdEncoding.DecodeString(fi.StringValue(lc.UserData))
+ userData, err = base64.StdEncoding.DecodeString(fi.ValueOf(lc.UserData))
if err != nil {
return nil, err
}
@@ -346,21 +346,21 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
if lc.NetworkInterfaces != nil && len(lc.NetworkInterfaces) > 0 {
for _, iface := range lc.NetworkInterfaces {
- if fi.BoolValue(iface.AssociatePublicIPAddress) {
+ if fi.ValueOf(iface.AssociatePublicIPAddress) {
associatePublicIP = true
break
}
}
}
- actual.AssociatePublicIPAddress = fi.Bool(associatePublicIP)
+ actual.AssociatePublicIPAddress = fi.PtrTo(associatePublicIP)
}
// Load balancers.
{
if conf := lc.LoadBalancersConfig; conf != nil && len(conf.LoadBalancers) > 0 {
for _, lb := range conf.LoadBalancers {
- switch fi.StringValue(lb.Type) {
+ switch fi.ValueOf(lb.Type) {
case "CLASSIC":
actual.LoadBalancers = append(actual.LoadBalancers,
&awstasks.ClassicLoadBalancer{
@@ -376,19 +376,19 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
var apiLBTask *awstasks.ClassicLoadBalancer
for _, elb := range e.LoadBalancers {
- if !fi.BoolValue(elb.Shared) {
+ if !fi.ValueOf(elb.Shared) {
apiLBTask = elb
}
}
if apiLBTask != nil && len(actual.LoadBalancers) > 0 {
- apiLBDesc, err := cloud.FindELBByNameTag(fi.StringValue(apiLBTask.Name))
+ apiLBDesc, err := cloud.FindELBByNameTag(fi.ValueOf(apiLBTask.Name))
if err != nil {
return nil, err
}
if apiLBDesc != nil {
for i := 0; i < len(actual.LoadBalancers); i++ {
lb := actual.LoadBalancers[i]
- if fi.StringValue(apiLBDesc.LoadBalancerName) == fi.StringValue(lb.Name) {
+ if fi.ValueOf(apiLBDesc.LoadBalancerName) == fi.ValueOf(lb.Name) {
actual.LoadBalancers[i] = apiLBTask
}
}
@@ -439,16 +439,16 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
if headroom := integration.AutoScale.Headroom; headroom != nil {
actual.AutoScalerOpts.Headroom = new(AutoScalerHeadroomOpts)
- if v := fi.IntValue(headroom.CPUPerUnit); v > 0 {
+ if v := fi.ValueOf(headroom.CPUPerUnit); v > 0 {
actual.AutoScalerOpts.Headroom.CPUPerUnit = headroom.CPUPerUnit
}
- if v := fi.IntValue(headroom.GPUPerUnit); v > 0 {
+ if v := fi.ValueOf(headroom.GPUPerUnit); v > 0 {
actual.AutoScalerOpts.Headroom.GPUPerUnit = headroom.GPUPerUnit
}
- if v := fi.IntValue(headroom.MemoryPerUnit); v > 0 {
+ if v := fi.ValueOf(headroom.MemoryPerUnit); v > 0 {
actual.AutoScalerOpts.Headroom.MemPerUnit = headroom.MemoryPerUnit
}
- if v := fi.IntValue(headroom.NumOfUnits); v > 0 {
+ if v := fi.ValueOf(headroom.NumOfUnits); v > 0 {
actual.AutoScalerOpts.Headroom.NumOfUnits = headroom.NumOfUnits
}
}
@@ -466,7 +466,7 @@ func (e *Elastigroup) Find(c *fi.Context) (*Elastigroup, error) {
actual.AutoScalerOpts.Labels = make(map[string]string)
for _, label := range labels {
- actual.AutoScalerOpts.Labels[fi.StringValue(label.Key)] = fi.StringValue(label.Value)
+ actual.AutoScalerOpts.Labels[fi.ValueOf(label.Key)] = fi.ValueOf(label.Value)
}
}
}
@@ -530,21 +530,21 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
// Capacity.
{
- group.Capacity.SetTarget(fi.Int(int(*e.MinSize)))
- group.Capacity.SetMinimum(fi.Int(int(*e.MinSize)))
- group.Capacity.SetMaximum(fi.Int(int(*e.MaxSize)))
+ group.Capacity.SetTarget(fi.PtrTo(int(*e.MinSize)))
+ group.Capacity.SetMinimum(fi.PtrTo(int(*e.MinSize)))
+ group.Capacity.SetMaximum(fi.PtrTo(int(*e.MaxSize)))
}
// Strategy.
{
group.Strategy.SetRisk(e.SpotPercentage)
- group.Strategy.SetAvailabilityVsCost(fi.String(string(normalizeOrientation(e.Orientation))))
+ group.Strategy.SetAvailabilityVsCost(fi.PtrTo(string(normalizeOrientation(e.Orientation))))
group.Strategy.SetFallbackToOnDemand(e.FallbackToOnDemand)
group.Strategy.SetUtilizeReservedInstances(e.UtilizeReservedInstances)
group.Strategy.SetUtilizeCommitments(e.UtilizeCommitments)
if e.DrainingTimeout != nil {
- group.Strategy.SetDrainingTimeout(fi.Int(int(*e.DrainingTimeout)))
+ group.Strategy.SetDrainingTimeout(fi.PtrTo(int(*e.DrainingTimeout)))
}
}
@@ -562,7 +562,7 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
{
subnets := make([]string, len(e.Subnets))
for i, subnet := range e.Subnets {
- subnets[i] = fi.StringValue(subnet.ID)
+ subnets[i] = fi.ValueOf(subnet.ID)
}
group.Compute.SetSubnetIDs(subnets)
}
@@ -601,7 +601,7 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
// Image.
{
- image, err := resolveImage(cloud, fi.StringValue(e.ImageID))
+ image, err := resolveImage(cloud, fi.ValueOf(e.ImageID))
if err != nil {
return err
}
@@ -618,7 +618,7 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
if len(userData) > 0 {
encoded := base64.StdEncoding.EncodeToString([]byte(userData))
- group.Compute.LaunchSpecification.SetUserData(fi.String(encoded))
+ group.Compute.LaunchSpecification.SetUserData(fi.PtrTo(encoded))
}
}
}
@@ -647,9 +647,9 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
{
if e.AssociatePublicIPAddress != nil {
iface := &aws.NetworkInterface{
- Description: fi.String("eth0"),
- DeviceIndex: fi.Int(0),
- DeleteOnTermination: fi.Bool(true),
+ Description: fi.PtrTo("eth0"),
+ DeviceIndex: fi.PtrTo(0),
+ DeleteOnTermination: fi.PtrTo(true),
AssociatePublicIPAddress: e.AssociatePublicIPAddress,
}
@@ -706,18 +706,18 @@ func (_ *Elastigroup) create(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
{
if opts := e.AutoScalerOpts; opts != nil {
k8s := new(aws.KubernetesIntegration)
- k8s.SetIntegrationMode(fi.String("pod"))
+ k8s.SetIntegrationMode(fi.PtrTo("pod"))
k8s.SetClusterIdentifier(opts.ClusterID)
if opts.Enabled != nil {
autoScaler := new(aws.AutoScaleKubernetes)
autoScaler.IsEnabled = opts.Enabled
- autoScaler.IsAutoConfig = fi.Bool(true)
+ autoScaler.IsAutoConfig = fi.PtrTo(true)
autoScaler.Cooldown = opts.Cooldown
// Headroom.
if headroom := opts.Headroom; headroom != nil {
- autoScaler.IsAutoConfig = fi.Bool(false)
+ autoScaler.IsAutoConfig = fi.PtrTo(false)
autoScaler.Headroom = &aws.AutoScaleHeadroom{
CPUPerUnit: headroom.CPUPerUnit,
GPUPerUnit: headroom.GPUPerUnit,
@@ -780,8 +780,8 @@ readyLoop:
return fmt.Errorf("IAM instance profile not yet created/propagated (original error: %v)", err)
}
- klog.V(4).Infof("Got an error indicating that the IAM instance profile %q is not ready %q", fi.StringValue(e.IAMInstanceProfile.Name), err)
- klog.Infof("Waiting for IAM instance profile %q to be ready", fi.StringValue(e.IAMInstanceProfile.Name))
+ klog.V(4).Infof("Got an error indicating that the IAM instance profile %q is not ready %q", fi.ValueOf(e.IAMInstanceProfile.Name), err)
+ klog.Infof("Waiting for IAM instance profile %q to be ready", fi.ValueOf(e.IAMInstanceProfile.Name))
goto readyLoop
}
}
@@ -832,7 +832,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
group.Strategy = new(aws.Strategy)
}
- group.Strategy.SetAvailabilityVsCost(fi.String(string(normalizeOrientation(e.Orientation))))
+ group.Strategy.SetAvailabilityVsCost(fi.PtrTo(string(normalizeOrientation(e.Orientation))))
changes.Orientation = nil
changed = true
}
@@ -876,7 +876,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
group.Strategy = new(aws.Strategy)
}
- group.Strategy.SetDrainingTimeout(fi.Int(int(*e.DrainingTimeout)))
+ group.Strategy.SetDrainingTimeout(fi.PtrTo(int(*e.DrainingTimeout)))
changes.DrainingTimeout = nil
changed = true
}
@@ -939,7 +939,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
subnets := make([]string, len(e.Subnets))
for i, subnet := range e.Subnets {
- subnets[i] = fi.StringValue(subnet.ID)
+ subnets[i] = fi.ValueOf(subnet.ID)
}
group.Compute.SetSubnetIDs(subnets)
@@ -988,7 +988,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
}
encoded := base64.StdEncoding.EncodeToString([]byte(userData))
- group.Compute.LaunchSpecification.SetUserData(fi.String(encoded))
+ group.Compute.LaunchSpecification.SetUserData(fi.PtrTo(encoded))
changed = true
}
@@ -1007,9 +1007,9 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
}
iface := &aws.NetworkInterface{
- Description: fi.String("eth0"),
- DeviceIndex: fi.Int(0),
- DeleteOnTermination: fi.Bool(true),
+ Description: fi.PtrTo("eth0"),
+ DeviceIndex: fi.PtrTo(0),
+ DeleteOnTermination: fi.PtrTo(true),
AssociatePublicIPAddress: changes.AssociatePublicIPAddress,
}
@@ -1077,7 +1077,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
// Image.
{
if changes.ImageID != nil {
- image, err := resolveImage(cloud, fi.StringValue(e.ImageID))
+ image, err := resolveImage(cloud, fi.ValueOf(e.ImageID))
if err != nil {
return err
}
@@ -1253,13 +1253,13 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
group.Capacity = new(aws.Capacity)
}
- group.Capacity.SetMinimum(fi.Int(int(*e.MinSize)))
+ group.Capacity.SetMinimum(fi.PtrTo(int(*e.MinSize)))
changes.MinSize = nil
changed = true
// Scale up the target capacity, if needed.
if int64(*actual.Capacity.Target) < *e.MinSize {
- group.Capacity.SetTarget(fi.Int(int(*e.MinSize)))
+ group.Capacity.SetTarget(fi.PtrTo(int(*e.MinSize)))
}
}
if changes.MaxSize != nil {
@@ -1267,7 +1267,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
group.Capacity = new(aws.Capacity)
}
- group.Capacity.SetMaximum(fi.Int(int(*e.MaxSize)))
+ group.Capacity.SetMaximum(fi.PtrTo(int(*e.MaxSize)))
changes.MaxSize = nil
changed = true
}
@@ -1283,7 +1283,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
// Headroom.
if headroom := opts.Headroom; headroom != nil {
- autoScaler.IsAutoConfig = fi.Bool(false)
+ autoScaler.IsAutoConfig = fi.PtrTo(false)
autoScaler.Headroom = &aws.AutoScaleHeadroom{
CPUPerUnit: e.AutoScalerOpts.Headroom.CPUPerUnit,
GPUPerUnit: e.AutoScalerOpts.Headroom.GPUPerUnit,
@@ -1291,7 +1291,7 @@ func (_ *Elastigroup) update(cloud awsup.AWSCloud, a, e, changes *Elastigroup) e
NumOfUnits: e.AutoScalerOpts.Headroom.NumOfUnits,
}
} else if a.AutoScalerOpts != nil && a.AutoScalerOpts.Headroom != nil {
- autoScaler.IsAutoConfig = fi.Bool(true)
+ autoScaler.IsAutoConfig = fi.PtrTo(true)
autoScaler.SetHeadroom(nil)
}
@@ -1473,10 +1473,10 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
DesiredCapacity: e.MinSize,
MinSize: e.MinSize,
MaxSize: e.MaxSize,
- CapacityUnit: fi.String("instance"),
+ CapacityUnit: fi.PtrTo("instance"),
SpotPercentage: e.SpotPercentage,
- Orientation: fi.String(string(normalizeOrientation(e.Orientation))),
+ Orientation: fi.PtrTo(string(normalizeOrientation(e.Orientation))),
FallbackToOnDemand: e.FallbackToOnDemand,
UtilizeReservedInstances: e.UtilizeReservedInstances,
UtilizeCommitments: e.UtilizeCommitments,
@@ -1488,7 +1488,7 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
// Image.
if e.ImageID != nil {
- image, err := resolveImage(cloud, fi.StringValue(e.ImageID))
+ image, err := resolveImage(cloud, fi.ValueOf(e.ImageID))
if err != nil {
return err
}
@@ -1576,9 +1576,9 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
// Public IP.
if e.AssociatePublicIPAddress != nil {
tf.NetworkInterfaces = append(tf.NetworkInterfaces, &terraformElastigroupNetworkInterface{
- Description: fi.String("eth0"),
- DeviceIndex: fi.Int(0),
- DeleteOnTermination: fi.Bool(true),
+ Description: fi.PtrTo("eth0"),
+ DeviceIndex: fi.PtrTo(0),
+ DeleteOnTermination: fi.PtrTo(true),
AssociatePublicIPAddress: e.AssociatePublicIPAddress,
})
}
@@ -1600,7 +1600,7 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
VolumeIOPS: rootDevice.EbsVolumeIops,
VolumeThroughput: rootDevice.EbsVolumeThroughput,
Encrypted: rootDevice.EbsEncrypted,
- DeleteOnTermination: fi.Bool(true),
+ DeleteOnTermination: fi.PtrTo(true),
}
ephemeralDevices, err := buildEphemeralDevices(cloud, e.OnDemandInstanceType)
@@ -1632,18 +1632,18 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
{
if opts := e.AutoScalerOpts; opts != nil {
tf.Integration = &terraformElastigroupIntegration{
- IntegrationMode: fi.String("pod"),
+ IntegrationMode: fi.PtrTo("pod"),
ClusterIdentifier: opts.ClusterID,
}
if opts.Enabled != nil {
tf.Integration.Enabled = opts.Enabled
- tf.Integration.AutoConfig = fi.Bool(true)
+ tf.Integration.AutoConfig = fi.PtrTo(true)
tf.Integration.Cooldown = opts.Cooldown
// Headroom.
if headroom := opts.Headroom; headroom != nil {
- tf.Integration.AutoConfig = fi.Bool(false)
+ tf.Integration.AutoConfig = fi.PtrTo(false)
tf.Integration.Headroom = &terraformAutoScalerHeadroom{
CPUPerUnit: headroom.CPUPerUnit,
GPUPerUnit: headroom.GPUPerUnit,
@@ -1665,8 +1665,8 @@ func (_ *Elastigroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
tf.Integration.Labels = make([]*terraformKV, 0, len(labels))
for k, v := range labels {
tf.Integration.Labels = append(tf.Integration.Labels, &terraformKV{
- Key: fi.String(k),
- Value: fi.String(v),
+ Key: fi.PtrTo(k),
+ Value: fi.PtrTo(v),
})
}
}
@@ -1699,8 +1699,8 @@ func (e *Elastigroup) buildTags() []*aws.Tag {
for key, value := range e.Tags {
tags = append(tags, &aws.Tag{
- Key: fi.String(key),
- Value: fi.String(value),
+ Key: fi.PtrTo(key),
+ Value: fi.PtrTo(value),
})
}
@@ -1711,8 +1711,8 @@ func (e *Elastigroup) buildAutoScaleLabels(labelsMap map[string]string) []*aws.A
labels := make([]*aws.AutoScaleLabel, 0, len(labelsMap))
for key, value := range labelsMap {
labels = append(labels, &aws.AutoScaleLabel{
- Key: fi.String(key),
- Value: fi.String(value),
+ Key: fi.PtrTo(key),
+ Value: fi.PtrTo(value),
})
}
@@ -1723,7 +1723,7 @@ func (e *Elastigroup) buildLoadBalancers(cloud awsup.AWSCloud) ([]*aws.LoadBalan
lbs := make([]*aws.LoadBalancer, len(e.LoadBalancers))
for i, lb := range e.LoadBalancers {
if lb.LoadBalancerName == nil {
- lbName := fi.StringValue(lb.GetName())
+ lbName := fi.ValueOf(lb.GetName())
lbDesc, err := cloud.FindELBByNameTag(lbName)
if err != nil {
return nil, err
@@ -1733,12 +1733,12 @@ func (e *Elastigroup) buildLoadBalancers(cloud awsup.AWSCloud) ([]*aws.LoadBalan
"load balancer to attach: %s", lbName)
}
lbs[i] = &aws.LoadBalancer{
- Type: fi.String("CLASSIC"),
+ Type: fi.PtrTo("CLASSIC"),
Name: lbDesc.LoadBalancerName,
}
} else {
lbs[i] = &aws.LoadBalancer{
- Type: fi.String("CLASSIC"),
+ Type: fi.PtrTo("CLASSIC"),
Name: lb.LoadBalancerName,
}
}
@@ -1750,7 +1750,7 @@ func (e *Elastigroup) buildTargetGroups() []*aws.LoadBalancer {
tgs := make([]*aws.LoadBalancer, len(e.TargetGroups))
for i, tg := range e.TargetGroups {
tgs[i] = &aws.LoadBalancer{
- Type: fi.String("TARGET_GROUP"),
+ Type: fi.PtrTo("TARGET_GROUP"),
Arn: tg.ARN,
}
}
@@ -1758,7 +1758,7 @@ func (e *Elastigroup) buildTargetGroups() []*aws.LoadBalancer {
}
func buildEphemeralDevices(cloud awsup.AWSCloud, machineType *string) ([]*awstasks.BlockDeviceMapping, error) {
- info, err := awsup.GetMachineTypeInfo(cloud, fi.StringValue(machineType))
+ info, err := awsup.GetMachineTypeInfo(cloud, fi.ValueOf(machineType))
if err != nil {
return nil, err
}
@@ -1766,8 +1766,8 @@ func buildEphemeralDevices(cloud awsup.AWSCloud, machineType *string) ([]*awstas
bdms := make([]*awstasks.BlockDeviceMapping, len(info.EphemeralDevices()))
for i, ed := range info.EphemeralDevices() {
bdms[i] = &awstasks.BlockDeviceMapping{
- DeviceName: fi.String(ed.DeviceName),
- VirtualName: fi.String(ed.VirtualName),
+ DeviceName: fi.PtrTo(ed.DeviceName),
+ VirtualName: fi.PtrTo(ed.VirtualName),
}
}
@@ -1777,7 +1777,7 @@ func buildEphemeralDevices(cloud awsup.AWSCloud, machineType *string) ([]*awstas
func buildRootDevice(cloud awsup.AWSCloud, volumeOpts *RootVolumeOpts, imageID *string) (*awstasks.BlockDeviceMapping, error,
) {
- img, err := resolveImage(cloud, fi.StringValue(imageID))
+ img, err := resolveImage(cloud, fi.ValueOf(imageID))
if err != nil {
return nil, err
}
@@ -1787,16 +1787,16 @@ func buildRootDevice(cloud awsup.AWSCloud, volumeOpts *RootVolumeOpts,
EbsVolumeSize: volumeOpts.Size,
EbsVolumeType: volumeOpts.Type,
EbsEncrypted: volumeOpts.Encryption,
- EbsDeleteOnTermination: fi.Bool(true),
+ EbsDeleteOnTermination: fi.PtrTo(true),
}
// IOPS is not supported for gp2 volumes.
- if volumeOpts.IOPS != nil && fi.StringValue(volumeOpts.Type) != "gp2" {
+ if volumeOpts.IOPS != nil && fi.ValueOf(volumeOpts.Type) != "gp2" {
bdm.EbsVolumeIops = volumeOpts.IOPS
}
// Throughput is only supported for gp3 volumes.
- if volumeOpts.Throughput != nil && fi.StringValue(volumeOpts.Type) == "gp3" {
+ if volumeOpts.Throughput != nil && fi.ValueOf(volumeOpts.Type) == "gp3" {
bdm.EbsVolumeThroughput = volumeOpts.Throughput
}
@@ -1812,19 +1812,19 @@ func (e *Elastigroup) convertBlockDeviceMapping(in *awstasks.BlockDeviceMapping)
if in.EbsDeleteOnTermination != nil || in.EbsVolumeSize != nil || in.EbsVolumeType != nil {
out.EBS = &aws.EBS{
VolumeType: in.EbsVolumeType,
- VolumeSize: fi.Int(int(fi.Int64Value(in.EbsVolumeSize))),
+ VolumeSize: fi.PtrTo(int(fi.ValueOf(in.EbsVolumeSize))),
Encrypted: in.EbsEncrypted,
DeleteOnTermination: in.EbsDeleteOnTermination,
}
// IOPS is not valid for gp2 volumes.
- if in.EbsVolumeIops != nil && fi.StringValue(in.EbsVolumeType) != "gp2" {
- out.EBS.IOPS = fi.Int(int(fi.Int64Value(in.EbsVolumeIops)))
+ if in.EbsVolumeIops != nil && fi.ValueOf(in.EbsVolumeType) != "gp2" {
+ out.EBS.IOPS = fi.PtrTo(int(fi.ValueOf(in.EbsVolumeIops)))
}
// Throughput is only valid for gp3 volumes.
- if in.EbsVolumeThroughput != nil && fi.StringValue(in.EbsVolumeType) == "gp3" {
- out.EBS.Throughput = fi.Int(int(fi.Int64Value(in.EbsVolumeThroughput)))
+ if in.EbsVolumeThroughput != nil && fi.ValueOf(in.EbsVolumeType) == "gp3" {
+ out.EBS.Throughput = fi.PtrTo(int(fi.ValueOf(in.EbsVolumeThroughput)))
}
}
@@ -1833,27 +1833,27 @@ func (e *Elastigroup) convertBlockDeviceMapping(in *awstasks.BlockDeviceMapping)
func (e *Elastigroup) applyDefaults() {
if e.FallbackToOnDemand == nil {
- e.FallbackToOnDemand = fi.Bool(true)
+ e.FallbackToOnDemand = fi.PtrTo(true)
}
if e.UtilizeReservedInstances == nil {
- e.UtilizeReservedInstances = fi.Bool(true)
+ e.UtilizeReservedInstances = fi.PtrTo(true)
}
- if e.Product == nil || (e.Product != nil && fi.StringValue(e.Product) == "") {
- e.Product = fi.String("Linux/UNIX")
+ if e.Product == nil || (e.Product != nil && fi.ValueOf(e.Product) == "") {
+ e.Product = fi.PtrTo("Linux/UNIX")
}
- if e.Orientation == nil || (e.Orientation != nil && fi.StringValue(e.Orientation) == "") {
- e.Orientation = fi.String("balanced")
+ if e.Orientation == nil || (e.Orientation != nil && fi.ValueOf(e.Orientation) == "") {
+ e.Orientation = fi.PtrTo("balanced")
}
if e.Monitoring == nil {
- e.Monitoring = fi.Bool(false)
+ e.Monitoring = fi.PtrTo(false)
}
if e.HealthCheckType == nil {
- e.HealthCheckType = fi.String("K8S_NODE")
+ e.HealthCheckType = fi.PtrTo("K8S_NODE")
}
}
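
A recurring shape in the elastigroup.go hunks is fi.PtrTo(int64(fi.ValueOf(p))): the Spotinst SDK models counts as *int while the kops tasks use *int64, and Go has no direct pointer cast between the two, so the conversion round-trips through the value. Note the nil handling this implies: a nil *int comes out as a pointer to zero, not as nil. A hypothetical helper making the shape explicit, under the assumed PtrTo/ValueOf semantics:

    // int64PtrFromIntPtr widens a Spotinst *int into a kops *int64.
    // A nil input yields a pointer to 0, matching the hunks above.
    func int64PtrFromIntPtr(p *int) *int64 {
        return fi.PtrTo(int64(fi.ValueOf(p)))
    }
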
err != nil { - return nil, fmt.Errorf("spotinst: failed to find launch spec %q: %v", fi.StringValue(o.Name), err) + return nil, fmt.Errorf("spotinst: failed to find launch spec %q: %v", fi.ValueOf(o.Name), err) } if len(specs) == 0 { return nil, fmt.Errorf("spotinst: no launch specs associated with ocean %q", oceanID) @@ -111,16 +111,16 @@ func (o *LaunchSpec) find(svc spotinst.LaunchSpecService, oceanID string) (*aws. var out *aws.LaunchSpec for _, spec := range specs { - if spec.Name() == fi.StringValue(o.Name) { + if spec.Name() == fi.ValueOf(o.Name) { out = spec.Obj().(*aws.LaunchSpec) break } } if out == nil { - return nil, fmt.Errorf("spotinst: failed to find launch spec %q", fi.StringValue(o.Name)) + return nil, fmt.Errorf("spotinst: failed to find launch spec %q", fi.ValueOf(o.Name)) } - klog.V(4).Infof("LaunchSpec/%s: %s", fi.StringValue(o.Name), stringutil.Stringify(out)) + klog.V(4).Infof("LaunchSpec/%s: %s", fi.ValueOf(o.Name), stringutil.Stringify(out)) return out, nil } @@ -149,8 +149,8 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { // Capacity. { if spec.ResourceLimits != nil { - actual.MinSize = fi.Int64(int64(fi.IntValue(spec.ResourceLimits.MinInstanceCount))) - actual.MaxSize = fi.Int64(int64(fi.IntValue(spec.ResourceLimits.MaxInstanceCount))) + actual.MinSize = fi.PtrTo(int64(fi.ValueOf(spec.ResourceLimits.MinInstanceCount))) + actual.MaxSize = fi.PtrTo(int64(fi.ValueOf(spec.ResourceLimits.MaxInstanceCount))) } } @@ -159,12 +159,12 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { actual.ImageID = spec.ImageID if o.ImageID != nil && actual.ImageID != nil && - fi.StringValue(actual.ImageID) != fi.StringValue(o.ImageID) { - image, err := resolveImage(cloud, fi.StringValue(o.ImageID)) + fi.ValueOf(actual.ImageID) != fi.ValueOf(o.ImageID) { + image, err := resolveImage(cloud, fi.ValueOf(o.ImageID)) if err != nil { return nil, err } - if fi.StringValue(image.ImageId) == fi.StringValue(spec.ImageID) { + if fi.ValueOf(image.ImageId) == fi.ValueOf(spec.ImageID) { actual.ImageID = o.ImageID } } @@ -173,7 +173,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { // User data. 
{ if spec.UserData != nil { - userData, err := base64.StdEncoding.DecodeString(fi.StringValue(spec.UserData)) + userData, err := base64.StdEncoding.DecodeString(fi.ValueOf(spec.UserData)) if err != nil { return nil, err } @@ -194,7 +194,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { { if spec.RootVolumeSize != nil { actual.RootVolumeOpts = new(RootVolumeOpts) - actual.RootVolumeOpts.Size = fi.Int64(int64(*spec.RootVolumeSize)) + actual.RootVolumeOpts.Size = fi.PtrTo(int64(*spec.RootVolumeSize)) } } @@ -209,16 +209,16 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { actual.RootVolumeOpts = new(RootVolumeOpts) } if b.EBS.VolumeType != nil { - actual.RootVolumeOpts.Type = fi.String(strings.ToLower(fi.StringValue(b.EBS.VolumeType))) + actual.RootVolumeOpts.Type = fi.PtrTo(strings.ToLower(fi.ValueOf(b.EBS.VolumeType))) } if b.EBS.VolumeSize != nil { - actual.RootVolumeOpts.Size = fi.Int64(int64(fi.IntValue(b.EBS.VolumeSize))) + actual.RootVolumeOpts.Size = fi.PtrTo(int64(fi.ValueOf(b.EBS.VolumeSize))) } if b.EBS.IOPS != nil { - actual.RootVolumeOpts.IOPS = fi.Int64(int64(fi.IntValue(b.EBS.IOPS))) + actual.RootVolumeOpts.IOPS = fi.PtrTo(int64(fi.ValueOf(b.EBS.IOPS))) } if b.EBS.Throughput != nil { - actual.RootVolumeOpts.Throughput = fi.Int64(int64(fi.IntValue(b.EBS.Throughput))) + actual.RootVolumeOpts.Throughput = fi.PtrTo(int64(fi.ValueOf(b.EBS.Throughput))) } if b.EBS.Encrypted != nil { actual.RootVolumeOpts.Encryption = b.EBS.Encrypted @@ -233,7 +233,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { if spec.SecurityGroupIDs != nil { for _, sgID := range spec.SecurityGroupIDs { actual.SecurityGroups = append(actual.SecurityGroups, - &awstasks.SecurityGroup{ID: fi.String(sgID)}) + &awstasks.SecurityGroup{ID: fi.PtrTo(sgID)}) } } } @@ -243,7 +243,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { if spec.SubnetIDs != nil { for _, subnetID := range spec.SubnetIDs { actual.Subnets = append(actual.Subnets, - &awstasks.Subnet{ID: fi.String(subnetID)}) + &awstasks.Subnet{ID: fi.PtrTo(subnetID)}) } if subnetSlicesEqualIgnoreOrder(actual.Subnets, o.Subnets) { actual.Subnets = o.Subnets @@ -268,7 +268,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { if len(spec.Tags) > 0 { actual.Tags = make(map[string]string) for _, tag := range spec.Tags { - actual.Tags[fi.StringValue(tag.Key)] = fi.StringValue(tag.Value) + actual.Tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value) } } } @@ -298,7 +298,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { actual.AutoScalerOpts.Labels = make(map[string]string) for _, label := range spec.Labels { - actual.AutoScalerOpts.Labels[fi.StringValue(label.Key)] = fi.StringValue(label.Value) + actual.AutoScalerOpts.Labels[fi.ValueOf(label.Key)] = fi.ValueOf(label.Value) } } @@ -311,9 +311,9 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { actual.AutoScalerOpts.Taints = make([]*corev1.Taint, len(spec.Taints)) for i, taint := range spec.Taints { actual.AutoScalerOpts.Taints[i] = &corev1.Taint{ - Key: fi.StringValue(taint.Key), - Value: fi.StringValue(taint.Value), - Effect: corev1.TaintEffect(fi.StringValue(taint.Effect)), + Key: fi.ValueOf(taint.Key), + Value: fi.ValueOf(taint.Value), + Effect: corev1.TaintEffect(fi.ValueOf(taint.Effect)), } } } @@ -322,7 +322,7 @@ func (o *LaunchSpec) Find(c *fi.Context) (*LaunchSpec, error) { { if strategy := spec.Strategy; strategy != nil { if strategy.SpotPercentage != nil { - actual.SpotPercentage = 
fi.Int64(int64(fi.IntValue(strategy.SpotPercentage))) + actual.SpotPercentage = fi.PtrTo(int64(fi.ValueOf(strategy.SpotPercentage))) } } } @@ -380,15 +380,15 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err { if e.MinSize != nil || e.MaxSize != nil { spec.ResourceLimits = new(aws.ResourceLimits) - spec.ResourceLimits.SetMinInstanceCount(fi.Int(int(*e.MinSize))) - spec.ResourceLimits.SetMaxInstanceCount(fi.Int(int(*e.MaxSize))) + spec.ResourceLimits.SetMinInstanceCount(fi.PtrTo(int(*e.MinSize))) + spec.ResourceLimits.SetMaxInstanceCount(fi.PtrTo(int(*e.MaxSize))) } } // Image. { if e.ImageID != nil { - image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + image, err := resolveImage(cloud, fi.ValueOf(e.ImageID)) if err != nil { return err } @@ -406,7 +406,7 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err if len(userData) > 0 { encoded := base64.StdEncoding.EncodeToString([]byte(userData)) - spec.SetUserData(fi.String(encoded)) + spec.SetUserData(fi.PtrTo(encoded)) } } } @@ -450,7 +450,7 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err if e.Subnets != nil { subnetIDs := make([]string, len(e.Subnets)) for i, subnet := range e.Subnets { - subnetIDs[i] = fi.StringValue(subnet.ID) + subnetIDs[i] = fi.ValueOf(subnet.ID) } spec.SetSubnetIDs(subnetIDs) } @@ -499,8 +499,8 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err var labels []*aws.Label for k, v := range opts.Labels { labels = append(labels, &aws.Label{ - Key: fi.String(k), - Value: fi.String(v), + Key: fi.PtrTo(k), + Value: fi.PtrTo(v), }) } spec.SetLabels(labels) @@ -511,9 +511,9 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err taints := make([]*aws.Taint, len(opts.Taints)) for i, taint := range opts.Taints { taints[i] = &aws.Taint{ - Key: fi.String(taint.Key), - Value: fi.String(taint.Value), - Effect: fi.String(string(taint.Effect)), + Key: fi.PtrTo(taint.Key), + Value: fi.PtrTo(taint.Value), + Effect: fi.PtrTo(string(taint.Effect)), } } spec.SetTaints(taints) @@ -524,13 +524,13 @@ func (_ *LaunchSpec) create(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err // Strategy. { if e.SpotPercentage != nil { - spec.Strategy.SetSpotPercentage(fi.Int(int(*e.SpotPercentage))) + spec.Strategy.SetSpotPercentage(fi.PtrTo(int(*e.SpotPercentage))) } } // Restrictions. { - if fi.BoolValue(e.RestrictScaleDown) { + if fi.ValueOf(e.RestrictScaleDown) { spec.SetRestrictScaleDown(e.RestrictScaleDown) } } @@ -576,7 +576,7 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err spec.ResourceLimits = new(aws.ResourceLimits) } - spec.ResourceLimits.SetMinInstanceCount(fi.Int(int(*e.MinSize))) + spec.ResourceLimits.SetMinInstanceCount(fi.PtrTo(int(*e.MinSize))) changes.MinSize = nil changed = true } @@ -585,7 +585,7 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err spec.ResourceLimits = new(aws.ResourceLimits) } - spec.ResourceLimits.SetMaxInstanceCount(fi.Int(int(*e.MaxSize))) + spec.ResourceLimits.SetMaxInstanceCount(fi.PtrTo(int(*e.MaxSize))) changes.MaxSize = nil changed = true } @@ -594,12 +594,12 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err // Image. 
{ if changes.ImageID != nil { - image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + image, err := resolveImage(cloud, fi.ValueOf(e.ImageID)) if err != nil { return err } - if fi.StringValue(actual.ImageID) != fi.StringValue(image.ImageId) { + if fi.ValueOf(actual.ImageID) != fi.ValueOf(image.ImageId) { spec.SetImageId(image.ImageId) } @@ -618,7 +618,7 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err if len(userData) > 0 { encoded := base64.StdEncoding.EncodeToString([]byte(userData)) - spec.SetUserData(fi.String(encoded)) + spec.SetUserData(fi.PtrTo(encoded)) changed = true } @@ -674,7 +674,7 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err if changes.Subnets != nil { subnetIDs := make([]string, len(e.Subnets)) for i, subnet := range e.Subnets { - subnetIDs[i] = fi.StringValue(subnet.ID) + subnetIDs[i] = fi.ValueOf(subnet.ID) } spec.SetSubnetIDs(subnetIDs) @@ -735,8 +735,8 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err labels := make([]*aws.Label, 0, len(e.AutoScalerOpts.Labels)) for k, v := range e.AutoScalerOpts.Labels { labels = append(labels, &aws.Label{ - Key: fi.String(k), - Value: fi.String(v), + Key: fi.PtrTo(k), + Value: fi.PtrTo(v), }) } @@ -750,9 +750,9 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err taints := make([]*aws.Taint, 0, len(e.AutoScalerOpts.Taints)) for _, taint := range e.AutoScalerOpts.Taints { taints = append(taints, &aws.Taint{ - Key: fi.String(taint.Key), - Value: fi.String(taint.Value), - Effect: fi.String(string(taint.Effect)), + Key: fi.PtrTo(taint.Key), + Value: fi.PtrTo(taint.Value), + Effect: fi.PtrTo(string(taint.Effect)), }) } @@ -773,7 +773,7 @@ func (_ *LaunchSpec) update(cloud awsup.AWSCloud, a, e, changes *LaunchSpec) err spec.Strategy = new(aws.LaunchSpecStrategy) } - spec.Strategy.SetSpotPercentage(fi.Int(int(fi.Int64Value(e.SpotPercentage)))) + spec.Strategy.SetSpotPercentage(fi.PtrTo(int(fi.ValueOf(e.SpotPercentage)))) changes.SpotPercentage = nil changed = true } @@ -906,7 +906,7 @@ func (_ *LaunchSpec) RenderTerraform(t *terraform.TerraformTarget, a, e, changes // Image. 
{ if e.ImageID != nil { - image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + image, err := resolveImage(cloud, fi.ValueOf(e.ImageID)) if err != nil { return err } @@ -986,7 +986,7 @@ func (_ *LaunchSpec) RenderTerraform(t *terraform.TerraformTarget, a, e, changes VolumeIOPS: rootDevice.EbsVolumeIops, VolumeThroughput: rootDevice.EbsVolumeThroughput, Encrypted: rootDevice.EbsEncrypted, - DeleteOnTermination: fi.Bool(true), + DeleteOnTermination: fi.PtrTo(true), }, }) } @@ -1023,8 +1023,8 @@ func (_ *LaunchSpec) RenderTerraform(t *terraform.TerraformTarget, a, e, changes tf.Labels = make([]*terraformKV, 0, len(opts.Labels)) for k, v := range opts.Labels { tf.Labels = append(tf.Labels, &terraformKV{ - Key: fi.String(k), - Value: fi.String(v), + Key: fi.PtrTo(k), + Value: fi.PtrTo(v), }) } } @@ -1034,11 +1034,11 @@ func (_ *LaunchSpec) RenderTerraform(t *terraform.TerraformTarget, a, e, changes tf.Taints = make([]*terraformTaint, len(opts.Taints)) for i, taint := range opts.Taints { t := &terraformTaint{ - Key: fi.String(taint.Key), - Effect: fi.String(string(taint.Effect)), + Key: fi.PtrTo(taint.Key), + Effect: fi.PtrTo(string(taint.Effect)), } if taint.Value != "" { - t.Value = fi.String(taint.Value) + t.Value = fi.PtrTo(taint.Value) } tf.Taints[i] = t } @@ -1057,7 +1057,7 @@ func (_ *LaunchSpec) RenderTerraform(t *terraform.TerraformTarget, a, e, changes // Restrictions. { - if fi.BoolValue(e.RestrictScaleDown) { + if fi.ValueOf(e.RestrictScaleDown) { tf.RestrictScaleDown = e.RestrictScaleDown } } @@ -1074,8 +1074,8 @@ func (o *LaunchSpec) buildTags() []*aws.Tag { for key, value := range o.Tags { tags = append(tags, &aws.Tag{ - Key: fi.String(key), - Value: fi.String(value), + Key: fi.PtrTo(key), + Value: fi.PtrTo(value), }) } @@ -1091,18 +1091,18 @@ func (o *LaunchSpec) convertBlockDeviceMapping(in *awstasks.BlockDeviceMapping) if in.EbsDeleteOnTermination != nil || in.EbsVolumeSize != nil || in.EbsVolumeType != nil { out.EBS = &aws.EBS{ VolumeType: in.EbsVolumeType, - VolumeSize: fi.Int(int(fi.Int64Value(in.EbsVolumeSize))), + VolumeSize: fi.PtrTo(int(fi.ValueOf(in.EbsVolumeSize))), DeleteOnTermination: in.EbsDeleteOnTermination, } // IOPS is not valid for gp2 volumes. - if in.EbsVolumeIops != nil && fi.StringValue(in.EbsVolumeType) != "gp2" { - out.EBS.IOPS = fi.Int(int(fi.Int64Value(in.EbsVolumeIops))) + if in.EbsVolumeIops != nil && fi.ValueOf(in.EbsVolumeType) != "gp2" { + out.EBS.IOPS = fi.PtrTo(int(fi.ValueOf(in.EbsVolumeIops))) } // Throughput is only valid for gp3 volumes. 
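convertBlockDeviceMapping applies two EBS constraints, stated in its comments: IOPS is not valid for gp2 volumes, and throughput only applies to gp3 (the throughput check continues just below). A sketch of those rules under illustrative types, not the real task structs:

```go
package main

import "fmt"

// ebs is an illustrative stand-in for the SDK's aws.EBS.
type ebs struct {
	VolumeType string
	IOPS       *int
	Throughput *int
}

// applyEBSRules keeps IOPS off gp2 volumes and throughput off
// everything except gp3, mirroring the checks above and below.
func applyEBSRules(volumeType string, iops, throughput *int64) ebs {
	out := ebs{VolumeType: volumeType}
	if iops != nil && volumeType != "gp2" {
		v := int(*iops)
		out.IOPS = &v
	}
	if throughput != nil && volumeType == "gp3" {
		v := int(*throughput)
		out.Throughput = &v
	}
	return out
}

func main() {
	iops, tput := int64(3000), int64(125)
	gp2 := applyEBSRules("gp2", &iops, &tput)
	fmt.Println(gp2.IOPS == nil, gp2.Throughput == nil) // true true: both dropped
	gp3 := applyEBSRules("gp3", &iops, &tput)
	fmt.Println(*gp3.IOPS, *gp3.Throughput) // 3000 125: both kept
}
```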
- if in.EbsVolumeThroughput != nil && fi.StringValue(in.EbsVolumeType) == "gp3" { - out.EBS.Throughput = fi.Int(int(fi.Int64Value(in.EbsVolumeThroughput))) + if in.EbsVolumeThroughput != nil && fi.ValueOf(in.EbsVolumeType) == "gp3" { + out.EBS.Throughput = fi.PtrTo(int(fi.ValueOf(in.EbsVolumeThroughput))) } }
diff --git a/upup/pkg/fi/cloudup/spotinsttasks/ocean.go b/upup/pkg/fi/cloudup/spotinsttasks/ocean.go index ecc8241800dda..5cd62928f4644 100644 --- a/upup/pkg/fi/cloudup/spotinsttasks/ocean.go +++ b/upup/pkg/fi/cloudup/spotinsttasks/ocean.go @@ -106,25 +106,25 @@ func (o *Ocean) GetDependencies(tasks map[string]fi.Task) []fi.Task { } func (o *Ocean) find(svc spotinst.InstanceGroupService) (*aws.Cluster, error) { - klog.V(4).Infof("Attempting to find Ocean: %q", fi.StringValue(o.Name)) + klog.V(4).Infof("Attempting to find Ocean: %q", fi.ValueOf(o.Name)) oceans, err := svc.List(context.Background()) if err != nil { - return nil, fmt.Errorf("spotinst: failed to find ocean %q: %v", fi.StringValue(o.Name), err) + return nil, fmt.Errorf("spotinst: failed to find ocean %q: %v", fi.ValueOf(o.Name), err) } var out *aws.Cluster for _, ocean := range oceans { - if ocean.Name() == fi.StringValue(o.Name) { + if ocean.Name() == fi.ValueOf(o.Name) { out = ocean.Obj().(*aws.Cluster) break } } if out == nil { - return nil, fmt.Errorf("spotinst: failed to find ocean %q", fi.StringValue(o.Name)) + return nil, fmt.Errorf("spotinst: failed to find ocean %q", fi.ValueOf(o.Name)) } - klog.V(4).Infof("Ocean/%s: %s", fi.StringValue(o.Name), stringutil.Stringify(out)) + klog.V(4).Infof("Ocean/%s: %s", fi.ValueOf(o.Name), stringutil.Stringify(out)) return out, nil } @@ -143,9 +143,9 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { // Capacity. { - if !fi.BoolValue(ocean.Compute.LaunchSpecification.UseAsTemplateOnly) { - actual.MinSize = fi.Int64(int64(fi.IntValue(ocean.Capacity.Minimum))) - actual.MaxSize = fi.Int64(int64(fi.IntValue(ocean.Capacity.Maximum))) + if !fi.ValueOf(ocean.Compute.LaunchSpecification.UseAsTemplateOnly) { + actual.MinSize = fi.PtrTo(int64(fi.ValueOf(ocean.Capacity.Minimum))) + actual.MaxSize = fi.PtrTo(int64(fi.ValueOf(ocean.Capacity.Maximum))) } } @@ -157,11 +157,11 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { actual.UtilizeCommitments = strategy.UtilizeCommitments if strategy.DrainingTimeout != nil { - actual.DrainingTimeout = fi.Int64(int64(fi.IntValue(strategy.DrainingTimeout))) + actual.DrainingTimeout = fi.PtrTo(int64(fi.ValueOf(strategy.DrainingTimeout))) } if strategy.GracePeriod != nil { - actual.GracePeriod = fi.Int64(int64(fi.IntValue(strategy.GracePeriod))) + actual.GracePeriod = fi.PtrTo(int64(fi.ValueOf(strategy.GracePeriod))) } } } @@ -175,7 +175,7 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { if subnets := compute.SubnetIDs; subnets != nil { for _, subnetID := range subnets { actual.Subnets = append(actual.Subnets, - &awstasks.Subnet{ID: fi.String(subnetID)}) + &awstasks.Subnet{ID: fi.PtrTo(subnetID)}) } if subnetSlicesEqualIgnoreOrder(actual.Subnets, o.Subnets) { actual.Subnets = o.Subnets @@ -208,12 +208,12 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { actual.ImageID = lc.ImageID if o.ImageID != nil && actual.ImageID != nil && - fi.StringValue(actual.ImageID) != fi.StringValue(o.ImageID) { - image, err := resolveImage(cloud, fi.StringValue(o.ImageID)) + fi.ValueOf(actual.ImageID) != fi.ValueOf(o.ImageID) { + image, err := resolveImage(cloud, fi.ValueOf(o.ImageID)) if err != nil { return nil, err } - if fi.StringValue(image.ImageId) == fi.StringValue(lc.ImageID) { + if fi.ValueOf(image.ImageId) == fi.ValueOf(lc.ImageID) { actual.ImageID = o.ImageID } } @@ -224,7 +224,7 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { if lc.Tags != nil && len(lc.Tags) > 0 { actual.Tags = make(map[string]string) for _, tag := range lc.Tags { - actual.Tags[fi.StringValue(tag.Key)] = fi.StringValue(tag.Value) + actual.Tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value) } } } @@ -234,7 +234,7 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { if lc.SecurityGroupIDs != nil { for _, sgID := range lc.SecurityGroupIDs { actual.SecurityGroups = append(actual.SecurityGroups, - &awstasks.SecurityGroup{ID: fi.String(sgID)}) + &awstasks.SecurityGroup{ID: fi.PtrTo(sgID)}) } } } @@ -242,7 +242,7 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { // User data. { if lc.UserData != nil { - userData, err := base64.StdEncoding.DecodeString(fi.StringValue(lc.UserData)) + userData, err := base64.StdEncoding.DecodeString(fi.ValueOf(lc.UserData)) if err != nil { return nil, err } @@ -252,7 +252,7 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { // EBS optimization. { - if fi.BoolValue(lc.EBSOptimized) { + if fi.ValueOf(lc.EBSOptimized) { if actual.RootVolumeOpts == nil { actual.RootVolumeOpts = new(RootVolumeOpts) } @@ -278,7 +278,7 @@ func (o *Ocean) Find(c *fi.Context) (*Ocean, error) { // Root volume options. if lc.RootVolumeSize != nil { actual.RootVolumeOpts = new(RootVolumeOpts) - actual.RootVolumeOpts.Size = fi.Int64(int64(*lc.RootVolumeSize)) + actual.RootVolumeOpts.Size = fi.PtrTo(int64(*lc.RootVolumeSize)) } // Monitoring. @@ -369,15 +369,15 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error { // General. { ocean.SetName(e.Name) - ocean.SetRegion(fi.String(cloud.Region())) + ocean.SetRegion(fi.PtrTo(cloud.Region())) } // Capacity. { - if !fi.BoolValue(e.UseAsTemplateOnly) { - ocean.Capacity.SetTarget(fi.Int(int(*e.MinSize))) - ocean.Capacity.SetMinimum(fi.Int(int(*e.MinSize))) - ocean.Capacity.SetMaximum(fi.Int(int(*e.MaxSize))) + if !fi.ValueOf(e.UseAsTemplateOnly) { + ocean.Capacity.SetTarget(fi.PtrTo(int(*e.MinSize))) + ocean.Capacity.SetMinimum(fi.PtrTo(int(*e.MinSize))) + ocean.Capacity.SetMaximum(fi.PtrTo(int(*e.MaxSize))) } } @@ -388,11 +388,11 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error { ocean.Strategy.SetUtilizeCommitments(e.UtilizeCommitments) if e.DrainingTimeout != nil { - ocean.Strategy.SetDrainingTimeout(fi.Int(int(*e.DrainingTimeout))) + ocean.Strategy.SetDrainingTimeout(fi.PtrTo(int(*e.DrainingTimeout))) } if e.GracePeriod != nil { - ocean.Strategy.SetGracePeriod(fi.Int(int(*e.GracePeriod))) + ocean.Strategy.SetGracePeriod(fi.PtrTo(int(*e.GracePeriod))) } } @@ -403,7 +403,7 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error { if e.Subnets != nil { subnetIDs := make([]string, len(e.Subnets)) for i, subnet := range e.Subnets { - subnetIDs[i] = fi.StringValue(subnet.ID) + subnetIDs[i] = fi.ValueOf(subnet.ID) } ocean.Compute.SetSubnetIDs(subnetIDs) } @@ -437,7 +437,7 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error { // Image.
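The Find hunk above does the inverse of the encode shown earlier: it base64-decodes the user data returned by the API before populating the actual task, so desired and actual state compare as plaintext (the elided lines suggest the decoded bytes feed a resource on the actual task). A hedged sketch of that read path:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// decodeUserData mirrors the Find-side handling: the cloud API returns
// base64, while the task model works with the decoded script.
func decodeUserData(stored *string) (string, error) {
	if stored == nil {
		return "", nil // unset user data stays unset
	}
	userData, err := base64.StdEncoding.DecodeString(*stored)
	if err != nil {
		return "", fmt.Errorf("decoding user data: %w", err)
	}
	return string(userData), nil
}

func main() {
	enc := base64.StdEncoding.EncodeToString([]byte("#!/bin/bash"))
	plain, err := decodeUserData(&enc)
	fmt.Printf("%q %v\n", plain, err) // "#!/bin/bash" <nil>
}
```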
{ if e.ImageID != nil { - image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + image, err := resolveImage(cloud, fi.ValueOf(e.ImageID)) if err != nil { return err } @@ -456,7 +456,7 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error { } } - if !fi.BoolValue(e.UseAsTemplateOnly) { + if !fi.ValueOf(e.UseAsTemplateOnly) { // User data. { if e.UserData != nil { @@ -467,7 +467,7 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error { if len(userData) > 0 { encoded := base64.StdEncoding.EncodeToString([]byte(userData)) - ocean.Compute.LaunchSpecification.SetUserData(fi.String(encoded)) + ocean.Compute.LaunchSpecification.SetUserData(fi.PtrTo(encoded)) } } } @@ -487,7 +487,7 @@ func (_ *Ocean) create(cloud awsup.AWSCloud, a, e, changes *Ocean) error { // Volume size. if opts.Size != nil { - ocean.Compute.LaunchSpecification.SetRootVolumeSize(fi.Int(int(*opts.Size))) + ocean.Compute.LaunchSpecification.SetRootVolumeSize(fi.PtrTo(int(*opts.Size))) } // EBS optimization. @@ -579,8 +579,8 @@ readyLoop: return fmt.Errorf("IAM instance profile not yet created/propagated (original error: %v)", err) } - klog.V(4).Infof("Got an error indicating that the IAM instance profile %q is not ready %q", fi.StringValue(e.IAMInstanceProfile.Name), err) - klog.Infof("Waiting for IAM instance profile %q to be ready", fi.StringValue(e.IAMInstanceProfile.Name)) + klog.V(4).Infof("Got an error indicating that the IAM instance profile %q is not ready %q", fi.ValueOf(e.IAMInstanceProfile.Name), err) + klog.Infof("Waiting for IAM instance profile %q to be ready", fi.ValueOf(e.IAMInstanceProfile.Name)) goto readyLoop } } @@ -646,7 +646,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { ocean.Strategy = new(aws.Strategy) } - ocean.Strategy.SetDrainingTimeout(fi.Int(int(*e.DrainingTimeout))) + ocean.Strategy.SetDrainingTimeout(fi.PtrTo(int(*e.DrainingTimeout))) changes.DrainingTimeout = nil changed = true } @@ -657,7 +657,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { ocean.Strategy = new(aws.Strategy) } - ocean.Strategy.SetGracePeriod(fi.Int(int(*e.GracePeriod))) + ocean.Strategy.SetGracePeriod(fi.PtrTo(int(*e.GracePeriod))) changes.GracePeriod = nil changed = true } @@ -674,7 +674,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { subnetIDs := make([]string, len(e.Subnets)) for i, subnet := range e.Subnets { - subnetIDs[i] = fi.StringValue(subnet.ID) + subnetIDs[i] = fi.ValueOf(subnet.ID) } ocean.Compute.SetSubnetIDs(subnetIDs) @@ -744,7 +744,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { // Image. { if changes.ImageID != nil { - image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + image, err := resolveImage(cloud, fi.ValueOf(e.ImageID)) if err != nil { return err } @@ -813,7 +813,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { } } - if !fi.BoolValue(e.UseAsTemplateOnly) { + if !fi.ValueOf(e.UseAsTemplateOnly) { // User data. 
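The readyLoop above retries Ocean creation while a freshly created IAM instance profile propagates, since IAM is eventually consistent. A sketch of the same wait expressed as an explicit bounded loop rather than the goto used in the source; the error value and attempt counts here are simulated, not the real AWS error:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errProfileNotReady simulates the AWS error class indicating that a
// just-created IAM instance profile has not propagated yet.
var errProfileNotReady = errors.New("invalid IAM instance profile")

// createCluster succeeds only once "propagation" has finished.
func createCluster(attempt int) error {
	if attempt < 3 {
		return errProfileNotReady
	}
	return nil
}

func main() {
	const maxAttempts = 10
	for attempt := 1; ; attempt++ {
		err := createCluster(attempt)
		if err == nil {
			fmt.Println("created after", attempt, "attempts")
			return
		}
		// Retry only the propagation error; anything else is fatal.
		if !errors.Is(err, errProfileNotReady) || attempt >= maxAttempts {
			fmt.Println("giving up:", err)
			return
		}
		time.Sleep(10 * time.Millisecond) // backoff placeholder
	}
}
```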
{ if changes.UserData != nil { @@ -831,7 +831,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { } encoded := base64.StdEncoding.EncodeToString([]byte(userData)) - ocean.Compute.LaunchSpecification.SetUserData(fi.String(encoded)) + ocean.Compute.LaunchSpecification.SetUserData(fi.PtrTo(encoded)) changed = true } @@ -903,7 +903,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { ocean.Compute.LaunchSpecification = new(aws.LaunchSpecification) } - ocean.Compute.LaunchSpecification.SetRootVolumeSize(fi.Int(int(*opts.Size))) + ocean.Compute.LaunchSpecification.SetRootVolumeSize(fi.PtrTo(int(*opts.Size))) changed = true } @@ -929,19 +929,19 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { // Capacity. { - if !fi.BoolValue(e.UseAsTemplateOnly) { + if !fi.ValueOf(e.UseAsTemplateOnly) { if changes.MinSize != nil { if ocean.Capacity == nil { ocean.Capacity = new(aws.Capacity) } - ocean.Capacity.SetMinimum(fi.Int(int(*e.MinSize))) + ocean.Capacity.SetMinimum(fi.PtrTo(int(*e.MinSize))) changes.MinSize = nil changed = true // Scale up the target capacity, if needed. if int64(*actual.Capacity.Target) < *e.MinSize { - ocean.Capacity.SetTarget(fi.Int(int(*e.MinSize))) + ocean.Capacity.SetTarget(fi.PtrTo(int(*e.MinSize))) } } if changes.MaxSize != nil { @@ -949,7 +949,7 @@ func (_ *Ocean) update(cloud awsup.AWSCloud, a, e, changes *Ocean) error { ocean.Capacity = new(aws.Capacity) } - ocean.Capacity.SetMaximum(fi.Int(int(*e.MaxSize))) + ocean.Capacity.SetMaximum(fi.PtrTo(int(*e.MaxSize))) changes.MaxSize = nil changed = true } @@ -1057,7 +1057,7 @@ func (_ *Ocean) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Oce tf := &terraformOcean{ Name: e.Name, - Region: fi.String(cloud.Region()), + Region: fi.PtrTo(cloud.Region()), UseAsTemplateOnly: e.UseAsTemplateOnly, FallbackToOnDemand: e.FallbackToOnDemand, UtilizeReservedInstances: e.UtilizeReservedInstances, @@ -1068,7 +1068,7 @@ func (_ *Ocean) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Oce // Image. if e.ImageID != nil { - image, err := resolveImage(cloud, fi.StringValue(e.ImageID)) + image, err := resolveImage(cloud, fi.ValueOf(e.ImageID)) if err != nil { return err } @@ -1165,7 +1165,7 @@ func (_ *Ocean) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Oce } } - if !fi.BoolValue(tf.UseAsTemplateOnly) { + if !fi.ValueOf(tf.UseAsTemplateOnly) { // Capacity. 
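In the capacity hunk above, raising MinSize past the group's current target also raises the target, because the service requires minimum <= target <= maximum. A sketch of just that adjustment; as in the hunk, the target is only ever scaled up, never down:

```go
package main

import "fmt"

// scaleTargetForNewMin mirrors the update logic above: if the new
// minimum exceeds the current target, the target follows it upward.
func scaleTargetForNewMin(currentTarget, newMin int64) int64 {
	if currentTarget < newMin {
		return newMin
	}
	return currentTarget
}

func main() {
	fmt.Println(scaleTargetForNewMin(2, 5)) // 5: bumped to the new minimum
	fmt.Println(scaleTargetForNewMin(7, 5)) // 7: already above it
}
```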
tf.DesiredCapacity = e.MinSize tf.MinSize = e.MinSize @@ -1227,8 +1227,8 @@ func (o *Ocean) buildTags() []*aws.Tag { for key, value := range o.Tags { tags = append(tags, &aws.Tag{ - Key: fi.String(key), - Value: fi.String(value), + Key: fi.PtrTo(key), + Value: fi.PtrTo(value), }) } diff --git a/upup/pkg/fi/cloudup/subnets.go b/upup/pkg/fi/cloudup/subnets.go index a8641668df380..de6397d8a4d51 100644 --- a/upup/pkg/fi/cloudup/subnets.go +++ b/upup/pkg/fi/cloudup/subnets.go @@ -68,22 +68,22 @@ func assignCIDRsToSubnets(c *kops.Cluster, cloud fi.Cloud) error { } for i := range c.Spec.Subnets { subnet := &c.Spec.Subnets[i] - if subnet.ProviderID != "" { - cloudSubnet := subnetByID[subnet.ProviderID] + if subnet.ID != "" { + cloudSubnet := subnetByID[subnet.ID] if cloudSubnet == nil { - return fmt.Errorf("Subnet %q not found in VPC %q", subnet.ProviderID, c.Spec.NetworkID) + return fmt.Errorf("Subnet %q not found in VPC %q", subnet.ID, c.Spec.NetworkID) } if subnet.CIDR == "" { subnet.CIDR = cloudSubnet.CIDR if subnet.CIDR == "" { - return fmt.Errorf("Subnet %q did not have CIDR", subnet.ProviderID) + return fmt.Errorf("Subnet %q did not have CIDR", subnet.ID) } } else if subnet.CIDR != cloudSubnet.CIDR { - return fmt.Errorf("Subnet %q has configured CIDR %q, but the actual CIDR found was %q", subnet.ProviderID, subnet.CIDR, cloudSubnet.CIDR) + return fmt.Errorf("Subnet %q has configured CIDR %q, but the actual CIDR found was %q", subnet.ID, subnet.CIDR, cloudSubnet.CIDR) } if subnet.Zone != cloudSubnet.Zone { - return fmt.Errorf("Subnet %q has configured Zone %q, but the actual Zone found was %q", subnet.ProviderID, subnet.Zone, cloudSubnet.Zone) + return fmt.Errorf("Subnet %q has configured Zone %q, but the actual Zone found was %q", subnet.ID, subnet.Zone, cloudSubnet.Zone) } } diff --git a/upup/pkg/fi/cloudup/template_functions.go b/upup/pkg/fi/cloudup/template_functions.go index ca48d1e7ea09b..e2e80934bf7a0 100644 --- a/upup/pkg/fi/cloudup/template_functions.go +++ b/upup/pkg/fi/cloudup/template_functions.go @@ -116,6 +116,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS dest["HasHighlyAvailableControlPlane"] = tf.HasHighlyAvailableControlPlane dest["ControlPlaneControllerReplicas"] = tf.ControlPlaneControllerReplicas dest["APIServerNodeRole"] = tf.APIServerNodeRole + dest["APIInternalName"] = tf.Cluster.APIInternalName dest["CloudTags"] = tf.CloudTagsForInstanceGroup dest["KubeDNS"] = func() *kops.KubeDNSConfig { @@ -339,7 +340,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS dest["HasSnapshotController"] = func() bool { sc := cluster.Spec.SnapshotController - return sc != nil && fi.BoolValue(sc.Enabled) + return sc != nil && fi.ValueOf(sc.Enabled) } dest["IsKubernetesGTE"] = tf.IsKubernetesGTE @@ -387,7 +388,7 @@ func (tf *TemplateFunctions) GetInstanceGroup(name string) (*kops.InstanceGroup, // deployOnWorkersIfExternalPermissons should be true if a controller runs on worker nodes when external IAM permissions is enabled for the cluster. // In this case it is assumed that it can run 2 replicas. 
func (tf *TemplateFunctions) ControlPlaneControllerReplicas(deployOnWorkersIfExternalPermissions bool) int { - if deployOnWorkersIfExternalPermissions && tf.Cluster.Spec.IAM != nil && fi.BoolValue(tf.Cluster.Spec.IAM.UseServiceAccountExternalPermissions) { + if deployOnWorkersIfExternalPermissions && tf.Cluster.Spec.IAM != nil && fi.ValueOf(tf.Cluster.Spec.IAM.UseServiceAccountExternalPermissions) { return 2 } if tf.HasHighlyAvailableControlPlane() { @@ -473,7 +474,7 @@ func (tf *TemplateFunctions) DNSControllerArgv() ([]string, error) { // @check if the watch ingress is set var watchIngress bool if cluster.Spec.ExternalDNS.WatchIngress != nil { - watchIngress = fi.BoolValue(cluster.Spec.ExternalDNS.WatchIngress) + watchIngress = fi.ValueOf(cluster.Spec.ExternalDNS.WatchIngress) } if watchIngress { @@ -897,8 +898,8 @@ func karpenterInstanceTypes(cloud awsup.AWSCloud, ig kops.InstanceGroupSpec) ([] ir := &ec2.InstanceRequirementsRequest{ VCpuCount: &ec2.VCpuCountRangeRequest{}, MemoryMiB: &ec2.MemoryMiBRequest{}, - BurstablePerformance: fi.String("included"), - InstanceGenerations: []*string{fi.String("current")}, + BurstablePerformance: fi.PtrTo("included"), + InstanceGenerations: []*string{fi.PtrTo("current")}, } cpu := instanceRequirements.CPU if cpu != nil { @@ -917,7 +918,7 @@ func karpenterInstanceTypes(cloud awsup.AWSCloud, ig kops.InstanceGroupSpec) ([] ir.VCpuCount.Min = &cpuMin } } else { - ir.VCpuCount.Min = fi.Int64(0) + ir.VCpuCount.Min = fi.PtrTo(int64(0)) } memory := instanceRequirements.Memory @@ -931,12 +932,12 @@ func karpenterInstanceTypes(cloud awsup.AWSCloud, ig kops.InstanceGroupSpec) ([] ir.MemoryMiB.Min = &memoryMin } } else { - ir.MemoryMiB.Min = fi.Int64(0) + ir.MemoryMiB.Min = fi.PtrTo(int64(0)) } ir.AcceleratorCount = &ec2.AcceleratorCountRequest{ - Min: fi.Int64(0), - Max: fi.Int64(0), + Min: fi.PtrTo(int64(0)), + Max: fi.PtrTo(int64(0)), } response, err := cloud.EC2().GetInstanceTypesFromInstanceRequirements( diff --git a/upup/pkg/fi/cloudup/template_functions_test.go b/upup/pkg/fi/cloudup/template_functions_test.go index 247333945d3e3..18d212773abdb 100644 --- a/upup/pkg/fi/cloudup/template_functions_test.go +++ b/upup/pkg/fi/cloudup/template_functions_test.go @@ -159,7 +159,7 @@ func Test_TemplateFunctions_CloudControllerConfigArgv(t *testing.T) { Openstack: &kops.OpenstackSpec{}, }, ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{ - AllocateNodeCIDRs: fi.Bool(true), + AllocateNodeCIDRs: fi.PtrTo(true), }, }}, expectedArgv: []string{ @@ -177,7 +177,7 @@ func Test_TemplateFunctions_CloudControllerConfigArgv(t *testing.T) { Openstack: &kops.OpenstackSpec{}, }, ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{ - ConfigureCloudRoutes: fi.Bool(true), + ConfigureCloudRoutes: fi.PtrTo(true), }, }}, expectedArgv: []string{ @@ -195,7 +195,7 @@ func Test_TemplateFunctions_CloudControllerConfigArgv(t *testing.T) { Openstack: &kops.OpenstackSpec{}, }, ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{ - CIDRAllocatorType: fi.String("RangeAllocator"), + CIDRAllocatorType: fi.PtrTo("RangeAllocator"), }, }}, expectedArgv: []string{ @@ -213,7 +213,7 @@ func Test_TemplateFunctions_CloudControllerConfigArgv(t *testing.T) { Openstack: &kops.OpenstackSpec{}, }, ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{ - UseServiceAccountCredentials: fi.Bool(false), + UseServiceAccountCredentials: fi.PtrTo(false), }, }}, expectedArgv: []string{ @@ -230,7 +230,7 @@ func 
Test_TemplateFunctions_CloudControllerConfigArgv(t *testing.T) { Openstack: &kops.OpenstackSpec{}, }, ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{ - LeaderElection: &kops.LeaderElectionConfiguration{LeaderElect: fi.Bool(true)}, + LeaderElection: &kops.LeaderElectionConfiguration{LeaderElect: fi.PtrTo(true)}, }, }}, expectedArgv: []string{ @@ -248,8 +248,8 @@ func Test_TemplateFunctions_CloudControllerConfigArgv(t *testing.T) { Openstack: &kops.OpenstackSpec{}, }, ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{ - LeaderElection: &kops.LeaderElectionConfiguration{LeaderElect: fi.Bool(true)}, - EnableLeaderMigration: fi.Bool(true), + LeaderElection: &kops.LeaderElectionConfiguration{LeaderElect: fi.PtrTo(true)}, + EnableLeaderMigration: fi.PtrTo(true), }, }}, expectedArgv: []string{ diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc-containerd/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc-containerd/cluster.yaml index e9302d4fe4c18..eacc9703e8598 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc-containerd/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc-containerd/cluster.yaml @@ -23,7 +23,6 @@ spec: name: events iam: {} kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/cluster.yaml index 63736355c04c5..1af89eb9a4f0f 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/cluster.yaml @@ -22,7 +22,6 @@ spec: name: events iam: {} kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awscloudcontroller/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awscloudcontroller/cluster.yaml index a9d68b98deb21..2c49cae50d04d 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awscloudcontroller/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awscloudcontroller/cluster.yaml @@ -27,7 +27,6 @@ spec: cloudProvider: aws iam: {} kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/crd/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/crd/cluster.yaml index 2a8927a644740..7b687cd85506a 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/crd/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/crd/cluster.yaml @@ -26,7 +26,6 @@ spec: name: events iam: {} kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/mappings/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/mappings/cluster.yaml index c04893457e303..b75a1eab02bc6 100644 --- 
a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/mappings/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/mappings/cluster.yaml @@ -30,7 +30,6 @@ spec: name: events iam: {} kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/cluster.yaml index 448899365bfdb..263ad97081b15 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/cluster.yaml @@ -24,7 +24,6 @@ spec: name: events iam: {} kubernetesVersion: 1.22.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/coredns/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/coredns/cluster.yaml index 5fe7b8c2d728e..e7fab2582860b 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/coredns/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/coredns/cluster.yaml @@ -46,7 +46,6 @@ spec: - kube-dns topologyKey: kubernetes.io/hostname weight: 100 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/cluster.yaml index 4272c75bd4c88..d708cc340f5ae 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/insecure-1.19/cluster.yaml @@ -24,7 +24,6 @@ spec: name: events iam: {} kubernetesVersion: 1.22.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com metricsServer: enabled: true diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/cluster.yaml index 47fbef5416678..837dbfdf61fcd 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/metrics-server/secure-1.19/cluster.yaml @@ -26,7 +26,6 @@ spec: name: events iam: {} kubernetesVersion: 1.22.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com metricsServer: enabled: true diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/service-account-iam/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/service-account-iam/cluster.yaml index f1b64f95d021f..df925d9f999b4 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/service-account-iam/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/service-account-iam/cluster.yaml @@ -23,7 +23,6 @@ spec: iam: useServiceAccountExternalPermissions: true kubernetesVersion: v1.22.6 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/cluster.yaml 
b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/cluster.yaml index 72cdd293336ac..a62dd5e5b7e10 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/cluster.yaml @@ -22,7 +22,6 @@ spec: name: events iam: {} kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com additionalSans: - proxy.api.minimal.example.com diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/cluster.yaml index 693a214d66a5a..8f23030774144 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/cluster.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/cluster.yaml @@ -20,7 +20,6 @@ spec: name: events iam: {} kubernetesVersion: v1.21.0 - masterInternalName: api.internal.minimal.example.com masterPublicName: api.minimal.example.com networkCIDR: 172.20.0.0/16 networking: diff --git a/upup/pkg/fi/cloudup/utils.go b/upup/pkg/fi/cloudup/utils.go index 4a8e88143dc46..14aa493bb65ad 100644 --- a/upup/pkg/fi/cloudup/utils.go +++ b/upup/pkg/fi/cloudup/utils.go @@ -195,7 +195,7 @@ func FindDNSHostedZone(dns dnsprovider.Interface, clusterDNSName string, dnsType hostedZone := awsZone.Route53HostedZone() if hostedZone.Config != nil { zoneDNSType := kops.DNSTypePublic - if fi.BoolValue(hostedZone.Config.PrivateZone) { + if fi.ValueOf(hostedZone.Config.PrivateZone) { zoneDNSType = kops.DNSTypePrivate } if zoneDNSType != dnsType { diff --git a/upup/pkg/fi/cloudup/validation_test.go b/upup/pkg/fi/cloudup/validation_test.go index a442ec78f72b1..4de12d6c604ee 100644 --- a/upup/pkg/fi/cloudup/validation_test.go +++ b/upup/pkg/fi/cloudup/validation_test.go @@ -121,11 +121,11 @@ func TestValidateFull_UpdatePolicy_Valid(t *testing.T) { }, { label: "automatic", - policy: fi.String(api.UpdatePolicyAutomatic), + policy: fi.PtrTo(api.UpdatePolicyAutomatic), }, { label: "external", - policy: fi.String(api.UpdatePolicyExternal), + policy: fi.PtrTo(api.UpdatePolicyExternal), }, } { t.Run(test.label, func(t *testing.T) { diff --git a/upup/pkg/fi/dryrun_target.go b/upup/pkg/fi/dryrun_target.go index db8ab61ed7516..5090eada93d8b 100644 --- a/upup/pkg/fi/dryrun_target.go +++ b/upup/pkg/fi/dryrun_target.go @@ -188,7 +188,7 @@ func (t *DryRunTarget) PrintReport(taskMap map[string]Task, out io.Writer) error if field.CanInterface() { hasName, ok := field.Interface().(HasName) if ok { - name = StringValue(hasName.GetName()) + name = ValueOf(hasName.GetName()) } } if name != "" { diff --git a/upup/pkg/fi/dryruntarget_test.go b/upup/pkg/fi/dryruntarget_test.go index 107ab400dd668..171cdc4865f87 100644 --- a/upup/pkg/fi/dryruntarget_test.go +++ b/upup/pkg/fi/dryruntarget_test.go @@ -76,12 +76,12 @@ func Test_DryrunTarget_PrintReport(t *testing.T) { target := NewDryRunTarget(builder, &stdout) tasks := map[string]Task{} a := &testTask{ - Name: String("TestName"), + Name: PtrTo("TestName"), Lifecycle: LifecycleSync, Tags: map[string]string{"key": "value"}, } e := &testTask{ - Name: String("TestName"), + Name: PtrTo("TestName"), Lifecycle: LifecycleSync, Tags: map[string]string{"key": "value"}, } diff --git a/upup/pkg/fi/fitasks/keypair.go b/upup/pkg/fi/fitasks/keypair.go index 8072cd0899b74..ee92ac678cb00 100644 --- a/upup/pkg/fi/fitasks/keypair.go +++ b/upup/pkg/fi/fitasks/keypair.go @@ -69,7 +69,7 @@ func (e *Keypair) CompareWithID() *string { } func (e *Keypair) Find(c 
*fi.Context) (*Keypair, error) { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) if name == "" { return nil, nil } @@ -159,7 +159,7 @@ func (_ *Keypair) ShouldCreate(a, e, changes *Keypair) (bool, error) { } func (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) if name == "" { return fi.RequiredField("Name") } @@ -216,7 +216,7 @@ func (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error { signer := fi.CertificateIDCA if e.Signer != nil { - signer = fi.StringValue(e.Signer.Name) + signer = fi.ValueOf(e.Signer.Name) } klog.Infof("Issuing new certificate: %q", *e.Name) diff --git a/upup/pkg/fi/fitasks/keypair_test.go b/upup/pkg/fi/fitasks/keypair_test.go index 71d1b2baa4034..16002227cc436 100644 --- a/upup/pkg/fi/fitasks/keypair_test.go +++ b/upup/pkg/fi/fitasks/keypair_test.go @@ -25,10 +25,10 @@ import ( func TestKeypairDeps(t *testing.T) { ca := &Keypair{ - Name: fi.String("ca"), + Name: fi.PtrTo("ca"), } cert := &Keypair{ - Name: fi.String("cert"), + Name: fi.PtrTo("cert"), Signer: ca, } diff --git a/upup/pkg/fi/fitasks/managedfile.go b/upup/pkg/fi/fitasks/managedfile.go index a47b63d56ffe2..ce2b6f9b13564 100644 --- a/upup/pkg/fi/fitasks/managedfile.go +++ b/upup/pkg/fi/fitasks/managedfile.go @@ -53,7 +53,7 @@ func (e *ManagedFile) Find(c *fi.Context) (*ManagedFile, error) { return nil, err } - location := fi.StringValue(e.Location) + location := fi.ValueOf(e.Location) if location == "" { return nil, nil } @@ -83,7 +83,7 @@ func (e *ManagedFile) Find(c *fi.Context) (*ManagedFile, error) { actual.Public = &public if e.Public == nil { - e.Public = fi.Bool(false) + e.Public = fi.PtrTo(false) } } @@ -95,7 +95,7 @@ func (e *ManagedFile) Find(c *fi.Context) (*ManagedFile, error) { actual.Public = &public if e.Public == nil { - e.Public = fi.Bool(false) + e.Public = fi.PtrTo(false) } } @@ -123,18 +123,18 @@ func (s *ManagedFile) CheckChanges(a, e, changes *ManagedFile) error { func (e *ManagedFile) getACL(c *fi.Context, p vfs.Path) (vfs.ACL, error) { var acl vfs.ACL - if fi.BoolValue(e.Public) { + if fi.ValueOf(e.Public) { switch p := p.(type) { case *vfs.S3Path: acl = &vfs.S3Acl{ - RequestACL: fi.String("public-read"), + RequestACL: fi.PtrTo("public-read"), } case *vfs.MemFSPath: if !p.IsClusterReadable() { return nil, fmt.Errorf("the %q path is intended for use in tests", p.Path()) } acl = &vfs.S3Acl{ - RequestACL: fi.String("public-read"), + RequestACL: fi.PtrTo("public-read"), } default: return nil, fmt.Errorf("the %q path does not support public ACL", p.Path()) @@ -146,7 +146,7 @@ func (e *ManagedFile) getACL(c *fi.Context, p vfs.Path) (vfs.ACL, error) { } func (_ *ManagedFile) Render(c *fi.Context, a, e, changes *ManagedFile) error { - location := fi.StringValue(e.Location) + location := fi.ValueOf(e.Location) if location == "" { return fi.RequiredField("Location") } @@ -176,7 +176,7 @@ func (_ *ManagedFile) Render(c *fi.Context, a, e, changes *ManagedFile) error { } func getBasePath(c *fi.Context, e *ManagedFile) (vfs.Path, error) { - base := fi.StringValue(e.Base) + base := fi.ValueOf(e.Base) if base != "" { p, err := vfs.Context.BuildVfsPath(base) if err != nil { @@ -194,7 +194,7 @@ func (f *ManagedFile) RenderTerraform(c *fi.Context, t *terraform.TerraformTarge return f.Render(c, a, e, changes) } - location := fi.StringValue(e.Location) + location := fi.ValueOf(e.Location) if location == "" { return fi.RequiredField("Location") } diff --git a/upup/pkg/fi/fitasks/secret.go 
b/upup/pkg/fi/fitasks/secret.go index f51b10c5dc859..8078927a12a42 100644 --- a/upup/pkg/fi/fitasks/secret.go +++ b/upup/pkg/fi/fitasks/secret.go @@ -38,7 +38,7 @@ func (e *Secret) CheckExisting(c *fi.Context) bool { func (e *Secret) Find(c *fi.Context) (*Secret, error) { secrets := c.SecretStore - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) if name == "" { return nil, nil } @@ -75,7 +75,7 @@ func (s *Secret) CheckChanges(a, e, changes *Secret) error { } func (_ *Secret) Render(c *fi.Context, a, e, changes *Secret) error { - name := fi.StringValue(e.Name) + name := fi.ValueOf(e.Name) if name == "" { return fi.RequiredField("Name") } diff --git a/upup/pkg/fi/nodeup/command.go b/upup/pkg/fi/nodeup/command.go index 34479a01f269e..aab7852380549 100644 --- a/upup/pkg/fi/nodeup/command.go +++ b/upup/pkg/fi/nodeup/command.go @@ -116,7 +116,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error { return fmt.Errorf("failed to get node config from server: %w", err) } nodeConfig = response.NodeConfig - } else if fi.StringValue(bootConfig.ConfigBase) != "" { + } else if fi.ValueOf(bootConfig.ConfigBase) != "" { var err error configBase, err = vfs.Context.BuildVfsPath(*bootConfig.ConfigBase) if err != nil { @@ -287,7 +287,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error { // If Nvidia is enabled in the cluster, check if this instance has support for it. nvidia := c.cluster.Spec.Containerd.NvidiaGPU - if nvidia != nil && fi.BoolValue(nvidia.Enabled) { + if nvidia != nil && fi.ValueOf(nvidia.Enabled) { awsCloud := cloud.(awsup.AWSCloud) // Get the instance type's detailed information. instanceType, err := awsup.GetMachineTypeInfo(awsCloud, modelContext.MachineType) @@ -302,7 +302,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error { } } else if cloudProvider == api.CloudProviderOpenstack { // NvidiaGPU possible to enable only in instance group level in OpenStack. When we assume that GPU is supported - if nodeupConfig.NvidiaGPU != nil && fi.BoolValue(nodeupConfig.NvidiaGPU.Enabled) { + if nodeupConfig.NvidiaGPU != nil && fi.ValueOf(nodeupConfig.NvidiaGPU.Enabled) { klog.Info("instance supports GPU acceleration") modelContext.GPUVendor = architectures.GPUVendorNvidia } @@ -447,7 +447,7 @@ func completeWarmingLifecycleAction(cloud awsup.AWSCloud, modelContext *model.No AutoScalingGroupName: &asgName, InstanceId: &modelContext.InstanceID, LifecycleHookName: &hookName, - LifecycleActionResult: fi.String("CONTINUE"), + LifecycleActionResult: fi.PtrTo("CONTINUE"), }) if err != nil { return fmt.Errorf("failed to complete lifecycle hook %q for %q: %v", hookName, modelContext.InstanceID, err) @@ -592,8 +592,8 @@ func evaluateBindAddress(bindAddress string) (string, error) { // evaluateDockerSpec selects the first supported storage mode, if it is a list func evaluateDockerSpecStorage(spec *api.DockerConfig) error { - storage := fi.StringValue(spec.Storage) - if strings.Contains(fi.StringValue(spec.Storage), ",") { + storage := fi.ValueOf(spec.Storage) + if strings.Contains(fi.ValueOf(spec.Storage), ",") { precedence := strings.Split(storage, ",") for _, opt := range precedence { fs := opt @@ -623,7 +623,7 @@ func evaluateDockerSpecStorage(spec *api.DockerConfig) error { if supported { klog.Infof("Using supported docker storage %q", opt) - spec.Storage = fi.String(opt) + spec.Storage = fi.PtrTo(opt) return nil } @@ -633,7 +633,7 @@ func evaluateDockerSpecStorage(spec *api.DockerConfig) error { // Just in case we don't recognize the driver? 
// TODO: Is this the best behaviour klog.Warningf("No storage module was supported from %q, will default to %q", storage, precedence[0]) - spec.Storage = fi.String(precedence[0]) + spec.Storage = fi.PtrTo(precedence[0]) return nil } @@ -807,7 +807,7 @@ func getAWSConfigurationMode(c *model.NodeupModelContext) (string, error) { if len(result.AutoScalingInstances) < 1 { return "", nil } - lifecycle := fi.StringValue(result.AutoScalingInstances[0].LifecycleState) + lifecycle := fi.ValueOf(result.AutoScalingInstances[0].LifecycleState) if strings.HasPrefix(lifecycle, "Warmed:") { klog.Info("instance is entering warm pool") return model.ConfigurationModeWarming, nil diff --git a/upup/pkg/fi/nodeup/nodetasks/bindmount.go b/upup/pkg/fi/nodeup/nodetasks/bindmount.go index ccbe24d51b9e3..fa08382002fd9 100644 --- a/upup/pkg/fi/nodeup/nodetasks/bindmount.go +++ b/upup/pkg/fi/nodeup/nodetasks/bindmount.go @@ -52,7 +52,7 @@ func (e *BindMount) Dir() string { var _ fi.HasName = &Archive{} func (e *BindMount) GetName() *string { - return fi.String("BindMount-" + e.Mountpoint) + return fi.PtrTo("BindMount-" + e.Mountpoint) } var _ fi.HasDependencies = &BindMount{} diff --git a/upup/pkg/fi/nodeup/nodetasks/chattr.go b/upup/pkg/fi/nodeup/nodetasks/chattr.go index a18afba1d2d87..27283fe1add0b 100644 --- a/upup/pkg/fi/nodeup/nodetasks/chattr.go +++ b/upup/pkg/fi/nodeup/nodetasks/chattr.go @@ -43,7 +43,7 @@ func (s *Chattr) String() string { var _ fi.HasName = &Archive{} func (e *Chattr) GetName() *string { - return fi.String("Chattr-" + e.File) + return fi.PtrTo("Chattr-" + e.File) } var _ fi.HasDependencies = &Chattr{} diff --git a/upup/pkg/fi/nodeup/nodetasks/file.go b/upup/pkg/fi/nodeup/nodetasks/file.go index b75cb14f9581d..8624f2bb9b697 100644 --- a/upup/pkg/fi/nodeup/nodetasks/file.go +++ b/upup/pkg/fi/nodeup/nodetasks/file.go @@ -125,7 +125,7 @@ func findFile(p string) (*File, error) { actual := &File{} actual.Path = p - actual.Mode = fi.String(fi.FileModeToString(stat.Mode() & os.ModePerm)) + actual.Mode = fi.PtrTo(fi.FileModeToString(stat.Mode() & os.ModePerm)) uid := int(stat.Sys().(*syscall.Stat_t).Uid) owner, err := fi.LookupUserByID(uid) @@ -133,9 +133,9 @@ func findFile(p string) (*File, error) { return nil, err } if owner != nil { - actual.Owner = fi.String(owner.Name) + actual.Owner = fi.PtrTo(owner.Name) } else { - actual.Owner = fi.String(strconv.Itoa(uid)) + actual.Owner = fi.PtrTo(strconv.Itoa(uid)) } gid := int(stat.Sys().(*syscall.Stat_t).Gid) @@ -144,9 +144,9 @@ func findFile(p string) (*File, error) { return nil, err } if group != nil { - actual.Group = fi.String(group.Name) + actual.Group = fi.PtrTo(group.Name) } else { - actual.Group = fi.String(strconv.Itoa(gid)) + actual.Group = fi.PtrTo(strconv.Itoa(gid)) } if (stat.Mode() & os.ModeSymlink) != 0 { @@ -156,7 +156,7 @@ func findFile(p string) (*File, error) { } actual.Type = FileType_Symlink - actual.Symlink = fi.String(target) + actual.Symlink = fi.PtrTo(target) } else if (stat.Mode() & os.ModeDir) != 0 { actual.Type = FileType_Directory } else { @@ -196,9 +196,9 @@ func (s *File) CheckChanges(a, e, changes *File) error { func (_ *File) RenderLocal(t *local.LocalTarget, a, e, changes *File) error { dirMode := os.FileMode(0o755) - fileMode, err := fi.ParseFileMode(fi.StringValue(e.Mode), 0o644) + fileMode, err := fi.ParseFileMode(fi.ValueOf(e.Mode), 0o644) if err != nil { - return fmt.Errorf("invalid file mode for %q: %q", e.Path, fi.StringValue(e.Mode)) + return fmt.Errorf("invalid file mode for %q: %q", e.Path, fi.ValueOf(e.Mode)) 
} if a != nil { @@ -236,7 +236,7 @@ func (_ *File) RenderLocal(t *local.LocalTarget, a, e, changes *File) error { } } else if e.Type == FileType_File { if changes.Contents != nil { - err = fi.WriteFile(e.Path, e.Contents, fileMode, dirMode, fi.StringValue(e.Owner), fi.StringValue(e.Group)) + err = fi.WriteFile(e.Path, e.Contents, fileMode, dirMode, fi.ValueOf(e.Owner), fi.ValueOf(e.Group)) if err != nil { return fmt.Errorf("error copying file %q: %v", e.Path, err) } @@ -255,7 +255,7 @@ func (_ *File) RenderLocal(t *local.LocalTarget, a, e, changes *File) error { } if changes.Owner != nil || changes.Group != nil { - ownerChanged, err := fi.EnsureFileOwner(e.Path, fi.StringValue(e.Owner), fi.StringValue(e.Group)) + ownerChanged, err := fi.EnsureFileOwner(e.Path, fi.ValueOf(e.Owner), fi.ValueOf(e.Group)) if err != nil { return fmt.Errorf("error changing owner/group on %q: %v", e.Path, err) } @@ -281,13 +281,13 @@ func (_ *File) RenderLocal(t *local.LocalTarget, a, e, changes *File) error { func (_ *File) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *File) error { dirMode := os.FileMode(0o755) - fileMode, err := fi.ParseFileMode(fi.StringValue(e.Mode), 0o644) + fileMode, err := fi.ParseFileMode(fi.ValueOf(e.Mode), 0o644) if err != nil { return fmt.Errorf("invalid file mode for %s: %q", e.Path, *e.Mode) } if e.Type == FileType_Symlink { - t.AddCommand(cloudinit.Always, "ln", "-s", fi.StringValue(e.Symlink), e.Path) + t.AddCommand(cloudinit.Always, "ln", "-s", fi.ValueOf(e.Symlink), e.Path) } else if e.Type == FileType_Directory { parent := filepath.Dir(strings.TrimSuffix(e.Path, "/")) t.AddCommand(cloudinit.Once, "mkdir", "-p", "-m", fi.FileModeToString(dirMode), parent) @@ -302,7 +302,7 @@ func (_ *File) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *File } if e.Owner != nil || e.Group != nil { - t.Chown(e.Path, fi.StringValue(e.Owner), fi.StringValue(e.Group)) + t.Chown(e.Path, fi.ValueOf(e.Owner), fi.ValueOf(e.Group)) } if e.OnChangeExecute != nil { diff --git a/upup/pkg/fi/nodeup/nodetasks/file_test.go b/upup/pkg/fi/nodeup/nodetasks/file_test.go index aa9a8146df6ca..e74074fa8573e 100644 --- a/upup/pkg/fi/nodeup/nodetasks/file_test.go +++ b/upup/pkg/fi/nodeup/nodetasks/file_test.go @@ -40,7 +40,7 @@ func TestFileDependencies(t *testing.T) { Home: "/home/owner", }, child: &File{ - Owner: fi.String("owner"), + Owner: fi.PtrTo("owner"), Path: childFileName, Contents: fi.NewStringResource("I depend on an owner"), Type: FileType_File, diff --git a/upup/pkg/fi/nodeup/nodetasks/issue_cert.go b/upup/pkg/fi/nodeup/nodetasks/issue_cert.go index e923096d9ddfe..0bd706ceddc12 100644 --- a/upup/pkg/fi/nodeup/nodetasks/issue_cert.go +++ b/upup/pkg/fi/nodeup/nodetasks/issue_cert.go @@ -91,7 +91,7 @@ func (i *IssueCert) AddFileTasks(c *fi.ModelBuilderContext, dir string, name str err := c.EnsureTask(&File{ Path: dir, Type: FileType_Directory, - Mode: fi.String("0755"), + Mode: fi.PtrTo("0755"), }) if err != nil { return err @@ -101,7 +101,7 @@ func (i *IssueCert) AddFileTasks(c *fi.ModelBuilderContext, dir string, name str Path: filepath.Join(dir, name+".crt"), Contents: certResource, Type: FileType_File, - Mode: fi.String("0644"), + Mode: fi.PtrTo("0644"), Owner: owner, }) @@ -109,7 +109,7 @@ func (i *IssueCert) AddFileTasks(c *fi.ModelBuilderContext, dir string, name str Path: filepath.Join(dir, name+".key"), Contents: keyResource, Type: FileType_File, - Mode: fi.String("0600"), + Mode: fi.PtrTo("0600"), Owner: owner, }) @@ -118,7 +118,7 @@ func (i *IssueCert) AddFileTasks(c 
*fi.ModelBuilderContext, dir string, name str Path: filepath.Join(dir, caName+".crt"), Contents: caResource, Type: FileType_File, - Mode: fi.String("0644"), + Mode: fi.PtrTo("0644"), Owner: owner, }) if err != nil { diff --git a/upup/pkg/fi/nodeup/nodetasks/package.go b/upup/pkg/fi/nodeup/nodetasks/package.go index 87ae2ff6215d6..b545777e66dd5 100644 --- a/upup/pkg/fi/nodeup/nodetasks/package.go +++ b/upup/pkg/fi/nodeup/nodetasks/package.go @@ -120,7 +120,7 @@ func (f *Package) GetName() *string { // isOSPackage returns true if this is an OS provided package (as opposed to a bare .deb, for example) func (p *Package) isOSPackage() bool { - return fi.StringValue(p.Source) == "" + return fi.ValueOf(p.Source) == "" } // String returns a string representation, implementing the Stringer interface @@ -178,11 +178,11 @@ func (e *Package) findDpkg(c *fi.Context) (*Package, error) { case "ii": installed = true installedVersion = version - healthy = fi.Bool(true) + healthy = fi.PtrTo(true) case "iF", "iU": installed = true installedVersion = version - healthy = fi.Bool(false) + healthy = fi.PtrTo(false) case "rc": // removed installed = false @@ -201,13 +201,13 @@ func (e *Package) findDpkg(c *fi.Context) (*Package, error) { // TODO: Take InstanceGroup-level overriding of the Cluster-level update policy into account // here. Doing so requires that we make the current InstanceGroup available within Package's // methods. - if fi.StringValue(c.Cluster.Spec.UpdatePolicy) != kops.UpdatePolicyExternal || !installed { + if fi.ValueOf(c.Cluster.Spec.UpdatePolicy) != kops.UpdatePolicyExternal || !installed { return nil, nil } return &Package{ Name: e.Name, - Version: fi.String(installedVersion), + Version: fi.PtrTo(installedVersion), Healthy: healthy, }, nil } @@ -246,19 +246,19 @@ func (e *Package) findYum(c *fi.Context) (*Package, error) { installed = true installedVersion = tokens[1] // If we implement unhealthy; be sure to implement repair in Render - healthy = fi.Bool(true) + healthy = fi.PtrTo(true) } // TODO: Take InstanceGroup-level overriding of the Cluster-level update policy into account // here. Doing so requires that we make the current InstanceGroup available within Package's // methods. 
- if fi.StringValue(c.Cluster.Spec.UpdatePolicy) != kops.UpdatePolicyExternal || !installed { + if fi.ValueOf(c.Cluster.Spec.UpdatePolicy) != kops.UpdatePolicyExternal || !installed { return nil, nil } return &Package{ Name: e.Name, - Version: fi.String(installedVersion), + Version: fi.PtrTo(installedVersion), Healthy: healthy, }, nil } @@ -311,14 +311,14 @@ func (_ *Package) RenderLocal(t *local.LocalTarget, a, e, changes *Package) erro local := path.Join(localPackageDir, pkg.Name+ext) pkgs[i] = local var hash *hashing.Hash - if fi.StringValue(pkg.Hash) != "" { - parsed, err := hashing.FromString(fi.StringValue(pkg.Hash)) + if fi.ValueOf(pkg.Hash) != "" { + parsed, err := hashing.FromString(fi.ValueOf(pkg.Hash)) if err != nil { return fmt.Errorf("error parsing hash: %v", err) } hash = parsed } - _, err = fi.DownloadURL(fi.StringValue(pkg.Source), local, hash) + _, err = fi.DownloadURL(fi.ValueOf(pkg.Source), local, hash) if err != nil { return err } diff --git a/upup/pkg/fi/nodeup/nodetasks/prefix.go b/upup/pkg/fi/nodeup/nodetasks/prefix.go index 818893244bc53..7c21b1d7ff81b 100644 --- a/upup/pkg/fi/nodeup/nodetasks/prefix.go +++ b/upup/pkg/fi/nodeup/nodetasks/prefix.go @@ -94,13 +94,13 @@ func (_ *Prefix) RenderLocal(t *local.LocalTarget, a, e, changes *Prefix) error } response, err := t.Cloud.(awsup.AWSCloud).EC2().AssignIpv6Addresses(&ec2.AssignIpv6AddressesInput{ - Ipv6PrefixCount: fi.Int64(1), - NetworkInterfaceId: fi.String(interfaceId), + Ipv6PrefixCount: fi.PtrTo(int64(1)), + NetworkInterfaceId: fi.PtrTo(interfaceId), }) if err != nil { return fmt.Errorf("failed to assign prefix: %w", err) } - klog.V(2).Infof("assigned prefix to primary network interface: %q", fi.StringValue(response.AssignedIpv6Prefixes[0])) + klog.V(2).Infof("assigned prefix to primary network interface: %q", fi.ValueOf(response.AssignedIpv6Prefixes[0])) return nil } diff --git a/upup/pkg/fi/nodeup/nodetasks/service.go b/upup/pkg/fi/nodeup/nodetasks/service.go index f2f92a3b176dc..81ea57b81c975 100644 --- a/upup/pkg/fi/nodeup/nodetasks/service.go +++ b/upup/pkg/fi/nodeup/nodetasks/service.go @@ -37,14 +37,13 @@ const ( // TODO: Generally only repo packages write to /usr/lib/systemd/system on _rhel_family // But we use it in two ways: we update the docker manifest, and we install our own // package (protokube, kubelet). Maybe we should have the idea of a "system" package. 
- centosSystemdSystemPath = "/usr/lib/systemd/system" - - flatcarSystemdSystemPath = "/etc/systemd/system" - + centosSystemdSystemPath = "/usr/lib/systemd/system" + flatcarSystemdSystemPath = "/etc/systemd/system" containerosSystemdSystemPath = "/etc/systemd/system" containerdService = "containerd.service" dockerService = "docker.service" + kubeletService = "kubelet.service" protokubeService = "protokube.service" ) @@ -75,8 +74,12 @@ func (p *Service) GetDependencies(tasks map[string]fi.Task) []fi.Task { switch v := v.(type) { case *Package, *UpdatePackages, *UserTask, *GroupTask, *Chattr, *BindMount, *Archive, *Prefix, *UpdateEtcHostsTask: deps = append(deps, v) - case *Service, *LoadImageTask, *PullImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig: + case *Service, *PullImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig: // ignore + case *LoadImageTask: + if p.Name == kubeletService { + deps = append(deps, v) + } case *File: if len(v.BeforeServices) > 0 { for _, s := range v.BeforeServices { @@ -102,13 +105,13 @@ func (s *Service) String() string { func (s *Service) InitDefaults() *Service { // Default some values to true: Running, SmartRestart, ManageState if s.Running == nil { - s.Running = fi.Bool(true) + s.Running = fi.PtrTo(true) } if s.SmartRestart == nil { - s.SmartRestart = fi.Bool(true) + s.SmartRestart = fi.PtrTo(true) } if s.ManageState == nil { - s.ManageState = fi.Bool(true) + s.ManageState = fi.PtrTo(true) } // Default Enabled to be the same as running @@ -178,13 +181,13 @@ func (e *Service) Find(c *fi.Context) (*Service, error) { return &Service{ Name: e.Name, Definition: nil, - Running: fi.Bool(false), + Running: fi.PtrTo(false), }, nil } actual := &Service{ Name: e.Name, - Definition: fi.String(string(d)), + Definition: fi.PtrTo(string(d)), // Avoid spurious changes ManageState: e.ManageState, @@ -199,27 +202,27 @@ func (e *Service) Find(c *fi.Context) (*Service, error) { activeState := properties["ActiveState"] switch activeState { case "active": - actual.Running = fi.Bool(true) + actual.Running = fi.PtrTo(true) case "failed", "inactive": - actual.Running = fi.Bool(false) + actual.Running = fi.PtrTo(false) default: klog.Warningf("Unknown ActiveState=%q; will treat as not running", activeState) - actual.Running = fi.Bool(false) + actual.Running = fi.PtrTo(false) } wantedBy := properties["WantedBy"] switch wantedBy { case "": - actual.Enabled = fi.Bool(false) + actual.Enabled = fi.PtrTo(false) // TODO: Can probably do better here! 
case "multi-user.target", "graphical.target multi-user.target": - actual.Enabled = fi.Bool(true) + actual.Enabled = fi.PtrTo(true) default: klog.Warningf("Unknown WantedBy=%q; will treat as not enabled", wantedBy) - actual.Enabled = fi.Bool(false) + actual.Enabled = fi.PtrTo(false) } return actual, nil @@ -268,8 +271,8 @@ func (_ *Service) RenderLocal(t *local.LocalTarget, a, e, changes *Service) erro action := "" - if changes.Running != nil && fi.BoolValue(e.ManageState) { - if fi.BoolValue(e.Running) { + if changes.Running != nil && fi.ValueOf(e.ManageState) { + if fi.ValueOf(e.Running) { action = "restart" } else { action = "stop" @@ -292,13 +295,13 @@ func (_ *Service) RenderLocal(t *local.LocalTarget, a, e, changes *Service) erro } // "SmartRestart" - look at the obvious dependencies in the systemd service, restart if start time older - if fi.BoolValue(e.ManageState) && fi.BoolValue(e.SmartRestart) { - definition := fi.StringValue(e.Definition) + if fi.ValueOf(e.ManageState) && fi.ValueOf(e.SmartRestart) { + definition := fi.ValueOf(e.Definition) if definition == "" && a != nil { - definition = fi.StringValue(a.Definition) + definition = fi.ValueOf(a.Definition) } - if action == "" && fi.BoolValue(e.Running) && definition != "" { + if action == "" && fi.ValueOf(e.Running) && definition != "" { dependencies, err := getSystemdDependencies(serviceName, definition) if err != nil { return err @@ -345,7 +348,7 @@ func (_ *Service) RenderLocal(t *local.LocalTarget, a, e, changes *Service) erro } } - if action != "" && fi.BoolValue(e.ManageState) { + if action != "" && fi.ValueOf(e.ManageState) { klog.Infof("Restarting service %q", serviceName) cmd := exec.Command("systemctl", action, serviceName) output, err := cmd.CombinedOutput() @@ -354,9 +357,9 @@ func (_ *Service) RenderLocal(t *local.LocalTarget, a, e, changes *Service) erro } } - if changes.Enabled != nil && fi.BoolValue(e.ManageState) { + if changes.Enabled != nil && fi.ValueOf(e.ManageState) { var args []string - if fi.BoolValue(e.Enabled) { + if fi.ValueOf(e.Enabled) { klog.Infof("Enabling service %q", serviceName) args = []string{"enable", serviceName} } else { @@ -388,7 +391,7 @@ func (_ *Service) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *S return err } - if fi.BoolValue(e.ManageState) { + if fi.ValueOf(e.ManageState) { t.AddCommand(cloudinit.Once, "systemctl", "daemon-reload") t.AddCommand(cloudinit.Once, "systemctl", "start", "--no-block", serviceName) } diff --git a/upup/pkg/fi/task.go b/upup/pkg/fi/task.go index f8325f695d488..a3924f00e699e 100644 --- a/upup/pkg/fi/task.go +++ b/upup/pkg/fi/task.go @@ -141,7 +141,7 @@ func buildTaskKey(task Task) string { klog.Fatalf("task %T does not implement HasName", task) } - name := StringValue(hasName.GetName()) + name := ValueOf(hasName.GetName()) if name == "" { klog.Fatalf("task %T (%v) did not have a Name", task, task) } diff --git a/upup/pkg/fi/values.go b/upup/pkg/fi/values.go index 980fffd1bb37a..b6d4d2ef7ae9c 100644 --- a/upup/pkg/fi/values.go +++ b/upup/pkg/fi/values.go @@ -23,11 +23,17 @@ import ( "strconv" ) -func StringValue(s *string) string { - if s == nil { - return "" +// PtrTo returns a pointer to a copy of any value. 
+func PtrTo[T any](v T) *T { + return &v +} + +// ValueOf returns the value of a pointer or its zero value +func ValueOf[T any](v *T) T { + if v == nil { + return *new(T) } - return *s + return *v } // StringSliceValue takes a slice of string pointers and returns a slice of strings @@ -48,12 +54,6 @@ func IsNilOrEmpty(s *string) bool { return *s == "" } -// String is a helper that builds a *string from a string value -// This is similar to aws.String, except that we use it for non-AWS values -func String(s string) *string { - return &s -} - // StringSlice is a helper that builds a []*string from a slice of strings func StringSlice(stringSlice []string) []*string { var newSlice []*string @@ -63,89 +63,6 @@ func StringSlice(stringSlice []string) []*string { return newSlice } -// Float32 returns a point to a float32 -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float -func Float32Value(v *float32) float32 { - if v == nil { - return 0.0 - } - - return *v -} - -// Float64 returns a point to a float64 -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float -func Float64Value(v *float64) float64 { - if v == nil { - return 0.0 - } - - return *v -} - -// Bool returns a pointer to a bool -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of bool pointer or false -func BoolValue(v *bool) bool { - if v == nil { - return false - } - return *v -} - -func Int32(v int32) *int32 { - return &v -} - -func Int32Value(v *int32) int32 { - if v == nil { - return 0 - } - return *v -} - -// Int64 is a helper that builds a *int64 from an int64 value -// This is similar to aws.Int64, except that we use it for non-AWS values -func Int64(v int64) *int64 { - return &v -} - -func Int64Value(v *int64) int64 { - if v == nil { - return 0 - } - return *v -} - -func Int(v int) *int { - return &v -} - -func IntValue(v *int) int { - if v == nil { - return 0 - } - return *v -} - -func Uint64Value(v *uint64) uint64 { - if v == nil { - return 0 - } - return *v -} - // ArrayContains is checking does array contain single word func ArrayContains(array []string, word string) bool { for _, item := range array {
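The section ends mid-hunk in values.go, but the shape of the migration is clear: the generic pair PtrTo/ValueOf replaces the entire typed helper family (String/StringValue, Bool/BoolValue, Int64/Int64Value, and so on). A self-contained sketch, reproducing the two definitions from this diff, that also shows the type-inference caveat behind casts like fi.PtrTo(int64(0)) seen earlier:

```go
package main

import "fmt"

// PtrTo returns a pointer to a copy of any value (from this diff).
func PtrTo[T any](v T) *T { return &v }

// ValueOf returns the pointee, or the zero value for a nil pointer,
// preserving the old StringValue/BoolValue/IntValue semantics.
func ValueOf[T any](v *T) T {
	if v == nil {
		return *new(T)
	}
	return *v
}

func main() {
	name := PtrTo("ocean")   // *string, inferred from the argument
	count := PtrTo(int64(3)) // untyped 3 would infer int, hence the cast
	var missing *bool        // nil: unset field

	fmt.Println(ValueOf(name))    // ocean
	fmt.Println(ValueOf(count))   // 3
	fmt.Println(ValueOf(missing)) // false, matching the old BoolValue
}
```

Because type parameters are inferred from an untyped constant's default type, call sites that need *int64 keep an explicit conversion; that is why fi.Int64(0) becomes fi.PtrTo(int64(0)) rather than fi.PtrTo(0).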